85 Commits
0.0.0 ... 0.3.1

Author SHA1 Message Date
62c2f2fd25 [RELEASE] create release 0.3.1 2016-09-12 21:06:37 +02:00
9a96211853 [DEV] update to future lutin 2.0 2016-09-08 21:35:02 +02:00
7e104a1f72 [DEV] update to future lutin 2.0 and update to new ejson 2016-09-07 22:05:42 +02:00
94e2bbabe3 [DEV] update dev tag version 2016-08-30 22:54:57 +02:00
b1eebd75e9 [RELEASE] create release 0.3.0 2016-08-30 22:54:08 +02:00
8174d45416 [DEV] add support of multiple input stream type 2016-08-22 21:52:31 +02:00
db89c092be [DEV] update sharedPtr 2016-07-19 21:43:58 +02:00
151a3ddcf4 [DEV] rm __class__ 2016-05-02 22:01:55 +02:00
bc78a1858a [DEV] update the change on 'enum' to 'enum class' 2016-04-29 23:16:07 +02:00
f4f1ee888b [DEBUG] correct build on windows 2016-03-17 22:13:08 +01:00
e7511a0b92 [DEV] update of external of elog and ethread 2016-03-08 21:29:34 +01:00
b5bdb4e2db [DEV] replace 'include guard' with 'pragma once' 2016-02-02 21:18:54 +01:00
e90271754f [DEBUG] corect oss build (deprecated) 2015-10-20 21:25:11 +02:00
c60d6a8240 [DEV] update new lutin 0.8.0 2015-10-14 21:21:03 +02:00
4d2758cc63 [DEV] remove alsa log 2015-09-29 21:12:20 +02:00
2b665e9383 [DEV] update next lutin version and debug android play audio 2015-09-24 21:44:04 +02:00
71fc8af983 [DEV] update Build interface 2015-09-14 21:11:04 +02:00
145d930567 [CI] update travis with new interface (no sudo) 2015-08-24 23:55:27 +02:00
94c16ad846 [DEV] simplify APIs and remove OSS (not so used) 2015-07-10 23:42:42 +02:00
a8c1a92c7a [DEV] continue rework of list of device search 2015-07-07 22:39:09 +02:00
22dd01978a [DEBUG] correct the Mac audio interface 2015-07-07 21:37:03 +02:00
09e32a815a [DEV] update java interfaec of Input and output 2015-07-01 22:06:29 +02:00
3a0ab73a3a [DEV] rename android interface for java 2015-06-30 23:25:34 +02:00
36b0231a11 [DEV correct audio output 2015-06-26 22:07:50 +02:00
7aad6c26c4 [DEV] correct some interface of android 2015-06-23 21:09:57 +02:00
fbd6eceee6 [DEV] continue integration of audio interface 2015-06-22 23:11:04 +02:00
7d0a38e087 [DEV] continue dev of android audio interface 2015-06-22 21:39:29 +02:00
07684a0e54 [DEV] real integration for java 2015-06-21 21:58:15 +02:00
54ce284b1b [DEV] pulseaudio missing compilation flag 2015-06-16 21:34:51 +02:00
7b0316a8aa [DEV] rework continue (better integration of pulseaudio and low level devices 2015-06-16 21:08:23 +02:00
4b5bbd9626 [DEV] try to add a list of device for pulse 2015-06-16 21:08:23 +02:00
57c9cc1132 [CI] reme run of the library" 2015-06-15 22:23:55 +02:00
cc12384aea [DOC] create readme 2015-06-15 22:22:35 +02:00
06290dd92d [DEV] update new worktree 2015-06-15 19:27:55 +02:00
fa618958b8 [DEV] add basic tools and change some API 2015-06-11 21:39:56 +02:00
bb16adc099 [DEV] alsa poll mode availlable ... 2015-06-11 21:33:32 +02:00
dbd3c18ac3 [DEV] rework for stream read/write in mmap 2015-06-07 22:32:54 +02:00
9dec54d4c7 [DEV] start rework Alsa API to support poll event and MMAP system 2015-06-05 22:00:17 +02:00
17d59cf370 update the create function and MacOs Ios time stamp for input output... 2015-05-11 21:33:58 +02:00
5992923c88 [BUILD] update new lutin 0.5.3 2015-05-08 22:35:39 +02:00
32c0784d3b [DEBUG] build error with pulse 2015-04-17 22:02:09 +02:00
488b44b03b [DEV] update Catkin 2015-04-14 21:53:14 +02:00
24af15748d [DEV] update audio time interface 2015-04-13 21:49:48 +02:00
32858d6104 [DEV] add dummi compiled 2015-04-11 17:41:53 +02:00
f5c3affccb [DEV] rework Airtaudio in audioo::orchestra 2015-04-10 22:06:17 +02:00
d4c53a53bf [DEV] update Log interafce 2015-03-25 21:14:12 +01:00
38f51ec421 [DEV] update ALSA error log. 2015-03-23 23:14:45 +01:00
102a4e5ca5 [DEV] update thread set priority 2015-03-20 21:07:58 +01:00
e873a44615 [BUILD] set catkin etk correct define 2015-03-18 23:43:06 +01:00
40f2b25c5e [DEBUG] segfault 2015-03-13 22:30:44 +01:00
c4b4c40931 [DEV] add some log in ALSA 2015-03-12 22:30:43 +01:00
12bd24d064 [DEV] add basic catkin 2015-03-12 21:33:44 +01:00
36e5f26cb4 [DEV] try change the XRun detection in soft 2015-03-06 22:50:03 +01:00
e4c46dbd64 [DEV] minor change (add a usleep in alsa) 2015-03-04 23:52:53 +01:00
1bf633b346 [DEV] romove log 2015-03-04 21:31:10 +01:00
0f5667bb67 [DEBUG] add some log in alsa to help error 2015-03-03 21:28:07 +01:00
8f3d17fdf8 [DEV] many internal change and add linker alsa flow 2015-02-27 21:07:17 +01:00
a5dbe5a607 [DEV] start add option 2015-02-26 23:49:26 +01:00
e6de495285 [DEV] add API to open stream with his name (alsa specific) 2015-02-26 22:25:15 +01:00
12342a4ffa [TEST] open named channel 2015-02-26 21:57:13 +01:00
4b30ecbfff [DEV] add some debug 2015-02-26 21:44:16 +01:00
1e8794e576 [DEV] correct version of CPP 2015-02-25 22:05:00 +01:00
1d5ac5fecb [DEV] WORK on a port for BOOST 2015-02-24 22:20:11 +01:00
3a9ce1ffd9 [DEV] try have good timestamp 2015-02-19 22:49:33 +01:00
c685c176dd [DEV] try alsa time stamp 2015-02-19 22:00:21 +01:00
1baebd1029 [DEV] remove log 2015-02-18 15:22:48 +01:00
24fef86124 [DEV] rework and correct timestamp of Alsa (pase 1) 2015-02-17 21:08:15 +01:00
b72d6f31df [DEV] correct ALSA timestamp 2015-02-16 23:01:26 +01:00
d9453e6e7a [DEV] add auto and thread name 2015-02-13 21:06:55 +01:00
650f24c288 [DEV] correct IOs and MacOs audio interface (tested on MacOs but not on IOs) 2015-02-12 23:19:48 +01:00
4fc9a3e05f [DEV] set windows ds interface work (in theory) build is ok 2015-02-12 21:02:51 +01:00
1a24bb9254 [DEV] change time properties 2015-02-10 22:38:30 +01:00
5c9361c199 [DEV] set member in private for internal backend interface 2015-02-10 21:01:53 +01:00
4febe7b119 [DEV] correct the jack output 2015-02-09 23:24:41 +01:00
7d91d12152 [DEV] set the fork work corectly 2015-02-09 21:44:32 +01:00
f4471f25e8 [DEV] correct jack and pulse output 2015-02-08 15:09:39 +01:00
028279a74f [DEV] rework many APIs 2015-02-06 23:54:08 +01:00
7c6a495d86 [DEV] start compleate rework 2015-02-06 00:14:14 +01:00
3629886590 [DEV] start remove some uneeded element in the airtaudio lib 2015-02-05 23:31:22 +01:00
6de9bac0fc [DEV] minor log change 2015-02-04 22:39:05 +01:00
af22fafffa [DEV] add think of new api for lutin 2015-01-28 22:07:11 +01:00
d8933f0989 [DEV] add log 2015-01-27 23:06:19 +01:00
6aa1746a27 [DEV] start compleate rework
Conflicts:
	airtaudio/Interface.cpp
	airtaudio/Interface.h
2015-01-26 23:46:53 +01:00
dd604bc736 [DEV] small change 2015-01-26 21:52:29 +01:00
00af426a47 [DEV] interface with ALSA 2015-01-08 21:33:02 +01:00
99 changed files with 10602 additions and 9732 deletions

50
.travis.yml Normal file
View File

@@ -0,0 +1,50 @@
# Travis CI configuration: build the library on Linux and OSX with both
# clang and gcc, plus one gcc coverage pass.
# NOTE(review): nesting below was reconstructed — the extracted copy had all
# indentation stripped, which is invalid YAML for the nested keys.
language:
  - cpp
sudo: false
os:
  - linux
  - osx
branches:
  only:
    - master
    - dev
addons:
  apt:
    sources:
      - ubuntu-toolchain-r-test
    packages:
      - g++-4.9
install:
  - pip install --user lutin
env:
  - CONF=debug BOARD=Linux BUILDER=clang GCOV=
  - CONF=release BOARD=Linux BUILDER=clang GCOV=
  - CONF=debug BOARD=Linux BUILDER=gcc GCOV=
  - CONF=release BOARD=Linux BUILDER=gcc GCOV=
  - CONF=debug BOARD=Linux BUILDER=gcc GCOV=--gcov
before_script:
  - cd ..
  - wget http://atria-soft.com/ci/coverage_send.py
  - wget http://atria-soft.com/ci/test_send.py
  - wget http://atria-soft.com/ci/warning_send.py
  - git clone https://github.com/atria-soft/etk.git
  - git clone https://github.com/musicdsp/audio.git
  - pwd
  - ls -l
  - if [ "$BUILDER" == "gcc" ]; then COMPILATOR_OPTION="--compilator-version=4.9"; else COMPILATOR_OPTION=""; fi
script:
  - lutin -w -j4 -C -P -c $BUILDER $COMPILATOR_OPTION -m $CONF $GCOV -p audio-orchestra
#  - ./out/Linux_x86_64/$CONF/staging/$BUILDER/audio-orchestra/usr/bin/audio-orchestra -l6
notifications:
  email:
    - yui.heero@gmail.com
4
README.md Normal file
View File

@@ -0,0 +1,4 @@
# audio-orchestra
(MIT) audio: backend to access audio hardware (Fork of the original RtAudio lib)
[![Build Status](https://travis-ci.org/musicdsp/audio-orchestra.svg?branch=master)](https://travis-ci.org/musicdsp/audio-orchestra)

View File

@@ -1,882 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
//#include <etk/types.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
std::ostream& operator <<(std::ostream& _os, const airtaudio::api::type& _obj){
	// Stream the symbolic name of a backend API identifier.
	// Any value without an explicit case falls back to "UNSPECIFIED",
	// matching the behavior of the previous default branch.
	const char* name = "UNSPECIFIED";
	switch (_obj) {
		case airtaudio::api::UNSPECIFIED:      name = "UNSPECIFIED"; break;
		case airtaudio::api::LINUX_ALSA:       name = "LINUX_ALSA"; break;
		case airtaudio::api::LINUX_PULSE:      name = "LINUX_PULSE"; break;
		case airtaudio::api::LINUX_OSS:        name = "LINUX_OSS"; break;
		case airtaudio::api::UNIX_JACK:        name = "UNIX_JACK"; break;
		case airtaudio::api::MACOSX_CORE:      name = "MACOSX_CORE"; break;
		case airtaudio::api::IOS_CORE:         name = "IOS_CORE"; break;
		case airtaudio::api::WINDOWS_ASIO:     name = "WINDOWS_ASIO"; break;
		case airtaudio::api::WINDOWS_DS:       name = "WINDOWS_DS"; break;
		case airtaudio::api::RTAUDIO_DUMMY:    name = "RTAUDIO_DUMMY"; break;
		case airtaudio::api::ANDROID_JAVA:     name = "ANDROID_JAVA"; break;
		case airtaudio::api::USER_INTERFACE_1: name = "USER_INTERFACE_1"; break;
		case airtaudio::api::USER_INTERFACE_2: name = "USER_INTERFACE_2"; break;
		case airtaudio::api::USER_INTERFACE_3: name = "USER_INTERFACE_3"; break;
		case airtaudio::api::USER_INTERFACE_4: name = "USER_INTERFACE_4"; break;
	}
	_os << name;
	return _os;
}
// Static variable definitions.
// Candidate sample rates probed when querying device capabilities (Hz, ascending).
const uint32_t airtaudio::api::SAMPLE_RATES[] = {
	4000,
	5512,
	8000,
	9600,
	11025,
	16000,
	22050,
	32000,
	44100,
	48000,
	88200,
	96000,
	176400,
	192000
};
// Number of entries in SAMPLE_RATES. Derived from the array definition so the
// count can never silently drift from the table (previously hard-coded to 14).
const uint32_t airtaudio::api::MAX_SAMPLE_RATES =
	sizeof(airtaudio::api::SAMPLE_RATES) / sizeof(airtaudio::api::SAMPLE_RATES[0]);
airtaudio::Api::Api() {
	// A freshly constructed backend starts with no stream attached.
	m_stream.state = airtaudio::api::STREAM_CLOSED;
	m_stream.mode = airtaudio::api::UNINITIALIZED;
	m_stream.apiHandle = 0;
	// Both per-direction user buffers (0 = output, 1 = input) start unallocated.
	for (int32_t iii=0; iii<2; ++iii) {
		m_stream.userBuffer[iii] = 0;
	}
}
airtaudio::Api::~Api() {
	// Intentionally empty: this base class allocates nothing in its constructor,
	// so there is nothing to release here.
}
/**
 * @brief Open an audio stream with the requested input and/or output parameters.
 * @param oParams Output (playback) parameters, or NULL for a capture-only stream.
 * @param iParams Input (capture) parameters, or NULL for a playback-only stream.
 * @param format Sample format requested by the user.
 * @param sampleRate Requested sample rate in Hz.
 * @param bufferFrames In/out: requested buffer size in frames; may be adjusted by the backend.
 * @param callback User callback invoked for audio I/O.
 * @param userData Opaque pointer handed back to the callback.
 * @param options Optional stream options; numberOfBuffers is written back on success.
 * @return airtaudio::errorNone on success, errorInvalidUse on bad arguments or
 *         if a stream is already open, errorSystemError if a device probe fails.
 */
enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters *oParams,
                                                     airtaudio::StreamParameters *iParams,
                                                     airtaudio::format format,
                                                     uint32_t sampleRate,
                                                     uint32_t *bufferFrames,
                                                     airtaudio::AirTAudioCallback callback,
                                                     void *userData,
                                                     airtaudio::StreamOptions *options) {
	// Only one stream per Api instance: reject if one is already open.
	if (m_stream.state != airtaudio::api::STREAM_CLOSED) {
		ATA_ERROR("airtaudio::Api::openStream: a stream is already open!");
		return airtaudio::errorInvalidUse;
	}
	// A non-NULL direction must request at least one channel.
	if (oParams && oParams->nChannels < 1) {
		ATA_ERROR("airtaudio::Api::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.");
		return airtaudio::errorInvalidUse;
	}
	if (iParams && iParams->nChannels < 1) {
		ATA_ERROR("airtaudio::Api::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.");
		return airtaudio::errorInvalidUse;
	}
	// At least one direction is required.
	if (oParams == NULL && iParams == NULL) {
		ATA_ERROR("airtaudio::Api::openStream: input and output StreamParameters structures are both NULL!");
		return airtaudio::errorInvalidUse;
	}
	// formatBytes() returns 0 for unknown formats.
	if (formatBytes(format) == 0) {
		ATA_ERROR("airtaudio::Api::openStream: 'format' parameter value is undefined.");
		return airtaudio::errorInvalidUse;
	}
	// Validate the requested device ids against the backend's device count.
	uint32_t nDevices = getDeviceCount();
	uint32_t oChannels = 0;
	if (oParams) {
		oChannels = oParams->nChannels;
		if (oParams->deviceId >= nDevices) {
			ATA_ERROR("airtaudio::Api::openStream: output device parameter value is invalid.");
			return airtaudio::errorInvalidUse;
		}
	}
	uint32_t iChannels = 0;
	if (iParams) {
		iChannels = iParams->nChannels;
		if (iParams->deviceId >= nDevices) {
			ATA_ERROR("airtaudio::Api::openStream: input device parameter value is invalid.");
			return airtaudio::errorInvalidUse;
		}
	}
	// Reset all stream bookkeeping before probing the devices.
	clearStreamInfo();
	bool result;
	// Probe/open the output direction first...
	if (oChannels > 0) {
		result = probeDeviceOpen(oParams->deviceId,
		                         airtaudio::api::OUTPUT,
		                         oChannels,
		                         oParams->firstChannel,
		                         sampleRate,
		                         format,
		                         bufferFrames,
		                         options);
		if (result == false) {
			ATA_ERROR("system ERROR");
			return airtaudio::errorSystemError;
		}
	}
	// ...then the input direction; on failure, roll back the already-open output.
	if (iChannels > 0) {
		result = probeDeviceOpen(iParams->deviceId,
		                         airtaudio::api::INPUT,
		                         iChannels,
		                         iParams->firstChannel,
		                         sampleRate,
		                         format,
		                         bufferFrames,
		                         options);
		if (result == false) {
			if (oChannels > 0) {
				closeStream();
			}
			ATA_ERROR("system error");
			return airtaudio::errorSystemError;
		}
	}
	// Record the user callback and report the negotiated buffer count back.
	m_stream.callbackInfo.callback = (void *) callback;
	m_stream.callbackInfo.userData = userData;
	if (options != NULL) {
		options->numberOfBuffers = m_stream.nBuffers;
	}
	m_stream.state = airtaudio::api::STREAM_STOPPED;
	return airtaudio::errorNone;
}
/**
 * @brief Default input device id for this backend.
 * @return 0 in this base implementation.
 */
uint32_t airtaudio::Api::getDefaultInputDevice() {
	// Should be implemented in subclasses if possible.
	return 0;
}
/**
 * @brief Default output device id for this backend.
 * @return 0 in this base implementation.
 */
uint32_t airtaudio::Api::getDefaultOutputDevice() {
	// Should be implemented in subclasses if possible.
	return 0;
}
/**
 * @brief Close the currently open stream.
 * @return airtaudio::errorNone (this base implementation does nothing).
 */
enum airtaudio::errorType airtaudio::Api::closeStream() {
	// MUST be implemented in subclasses!
	return airtaudio::errorNone;
}
/**
 * @brief Probe and open one direction of a stream on a device.
 * @return false in this base implementation (always fails).
 */
bool airtaudio::Api::probeDeviceOpen(uint32_t /*device*/,
                                     airtaudio::api::StreamMode /*mode*/,
                                     uint32_t /*channels*/,
                                     uint32_t /*firstChannel*/,
                                     uint32_t /*sampleRate*/,
                                     airtaudio::format /*format*/,
                                     uint32_t * /*bufferSize*/,
                                     airtaudio::StreamOptions * /*options*/) {
	// MUST be implemented in subclasses!
	return false;
}
/**
 * @brief Advance the stream clock by one buffer's worth of time.
 */
void airtaudio::Api::tickStreamTime() {
	// Subclasses that do not provide their own implementation of
	// getStreamTime should call this function once per buffer I/O to
	// provide basic stream time support.
	m_stream.streamTime += (m_stream.bufferSize * 1.0 / m_stream.sampleRate);
#if defined(HAVE_GETTIMEOFDAY)
	// Remember when this tick happened so getStreamTime() can interpolate.
	gettimeofday(&m_stream.lastTickTimestamp, NULL);
#endif
}
/**
 * @brief Total stream latency in frames.
 * @return Sum of the output latency (slot 0) and input latency (slot 1) for
 *         the active direction(s); 0 if no stream is open.
 */
long airtaudio::Api::getStreamLatency() {
	if (verifyStream() != airtaudio::errorNone) {
		return 0;
	}
	const bool hasOutput =    m_stream.mode == airtaudio::api::OUTPUT
	                       || m_stream.mode == airtaudio::api::DUPLEX;
	const bool hasInput =    m_stream.mode == airtaudio::api::INPUT
	                      || m_stream.mode == airtaudio::api::DUPLEX;
	long totalLatency = 0;
	if (hasOutput) {
		totalLatency += m_stream.latency[0];
	}
	if (hasInput) {
		totalLatency += m_stream.latency[1];
	}
	return totalLatency;
}
/**
 * @brief Current stream time in seconds.
 * @return The stream clock, optionally refined with the wall-clock time
 *         elapsed since the last tick; 0.0 if no stream is open.
 */
double airtaudio::Api::getStreamTime() {
	if (verifyStream() != airtaudio::errorNone) {
		return 0.0f;
	}
#if defined(HAVE_GETTIMEOFDAY)
	// Return a very accurate estimate of the stream time by
	// adding in the elapsed time since the last tick.
	struct timeval then;
	struct timeval now;
	// Only interpolate while the stream is running and has actually ticked.
	if (m_stream.state != airtaudio::api::STREAM_RUNNING || m_stream.streamTime == 0.0) {
		return m_stream.streamTime;
	}
	gettimeofday(&now, NULL);
	then = m_stream.lastTickTimestamp;
	return m_stream.streamTime
	       + ((now.tv_sec + 0.000001 * now.tv_usec)
	          - (then.tv_sec + 0.000001 * then.tv_usec));
#else
	return m_stream.streamTime;
#endif
}
/**
 * @brief Sample rate of the open stream.
 * @return The stream's sample rate in Hz, or 0 if no stream is open.
 */
uint32_t airtaudio::Api::getStreamSampleRate() {
	if (verifyStream() != airtaudio::errorNone) {
		return 0;
	}
	return m_stream.sampleRate;
}
/**
 * @brief Check that a stream is currently open.
 * @return airtaudio::errorNone if open, airtaudio::errorInvalidUse otherwise.
 */
enum airtaudio::errorType airtaudio::Api::verifyStream() {
	if (m_stream.state == airtaudio::api::STREAM_CLOSED) {
		ATA_ERROR("airtaudio::Api:: a stream is not open!");
		return airtaudio::errorInvalidUse;
	}
	return airtaudio::errorNone;
}
/**
 * @brief Reset every field of the stream descriptor to its "no stream" state.
 *        Called before probing devices in openStream().
 */
void airtaudio::Api::clearStreamInfo() {
	m_stream.mode = airtaudio::api::UNINITIALIZED;
	m_stream.state = airtaudio::api::STREAM_CLOSED;
	m_stream.sampleRate = 0;
	m_stream.bufferSize = 0;
	m_stream.nBuffers = 0;
	m_stream.userFormat = 0;
	m_stream.userInterleaved = true;
	m_stream.streamTime = 0.0;
	m_stream.apiHandle = 0;
	m_stream.deviceBuffer = 0;
	m_stream.callbackInfo.callback = 0;
	m_stream.callbackInfo.userData = 0;
	m_stream.callbackInfo.isRunning = false;
	// Reset both directions (0 = output, 1 = input).
	for (int32_t iii=0; iii<2; ++iii) {
		// 11111 appears to be a "no device selected" sentinel — TODO confirm
		// that no backend can report a real device id this large.
		m_stream.device[iii] = 11111;
		m_stream.doConvertBuffer[iii] = false;
		m_stream.deviceInterleaved[iii] = true;
		m_stream.doByteSwap[iii] = false;
		m_stream.nUserChannels[iii] = 0;
		m_stream.nDeviceChannels[iii] = 0;
		m_stream.channelOffset[iii] = 0;
		m_stream.deviceFormat[iii] = 0;
		m_stream.latency[iii] = 0;
		m_stream.userBuffer[iii] = 0;
		m_stream.convertInfo[iii].channels = 0;
		m_stream.convertInfo[iii].inJump = 0;
		m_stream.convertInfo[iii].outJump = 0;
		m_stream.convertInfo[iii].inFormat = 0;
		m_stream.convertInfo[iii].outFormat = 0;
		m_stream.convertInfo[iii].inOffset.clear();
		m_stream.convertInfo[iii].outOffset.clear();
	}
}
/**
 * @brief Width of one sample of the given format.
 * @param _format Sample format to query.
 * @return Size in bytes of a single sample, or 0 (with an error log) for an
 *         unknown format.
 */
uint32_t airtaudio::Api::formatBytes(airtaudio::format _format)
{
	// Ordered from narrowest to widest sample width.
	if (_format == airtaudio::SINT8) {
		return 1;
	}
	if (_format == airtaudio::SINT16) {
		return 2;
	}
	if (_format == airtaudio::SINT24) {
		return 3;
	}
	if (   _format == airtaudio::SINT32
	    || _format == airtaudio::FLOAT32) {
		return 4;
	}
	if (_format == airtaudio::FLOAT64) {
		return 8;
	}
	ATA_ERROR("airtaudio::Api::formatBytes: undefined format.");
	// TODO : airtaudio::errorWarning;
	return 0;
}
/**
 * @brief Pre-compute the conversion descriptor used by convertBuffer() for one
 *        stream direction: sample jumps, formats, and per-channel offsets for
 *        (de)interleaving and first-channel shifting.
 * @param _mode Direction being configured (OUTPUT or INPUT); also indexes the
 *        per-direction arrays (0 = output, 1 = input).
 * @param _firstChannel Offset of the first device channel to map to/from.
 */
void airtaudio::Api::setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel) {
	if (_mode == airtaudio::api::INPUT) { // convert device to user buffer
		m_stream.convertInfo[_mode].inJump = m_stream.nDeviceChannels[1];
		m_stream.convertInfo[_mode].outJump = m_stream.nUserChannels[1];
		m_stream.convertInfo[_mode].inFormat = m_stream.deviceFormat[1];
		m_stream.convertInfo[_mode].outFormat = m_stream.userFormat;
	} else { // convert user to device buffer
		m_stream.convertInfo[_mode].inJump = m_stream.nUserChannels[0];
		m_stream.convertInfo[_mode].outJump = m_stream.nDeviceChannels[0];
		m_stream.convertInfo[_mode].inFormat = m_stream.userFormat;
		m_stream.convertInfo[_mode].outFormat = m_stream.deviceFormat[0];
	}
	// Convert only as many channels as both sides have (the smaller jump).
	if (m_stream.convertInfo[_mode].inJump < m_stream.convertInfo[_mode].outJump) {
		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].inJump;
	} else {
		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].outJump;
	}
	// Set up the interleave/deinterleave offsets.
	if (m_stream.deviceInterleaved[_mode] != m_stream.userInterleaved) {
		// One side interleaved, the other planar: the planar side addresses
		// each channel as a contiguous bufferSize-sample plane.
		if (   (    _mode == airtaudio::api::OUTPUT
		         && m_stream.deviceInterleaved[_mode])
		    || (    _mode == airtaudio::api::INPUT
		         && m_stream.userInterleaved)) {
			// Source is planar, destination is interleaved.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
				m_stream.convertInfo[_mode].inJump = 1;
			}
		} else {
			// Source is interleaved, destination is planar.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outJump = 1;
			}
		}
	} else { // no (de)interleaving
		if (m_stream.userInterleaved) {
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
			}
		} else {
			// Planar on both sides: one plane per channel, jump of one sample.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].inJump = 1;
				m_stream.convertInfo[_mode].outJump = 1;
			}
		}
	}
	// Add channel offset.
	if (_firstChannel > 0) {
		// Shift the device-side offsets: by channels when interleaved, by whole
		// planes (bufferSize samples) when planar.
		if (m_stream.deviceInterleaved[_mode]) {
			if (_mode == airtaudio::api::OUTPUT) {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].outOffset[kkk] += _firstChannel;
				}
			} else {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].inOffset[kkk] += _firstChannel;
				}
			}
		} else {
			if (_mode == airtaudio::api::OUTPUT) {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].outOffset[kkk] += (_firstChannel * m_stream.bufferSize);
				}
			} else {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].inOffset[kkk] += (_firstChannel * m_stream.bufferSize);
				}
			}
		}
	}
}
/**
 * @brief Convert one buffer of audio between sample formats, applying channel
 *        compensation and (de)interleaving as described by _info (prepared by
 *        setConvertInfo()).
 * @param _outBuffer Destination buffer (format _info.outFormat).
 * @param _inBuffer Source buffer (format _info.inFormat).
 * @param _info Conversion descriptor: formats, jumps, per-channel offsets.
 *
 * Changes vs the previous revision (behavior preserved):
 *  - the SINT16-input case of the SINT8-output branch used a bare 'if' where
 *    every sibling uses 'else if'; made consistent (the conditions are mutually
 *    exclusive, so the generated behavior is identical),
 *  - 'scale' and the inner loop index are now scoped to the branches that use
 *    them instead of being function-wide.
 */
void airtaudio::Api::convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo &_info) {
	// This function does format conversion, input/output channel compensation, and
	// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
	// the lower three bytes of a 32-bit integer.
	// Clear our device buffer when in/out duplex device channels are different
	if (    _outBuffer == m_stream.deviceBuffer
	     && m_stream.mode == airtaudio::api::DUPLEX
	     && m_stream.nDeviceChannels[0] < m_stream.nDeviceChannels[1]) {
		memset(_outBuffer, 0, m_stream.bufferSize * _info.outJump * formatBytes(_info.outFormat));
	}
	if (_info.outFormat == airtaudio::FLOAT64) {
		double *out = (double *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			double scale = 1.0 / 127.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			double scale = 1.0 / 32767.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			double scale = 1.0 / 8388607.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			double scale = 1.0 / 2147483647.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			// Channel compensation and/or (de)interleaving only.
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::FLOAT32) {
		float *out = (float *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			float scale = (float) (1.0 / 127.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			float scale = (float) (1.0 / 32767.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			float scale = (float) (1.0 / 8388607.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			float scale = (float) (1.0 / 2147483647.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			// Channel compensation and/or (de)interleaving only.
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT32) {
		int32_t *out = (int32_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 24;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 16;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]].asInt();
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			// Channel compensation and/or (de)interleaving only.
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT24) {
		int24_t *out = (int24_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			// Channel compensation and/or (de)interleaving only.
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT16) {
		int16_t *out = (int16_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			// Channel compensation and/or (de)interleaving only.
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]].asInt() >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) ((in[_info.inOffset[jjj]] >> 16) & 0x0000ffff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT8) {
		signed char *out = (signed char *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			// Channel compensation and/or (de)interleaving only.
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			// CONSISTENCY FIX: was a bare 'if' (the only one among all branches);
			// made 'else if' like every sibling. Behavior is unchanged because
			// the format tests are mutually exclusive.
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 8) & 0x00ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]].asInt() >> 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 24) & 0x000000ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (int32_t jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
}
void airtaudio::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format) {
	// Swap the endianness of every sample in the buffer, in place.
	// @param _buffer Raw audio data to modify.
	// @param _samples Number of samples (not bytes) contained in the buffer.
	// @param _format Sample format; determines the sample width in bytes.
	// NOTE: the previous 'register' hints were removed: the keyword is
	// deprecated since C++11 (removed in C++17) and has no effect on
	// modern compilers.
	uint32_t sampleBytes = 0;
	if (_format == airtaudio::SINT16) {
		sampleBytes = 2;
	} else if (    _format == airtaudio::SINT32
	            || _format == airtaudio::FLOAT32) {
		sampleBytes = 4;
	} else if (_format == airtaudio::SINT24) {
		sampleBytes = 3;
	} else if (_format == airtaudio::FLOAT64) {
		sampleBytes = 8;
	} else {
		// Single-byte or unknown format ==> nothing to swap.
		return;
	}
	char* ptr = _buffer;
	for (uint32_t iii=0; iii<_samples; ++iii) {
		// Reverse the byte order of one sample (generic mirror swap;
		// for 3-byte samples the middle byte stays in place).
		for (uint32_t jjj=0; jjj<sampleBytes/2; ++jjj) {
			char val = ptr[jjj];
			ptr[jjj] = ptr[sampleBytes-1-jjj];
			ptr[sampleBytes-1-jjj] = val;
		}
		ptr += sampleBytes;
	}
}

View File

@@ -1,196 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_API_H__
#define __AIRTAUDIO_API_H__
#include <sstream>
#include <airtaudio/debug.h>
namespace airtaudio {
namespace api {
/**
 * @brief Audio API specifier arguments.
 */
enum type {
	UNSPECIFIED, //!< Search for a working compiled API.
	LINUX_ALSA, //!< The Advanced Linux Sound Architecture API.
	LINUX_PULSE, //!< The Linux PulseAudio API.
	LINUX_OSS, //!< The Linux Open Sound System API.
	UNIX_JACK, //!< The Jack Low-Latency Audio Server API.
	MACOSX_CORE, //!< Macintosh OS-X Core Audio API.
	IOS_CORE, //!< Macintosh OS-X Core Audio API.
	WINDOWS_ASIO, //!< The Steinberg Audio Stream I/O API.
	WINDOWS_DS, //!< The Microsoft Direct Sound API.
	RTAUDIO_DUMMY, //!< A compilable but non-functional API.
	ANDROID_JAVA, //!< Android Interface.
	USER_INTERFACE_1, //!< User interface 1.
	USER_INTERFACE_2, //!< User interface 2.
	USER_INTERFACE_3, //!< User interface 3.
	USER_INTERFACE_4, //!< User interface 4.
};
// NOTE(review): presumably MAX_SAMPLE_RATES is the element count of
// SAMPLE_RATES — confirm against the defining .cpp.
extern const uint32_t MAX_SAMPLE_RATES;
extern const uint32_t SAMPLE_RATES[];
//! Life-cycle state of a stream.
enum StreamState {
	STREAM_STOPPED,
	STREAM_STOPPING,
	STREAM_RUNNING,
	STREAM_CLOSED = -50
};
//! Direction(s) of a stream.
enum StreamMode {
	OUTPUT,
	INPUT,
	DUPLEX,
	UNINITIALIZED = -75
};
// A protected structure used for buffer conversion.
struct ConvertInfo {
	int32_t channels; //!< Number of channels processed per frame by convertBuffer().
	int32_t inJump, outJump; //!< Per-frame sample strides applied to the input/output pointers.
	airtaudio::format inFormat, outFormat; //!< Source and destination sample formats.
	std::vector<int> inOffset; //!< Per-channel sample offset inside one input frame.
	std::vector<int> outOffset; //!< Per-channel sample offset inside one output frame.
};
// A protected structure for audio streams.
class Stream {
public:
uint32_t device[2]; // Playback and record, respectively.
void *apiHandle; // void pointer for API specific stream handle information
airtaudio::api::StreamMode mode; // OUTPUT, INPUT, or DUPLEX.
airtaudio::api::StreamState state; // STOPPED, RUNNING, or CLOSED
char *userBuffer[2]; // Playback and record, respectively.
char *deviceBuffer;
bool doConvertBuffer[2]; // Playback and record, respectively.
bool userInterleaved;
bool deviceInterleaved[2]; // Playback and record, respectively.
bool doByteSwap[2]; // Playback and record, respectively.
uint32_t sampleRate;
uint32_t bufferSize;
uint32_t nBuffers;
uint32_t nUserChannels[2]; // Playback and record, respectively.
uint32_t nDeviceChannels[2]; // Playback and record channels, respectively.
uint32_t channelOffset[2]; // Playback and record, respectively.
uint64_t latency[2]; // Playback and record, respectively.
airtaudio::format userFormat;
airtaudio::format deviceFormat[2]; // Playback and record, respectively.
std::mutex mutex;
airtaudio::CallbackInfo callbackInfo;
airtaudio::api::ConvertInfo convertInfo[2];
double streamTime; // Number of elapsed seconds since the stream started.
#if defined(HAVE_GETTIMEOFDAY)
struct timeval lastTickTimestamp;
#endif
Stream() :
apiHandle(0),
deviceBuffer(0) {
device[0] = 11111;
device[1] = 11111;
}
};
};
/**
 * RtApi class declaration.
 *
 * Subclasses of RtApi contain all API- and OS-specific code necessary
 * to fully implement the RtAudio API.
 *
 * Note that RtApi is an abstract base class and cannot be
 * explicitly instantiated. The class RtAudio will create an
 * instance of an RtApi subclass (RtApiOss, RtApiAlsa,
 * RtApiJack, RtApiCore, RtApiDs, or RtApiAsio).
 */
class Api {
	public:
		Api();
		virtual ~Api();
		//! @return the backend identifier implemented by the concrete subclass.
		virtual airtaudio::api::type getCurrentApi() = 0;
		//! @return the number of devices the backend can enumerate.
		virtual uint32_t getDeviceCount() = 0;
		//! @return the capability description of device @p _device.
		virtual airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
		virtual uint32_t getDefaultInputDevice();
		virtual uint32_t getDefaultOutputDevice();
		//! Open a stream; NULL output or input parameters select a one-way stream.
		enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
		airtaudio::StreamParameters *_inputParameters,
		airtaudio::format _format,
		uint32_t _sampleRate,
		uint32_t *_bufferFrames,
		airtaudio::AirTAudioCallback _callback,
		void *_userData,
		airtaudio::StreamOptions *_options);
		virtual enum airtaudio::errorType closeStream();
		virtual enum airtaudio::errorType startStream() = 0;
		virtual enum airtaudio::errorType stopStream() = 0;
		virtual enum airtaudio::errorType abortStream() = 0;
		long getStreamLatency();
		uint32_t getStreamSampleRate();
		virtual double getStreamTime();
		//! @return true as long as the stream state is not STREAM_CLOSED.
		bool isStreamOpen() const {
			return m_stream.state != airtaudio::api::STREAM_CLOSED;
		}
		//! @return true only while the stream state is STREAM_RUNNING.
		bool isStreamRunning() const {
			return m_stream.state == airtaudio::api::STREAM_RUNNING;
		}
	protected:
		//! Common stream state shared by every backend implementation.
		airtaudio::api::Stream m_stream;
		/*!
		  Protected, api-specific method that attempts to open a device
		  with the given parameters. This function MUST be implemented by
		  all subclasses. If an error is encountered during the probe, a
		  "warning" message is reported and false is returned. A
		  successful probe is indicated by a return value of true.
		*/
		virtual bool probeDeviceOpen(uint32_t _device,
		airtaudio::api::StreamMode _mode,
		uint32_t _channels,
		uint32_t _firstChannel,
		uint32_t _sampleRate,
		airtaudio::format _format,
		uint32_t *_bufferSize,
		airtaudio::StreamOptions *_options);
		//! A protected function used to increment the stream time.
		void tickStreamTime();
		//! Protected common method to clear an RtApiStream structure.
		void clearStreamInfo();
		/*!
		  Protected common method that throws an RtError (type =
		  INVALID_USE) if a stream is not open.
		*/
		enum airtaudio::errorType verifyStream();
		/**
		 * @brief Protected method used to perform format, channel number, and/or interleaving
		 * conversions between the user and device buffers.
		 */
		void convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo& _info);
		//! Protected common method used to perform byte-swapping on buffers.
		void byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format);
		//! Protected common method that returns the number of bytes for a given format.
		uint32_t formatBytes(airtaudio::format _format);
		//! Protected common method that sets up the parameters for buffer conversion.
		void setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel);
};
};
/**
 * @brief Debug operator to display the current element in a human-readable form.
 */
std::ostream& operator <<(std::ostream& _os, const airtaudio::api::type& _obj);
#endif

View File

@@ -1,44 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_CALLBACK_INFO_H__
#define __AIRTAUDIO_CALLBACK_INFO_H__
#include <thread>
namespace airtaudio {
// This global structure type is used to pass callback information
// between the private RtAudio stream structure and global callback
// handling functions.
class CallbackInfo {
	public:
		void* object; // Used as a "this" pointer.
		std::thread* thread; // Callback thread handle (NOTE(review): lifetime managed by the backend — confirm).
		void* callback; // User callback, stored type-erased.
		void* userData; // Opaque user data forwarded to the callback.
		void* apiInfo; // void pointer for API specific callback information
		bool isRunning; // presumably set while the callback thread is active — confirm in the backends.
		bool doRealtime; // presumably requests realtime scheduling for the thread — confirm in the backends.
		int32_t priority; // Thread priority value (used together with doRealtime).
		// Default constructor.
		// FIX: also initialize 'thread' and 'priority', which were
		// previously left indeterminate (uninitialized members).
		CallbackInfo() :
		  object(0),
		  thread(nullptr),
		  callback(0),
		  userData(0),
		  apiInfo(0),
		  isRunning(false),
		  doRealtime(false),
		  priority(0) {
		}
};
};
#endif

View File

@@ -1,40 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_DEVICE_INFO_H__
#define __AIRTAUDIO_DEVICE_INFO_H__
namespace airtaudio {
/**
 * @brief The public device information structure for returning queried values.
 *
 * Default construction yields a fully zeroed description ("not probed").
 */
class DeviceInfo {
	public:
		bool probed = false; //!< true if the device capabilities were successfully probed.
		std::string name; //!< Character string device identifier.
		uint32_t outputChannels = 0; //!< Maximum output channels supported by device.
		uint32_t inputChannels = 0; //!< Maximum input channels supported by device.
		uint32_t duplexChannels = 0; //!< Maximum simultaneous input/output channels supported by device.
		bool isDefaultOutput = false; //!< true if this is the default output device.
		bool isDefaultInput = false; //!< true if this is the default input device.
		std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
		airtaudio::format nativeFormats = 0; //!< Bit mask of supported data formats.
};
};
#endif

View File

@@ -1,158 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
//#include <etk/types.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <iostream>
std::vector<airtaudio::api::type> airtaudio::Interface::getCompiledApi() {
std::vector<airtaudio::api::type> apis;
// The order here will control the order of RtAudio's API search in
// the constructor.
for (auto &it : m_apiAvaillable) {
apis.push_back(it.first);
}
return apis;
}
void airtaudio::Interface::openRtApi(airtaudio::api::type _api) {
	// Drop any previously created backend (delete on NULL is a no-op).
	delete m_rtapi;
	m_rtapi = NULL;
	// Look up the requested API in the registered factory list.
	for (auto &it :m_apiAvaillable) {
		ATA_ERROR("try open " << it.first);
		if (_api != it.first) {
			continue;
		}
		ATA_ERROR(" ==> call it");
		m_rtapi = it.second();
		if (m_rtapi != NULL) {
			return;
		}
	}
	// Reached when the API was not registered or its factory returned NULL.
	ATA_ERROR("Error in open API ...");
}
airtaudio::Interface::Interface() :
  m_rtapi(NULL) {
	// Register every backend compiled in. The registration order below
	// defines the priority used by instanciate() when no API is
	// explicitly requested (see getCompiledApi()).
#if defined(__UNIX_JACK__)
	addInterface(airtaudio::api::UNIX_JACK, airtaudio::api::Jack::Create);
#endif
#if defined(__LINUX_ALSA__)
	addInterface(airtaudio::api::LINUX_ALSA, airtaudio::api::Alsa::Create);
#endif
#if defined(__LINUX_PULSE__)
	addInterface(airtaudio::api::LINUX_PULSE, airtaudio::api::Pulse::Create);
#endif
#if defined(__LINUX_OSS__)
	addInterface(airtaudio::api::LINUX_OSS, airtaudio::api::Oss::Create);
#endif
#if defined(__WINDOWS_ASIO__)
	addInterface(airtaudio::api::WINDOWS_ASIO, airtaudio::api::Asio::Create);
#endif
#if defined(__WINDOWS_DS__)
	addInterface(airtaudio::api::WINDOWS_DS, airtaudio::api::Ds::Create);
#endif
#if defined(__MACOSX_CORE__)
	addInterface(airtaudio::api::MACOSX_CORE, airtaudio::api::Core::Create);
#endif
#if defined(__IOS_CORE__)
	addInterface(airtaudio::api::IOS_CORE, airtaudio::api::CoreIos::Create);
#endif
#if defined(__ANDROID_JAVA__)
	addInterface(airtaudio::api::ANDROID_JAVA, airtaudio::api::Android::Create);
#endif
#if defined(__AIRTAUDIO_DUMMY__)
	addInterface(airtaudio::api::RTAUDIO_DUMMY, airtaudio::api::Dummy::Create);
#endif
}
void airtaudio::Interface::addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)()) {
	// Register a backend factory; it is only invoked later, by openRtApi().
	m_apiAvaillable.emplace_back(_api, _callbackCreate);
}
enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type _api) {
	ATA_INFO("Instanciate API ...");
	// Nothing to do when a backend already exists.
	if (m_rtapi != NULL) {
		ATA_WARNING("Interface already started ...!");
		return airtaudio::errorNone;
	}
	if (_api != airtaudio::api::UNSPECIFIED) {
		ATA_ERROR("API specified ...");
		// Attempt to open the specified API.
		openRtApi(_api);
		if (m_rtapi == NULL) {
			// No compiled support for the specified API value.
			ATA_ERROR("RtAudio: no compiled support for specified API argument!");
			return airtaudio::errorFail;
		}
		return airtaudio::errorNone;
	}
	ATA_INFO("Auto choice API :");
	// Iterate through the compiled APIs and keep the first one that
	// exposes at least one device (or the last one tried).
	std::vector<airtaudio::api::type> compiledApis = getCompiledApi();
	ATA_INFO(" find : " << compiledApis.size() << " apis.");
	for (auto &apiType : compiledApis) {
		ATA_INFO("try open ...");
		openRtApi(apiType);
		if (m_rtapi == NULL) {
			ATA_ERROR(" ==> can not create ...");
			continue;
		}
		if (m_rtapi->getDeviceCount() != 0) {
			ATA_INFO(" ==> api open");
			break;
		}
	}
	if (m_rtapi == NULL) {
		ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
		return airtaudio::errorFail;
	}
	return airtaudio::errorNone;
}
airtaudio::Interface::~Interface() {
	ATA_INFO("Remove interface");
	// delete on a NULL pointer is a no-op, so no guard is needed.
	delete m_rtapi;
	m_rtapi = NULL;
}
enum airtaudio::errorType airtaudio::Interface::openStream(
		airtaudio::StreamParameters* _outputParameters,
		airtaudio::StreamParameters* _inputParameters,
		airtaudio::format _format,
		uint32_t _sampleRate,
		uint32_t* _bufferFrames,
		airtaudio::AirTAudioCallback _callback,
		void* _userData,
		airtaudio::StreamOptions* _options) {
	// Pure delegation: the actual work is performed by the backend.
	if (m_rtapi != NULL) {
		return m_rtapi->openStream(_outputParameters,
		                           _inputParameters,
		                           _format,
		                           _sampleRate,
		                           _bufferFrames,
		                           _callback,
		                           _userData,
		                           _options);
	}
	// No backend has been instanciated yet.
	return airtaudio::errorInputNull;
}

View File

@@ -1,312 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_RTAUDIO_H__
#define __AIRTAUDIO_RTAUDIO_H__
#include <string>
#include <vector>
#include <airtaudio/base.h>
#include <airtaudio/int24_t.h>
#include <airtaudio/CallbackInfo.h>
#include <airtaudio/Api.h>
#include <airtaudio/api/Alsa.h>
#include <airtaudio/api/Android.h>
#include <airtaudio/api/Asio.h>
#include <airtaudio/api/Core.h>
#include <airtaudio/api/CoreIos.h>
#include <airtaudio/api/Ds.h>
#include <airtaudio/api/Dummy.h>
#include <airtaudio/api/Jack.h>
#include <airtaudio/api/Oss.h>
#include <airtaudio/api/Pulse.h>
namespace airtaudio {
/**
* @brief airtaudio::Interface class declaration.
*
* airtaudio::Interface is a "controller" used to select an available audio i/o
* interface. It presents a common API for the user to call but all
* functionality is implemented by the class RtApi and its
* subclasses. RtAudio creates an instance of an RtApi subclass
* based on the user's API choice. If no choice is made, RtAudio
* attempts to make a "logical" API selection.
*/
class Interface {
	protected:
		//! All backend factories compiled in, in search-priority order.
		std::vector<std::pair<airtaudio::api::type, Api* (*)()>> m_apiAvaillable;
	protected:
		//! Currently selected backend; NULL until instanciate() succeeds.
		airtaudio::Api *m_rtapi;
	public:
		/**
		 * @brief A static function to determine the current airtaudio version.
		 */
		static std::string getVersion() {
			return airtaudio::VERSION;
		}
		/**
		 * @brief A static function to determine the available compiled audio APIs.
		 *
		 * The values returned in the std::vector can be compared against
		 * the enumerated list values. Note that there can be more than one
		 * API compiled for certain operating systems.
		 */
		std::vector<airtaudio::api::type> getCompiledApi();
		/**
		 * @brief The class constructor.
		 * @note the creating of the basic instance is done by instanciate()
		 */
		Interface();
		/**
		 * @brief The destructor.
		 *
		 * If a stream is running or open, it will be stopped and closed
		 * automatically.
		 */
		virtual ~Interface();
		/**
		 * @brief Add an interface of the possible list.
		 * @param[in] _api Type of the interface.
		 * @param[in] _callbackCreate API creation callback.
		 */
		void addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)());
		/**
		 * @brief Create an interface instance (selects and opens a backend).
		 */
		enum airtaudio::errorType instanciate(airtaudio::api::type _api = airtaudio::api::UNSPECIFIED);
		/**
		 * @return the audio API specifier for the current instance of airtaudio,
		 *         or UNSPECIFIED when no backend is instanciated.
		 */
		airtaudio::api::type getCurrentApi() {
			if (m_rtapi == NULL) {
				return airtaudio::api::UNSPECIFIED;
			}
			return m_rtapi->getCurrentApi();
		}
		/**
		 * @brief A public function that queries for the number of audio devices available.
		 *
		 * This function performs a system query of available devices each time it
		 * is called, thus supporting devices connected \e after instantiation. If
		 * a system error occurs during processing, a warning will be issued.
		 */
		uint32_t getDeviceCount() {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDeviceCount();
		}
		/**
		 * @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
		 * If an invalid argument is provided, an RtError (type = INVALID_USE)
		 * will be thrown. If a device is busy or otherwise unavailable, the
		 * structure member "probed" will have a value of "false" and all
		 * other members are undefined. If the specified device is the
		 * current default input or output device, the corresponding
		 * "isDefault" member will have a value of "true".
		 *
		 * @return An airtaudio::DeviceInfo structure for a specified device number.
		 */
		airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) {
			if (m_rtapi == NULL) {
				// No backend: return a default (unprobed) description.
				return airtaudio::DeviceInfo();
			}
			return m_rtapi->getDeviceInfo(_device);
		}
		/**
		 * @brief A function that returns the index of the default output device.
		 *
		 * If the underlying audio API does not provide a "default
		 * device", or if no devices are available, the return value will be
		 * 0. Note that this is a valid device identifier and it is the
		 * client's responsibility to verify that a device is available
		 * before attempting to open a stream.
		 */
		uint32_t getDefaultOutputDevice() {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDefaultOutputDevice();
		}
		/**
		 * @brief A function that returns the index of the default input device.
		 *
		 * If the underlying audio API does not provide a "default
		 * device", or if no devices are available, the return value will be
		 * 0. Note that this is a valid device identifier and it is the
		 * client's responsibility to verify that a device is available
		 * before attempting to open a stream.
		 */
		uint32_t getDefaultInputDevice() {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDefaultInputDevice();
		}
		/**
		 * @brief A public function for opening a stream with the specified parameters.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
		 * opened with the specified parameters or an error occurs during
		 * processing. An RtError (type = INVALID_USE) is thrown if any
		 * invalid device ID or channel number parameters are specified.
		 * @param _outputParameters Specifies output stream parameters to use
		 * when opening a stream, including a device ID, number of channels,
		 * and starting channel number. For input-only streams, this
		 * argument should be NULL. The device ID is an index value between
		 * 0 and getDeviceCount() - 1.
		 * @param _inputParameters Specifies input stream parameters to use
		 * when opening a stream, including a device ID, number of channels,
		 * and starting channel number. For output-only streams, this
		 * argument should be NULL. The device ID is an index value between
		 * 0 and getDeviceCount() - 1.
		 * @param _format An airtaudio::format specifying the desired sample data format.
		 * @param _sampleRate The desired sample rate (sample frames per second).
		 * @param *_bufferFrames A pointer to a value indicating the desired
		 * internal buffer size in sample frames. The actual value
		 * used by the device is returned via the same pointer. A
		 * value of zero can be specified, in which case the lowest
		 * allowable value is determined.
		 * @param _callback A client-defined function that will be invoked
		 * when input data is available and/or output data is needed.
		 * @param _userData An optional pointer to data that can be accessed
		 * from within the callback function.
		 * @param _options An optional pointer to a structure containing various
		 * global stream options, including a list of OR'ed airtaudio::streamFlags
		 * and a suggested number of stream buffers that can be used to
		 * control stream latency. More buffers typically result in more
		 * robust performance, though at a cost of greater latency. If a
		 * value of zero is specified, a system-specific median value is
		 * chosen. If the airtaudio_MINIMIZE_LATENCY flag bit is set, the
		 * lowest allowable value is used. The actual value used is
		 * returned via the structure argument. The parameter is API dependent.
		 */
		enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
		airtaudio::StreamParameters *_inputParameters,
		airtaudio::format _format,
		uint32_t _sampleRate,
		uint32_t *_bufferFrames,
		airtaudio::AirTAudioCallback _callback,
		void *_userData = NULL,
		airtaudio::StreamOptions *_options = NULL);
		/**
		 * @brief A function that closes a stream and frees any associated stream memory.
		 *
		 * If a stream is not open, this function issues a warning and
		 * returns (no exception is thrown).
		 */
		enum airtaudio::errorType closeStream() {
			if (m_rtapi == NULL) {
				return airtaudio::errorInputNull;
			}
			return m_rtapi->closeStream();
		}
		/**
		 * @brief A function that starts a stream.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * running.
		 */
		enum airtaudio::errorType startStream() {
			if (m_rtapi == NULL) {
				return airtaudio::errorInputNull;
			}
			return m_rtapi->startStream();
		}
		/**
		 * @brief Stop a stream, allowing any samples remaining in the output queue to be played.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * stopped.
		 */
		enum airtaudio::errorType stopStream() {
			if (m_rtapi == NULL) {
				return airtaudio::errorInputNull;
			}
			return m_rtapi->stopStream();
		}
		/**
		 * @brief Stop a stream, discarding any samples remaining in the input/output queue.
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * stopped.
		 */
		enum airtaudio::errorType abortStream() {
			if (m_rtapi == NULL) {
				return airtaudio::errorInputNull;
			}
			return m_rtapi->abortStream();
		}
		/**
		 * @return true if a stream is open and false if not.
		 */
		bool isStreamOpen() const {
			if (m_rtapi == NULL) {
				return false;
			}
			return m_rtapi->isStreamOpen();
		}
		/**
		 * @return true if the stream is running and false if it is stopped or not open.
		 */
		bool isStreamRunning() const {
			if (m_rtapi == NULL) {
				return false;
			}
			return m_rtapi->isStreamRunning();
		}
		/**
		 * @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
		 * @return the number of elapsed seconds since the stream was started.
		 */
		double getStreamTime() {
			if (m_rtapi == NULL) {
				return 0.0;
			}
			return m_rtapi->getStreamTime();
		}
		/**
		 * @brief The stream latency refers to delay in audio input and/or output
		 * caused by internal buffering by the audio system and/or hardware.
		 * For duplex streams, the returned value will represent the sum of
		 * the input and output latencies. If a stream is not open, an
		 * RtError (type = INVALID_USE) will be thrown. If the API does not
		 * report latency, the return value will be zero.
		 * @return The internal stream latency in sample frames.
		 */
		long getStreamLatency() {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getStreamLatency();
		}
		/**
		 * @brief On some systems, the sample rate used may be slightly different
		 * than that specified in the stream parameters. If a stream is not
		 * open, an RtError (type = INVALID_USE) will be thrown.
		 * @return Returns actual sample rate in use by the stream.
		 */
		uint32_t getStreamSampleRate() {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getStreamSampleRate();
		}
	protected:
		//! Destroy the current backend (if any) and create the one matching _api.
		void openRtApi(airtaudio::api::type _api);
};
};
#endif

View File

@@ -1,87 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_STREAM_OPTION_H__
#define __AIRTAUDIO_STREAM_OPTION_H__
namespace airtaudio {
/**
* @brief The structure for specifying stream options.
*
* The following flags can be OR'ed together to allow a client to
* make changes to the default stream behavior:
*
* - \e RTAUDIO_NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
* - \e RTAUDIO_MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
* - \e RTAUDIO_HOG_DEVICE: Attempt to grab the device for exclusive use.
* - \e RTAUDIO_SCHEDULE_REALTIME: Attempt to select realtime scheduling for callback thread.
* - \e RTAUDIO_ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
*
* By default, RtAudio streams pass and receive audio data from the
* client in an interleaved format. By passing the
* RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
* data will instead be presented in non-interleaved buffers. In
* this case, each buffer argument in the RtAudioCallback function
* will point to a single array of data, with \c nFrames samples for
* each channel concatenated back-to-back. For example, the first
* sample of data for the second channel would be located at index \c
* nFrames (assuming the \c buffer pointer was recast to the correct
* data type for the stream).
*
* Certain audio APIs offer a number of parameters that influence the
* I/O latency of a stream. By default, RtAudio will attempt to set
* these parameters internally for robust (glitch-free) performance
* (though some APIs, like Windows Direct Sound, make this difficult).
* By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
* function, internal stream settings will be influenced in an attempt
* to minimize stream latency, though possibly at the expense of stream
* performance.
*
* If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
* open the input and/or output stream device(s) for exclusive use.
* Note that this is not possible with all supported audio APIs.
*
* If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
* to select realtime scheduling (round-robin) for the callback thread.
* The \c priority parameter will only be used if the RTAUDIO_SCHEDULE_REALTIME
* flag is set. It defines the thread's realtime priority.
*
* If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
* open the "default" PCM device when using the ALSA API. Note that this
* will override any specified input or output device id.
*
* The \c numberOfBuffers parameter can be used to control stream
* latency in the Windows DirectSound, Linux OSS, and Linux Alsa APIs
* only. A value of two is usually the smallest allowed. Larger
* numbers can potentially result in more robust stream performance,
* though likely at the cost of stream latency. The value set by the
* user is replaced during execution of the RtAudio::openStream()
* function by the value actually used by the system.
*
* The \c streamName parameter can be used to set the client name
* when using the Jack API. By default, the client name is set to
* RtApiJack. However, if you wish to create multiple instances of
* RtAudio with Jack, each instance must have a unique client name.
*/
class StreamOptions {
	public:
		airtaudio::streamFlags flags; //!< A bit-mask of stream flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE, RTAUDIO_ALSA_USE_DEFAULT).
		uint32_t numberOfBuffers; //!< Number of stream buffers.
		std::string streamName; //!< A stream name (currently used only in Jack).
		int32_t priority; //!< Scheduling priority of callback thread (only used with flag RTAUDIO_SCHEDULE_REALTIME).
		// Default constructor: no flags, system-chosen buffer count,
		// empty stream name and default priority.
		StreamOptions() :
		  flags(0),
		  numberOfBuffers(0),
		  priority(0) {}
};
};
#endif

View File

@@ -1,30 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_STREAM_PARAMETER_H__
#define __AIRTAUDIO_STREAM_PARAMETER_H__
namespace airtaudio {
/**
 * @brief The structure for specifying input or output stream parameters.
 *
 * Default construction selects device 0, zero channels and channel offset 0.
 */
class StreamParameters {
	public:
		uint32_t deviceId = 0; //!< Device index (0 to getDeviceCount() - 1).
		uint32_t nChannels = 0; //!< Number of channels.
		uint32_t firstChannel = 0; //!< First channel index on device (default = 0).
};
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,49 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_ALSA_H__) && defined(__LINUX_ALSA__)
#define __AIRTAUDIO_API_ALSA_H__
namespace airtaudio {
namespace api {
class Alsa: public airtaudio::Api {
	public:
		//! Factory entry point registered in airtaudio::Interface.
		static airtaudio::Api* Create();
	public:
		Alsa();
		virtual ~Alsa();
		airtaudio::api::type getCurrentApi() {
			return airtaudio::api::LINUX_ALSA;
		}
		uint32_t getDeviceCount();
		airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
		enum airtaudio::errorType closeStream();
		enum airtaudio::errorType startStream();
		enum airtaudio::errorType stopStream();
		enum airtaudio::errorType abortStream();
		// This function is intended for internal use only. It must be
		// public because it is called by the internal callback handler,
		// which is not a member of RtAudio. External use of this function
		// will most likely produce highly undesireable results!
		void callbackEvent();
	private:
		//! Cached device descriptions (filled by saveDeviceInfo()).
		std::vector<airtaudio::DeviceInfo> m_devices;
		//! Refresh the cached device list.
		void saveDeviceInfo();
		bool probeDeviceOpen(uint32_t _device,
		airtaudio::api::StreamMode _mode,
		uint32_t _channels,
		uint32_t _firstChannel,
		uint32_t _sampleRate,
		airtaudio::format _format,
		uint32_t *_bufferSize,
		airtaudio::StreamOptions *_options);
};
};
#endif

View File

@@ -1,221 +0,0 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#ifdef __ANDROID_JAVA__
#include <ewol/context/Context.h>
#include <unistd.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <limits.h>
airtaudio::Api* airtaudio::api::Android::Create() {
	ATA_INFO("Create Android device ... ");
	// Factory entry point registered in airtaudio::Interface; ownership
	// of the returned pointer goes to the caller.
	airtaudio::Api* newInterface = new airtaudio::api::Android();
	return newInterface;
}
airtaudio::api::Android::Android() {
	ATA_INFO("new Android");
	// On android, we set a static device ...
	ATA_INFO("get context");
	ewol::Context& tmpContext = ewol::getContext();
	ATA_INFO("done p=" << (int64_t)&tmpContext);
	int32_t deviceCount = tmpContext.audioGetDeviceCount();
	ATA_ERROR("Get count devices : " << deviceCount);
	for (int32_t iii=0; iii<deviceCount; ++iii) {
		std::string property = tmpContext.audioGetDeviceProperty(iii);
		ATA_ERROR("Get devices property : " << property);
		// Expected layout: "name:direction:freqList:channelCount:formatList".
		std::vector<std::string> listProperty = etk::split(property, ':');
		if (listProperty.size() < 5) {
			// FIX: the fields below are accessed up to index 4; a malformed
			// property string previously caused an out-of-range access.
			// Skip the broken entry instead of crashing.
			ATA_ERROR("Invalid device property (expected 5 ':' separated fields) : '" << property << "'");
			continue;
		}
		airtaudio::DeviceInfo tmp;
		tmp.name = listProperty[0];
		// Field 2: comma separated list of supported sample rates.
		std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
		for(size_t fff=0; fff<listFreq.size(); ++fff) {
			tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
		}
		tmp.outputChannels = 0;
		tmp.inputChannels = 0;
		tmp.duplexChannels = 0;
		// Field 1: stream direction; field 3: channel count.
		if (listProperty[1] == "out") {
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = false;
			tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
		} else if (listProperty[1] == "in") {
			tmp.isDefaultOutput = false;
			tmp.isDefaultInput = true;
			tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
		} else {
			/* duplex */
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = true;
			tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
		}
		// Field 4: comma separated list of supported sample formats.
		std::vector<std::string> listFormat = etk::split(listProperty[4], ',');
		tmp.nativeFormats = 0;
		for(size_t fff=0; fff<listFormat.size(); ++fff) {
			if (listFormat[fff] == "float") {
				tmp.nativeFormats |= FLOAT32;
			} else if (listFormat[fff] == "double") {
				tmp.nativeFormats |= FLOAT64;
			} else if (listFormat[fff] == "s32") {
				tmp.nativeFormats |= SINT32;
			} else if (listFormat[fff] == "s24") {
				tmp.nativeFormats |= SINT24;
			} else if (listFormat[fff] == "s16") {
				tmp.nativeFormats |= SINT16;
			} else if (listFormat[fff] == "s8") {
				tmp.nativeFormats |= SINT8;
			} else {
				// FIX: previously unknown format tokens were silently dropped.
				ATA_ERROR("Unknown device format : '" << listFormat[fff] << "'");
			}
		}
		m_devices.push_back(tmp);
	}
	ATA_INFO("Create Android interface (end)");
}
// Destructor: nothing to release — the device list is a plain vector.
airtaudio::api::Android::~Android() {
ATA_INFO("Destroy Android interface");
}
// Number of devices found when the constructor queried the ewol context.
uint32_t airtaudio::api::Android::getDeviceCount() {
//ATA_INFO("Get device count:"<< m_devices.size());
return m_devices.size();
}
// Return the cached description of one device.
// NOTE(review): _device is not bounds-checked; an out-of-range index is
// undefined behaviour — callers must respect getDeviceCount().
airtaudio::DeviceInfo airtaudio::api::Android::getDeviceInfo(uint32_t _device) {
//ATA_INFO("Get device info ...");
return m_devices[_device];
}
// Close the stream. The java backend does not support closing yet, so this
// only logs and reports success.
enum airtaudio::errorType airtaudio::api::Android::closeStream() {
// Fix: corrected log-message typo ("Clese" -> "Close").
ATA_INFO("Close Stream");
// Can not close the stream now...
return airtaudio::errorNone;
}
// Start the stream. The device was already opened by probeDeviceOpen(); the
// java side drives the callbacks, so nothing more is needed here.
enum airtaudio::errorType airtaudio::api::Android::startStream() {
ATA_INFO("Start Stream");
// Can not close the stream now...
return airtaudio::errorNone;
}
// Stop the stream by closing device 0 on the ewol context.
// NOTE(review): the device id is hard-coded to 0 — presumably only one
// output device is supported; confirm against the java interface.
enum airtaudio::errorType airtaudio::api::Android::stopStream() {
ATA_INFO("Stop stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return airtaudio::errorNone;
}
// Abort behaves exactly like stop on this backend (same hard-coded device 0).
enum airtaudio::errorType airtaudio::api::Android::abortStream() {
ATA_INFO("Abort Stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return airtaudio::errorNone;
}
// Per-buffer processing: invoke the user callback to produce _frameRate frames
// of output, converting from the user format to the device format when the
// stream was configured with doConvertBuffer (see probeDeviceOpen()).
// @param _data Device output buffer to fill.
// @param _frameRate Number of frames requested for this buffer.
void airtaudio::api::Android::callBackEvent(void* _data,
int32_t _frameRate) {
int32_t doStopStream = 0;
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
double streamTime = getStreamTime();
airtaudio::streamStatus status = 0;
if (m_stream.doConvertBuffer[OUTPUT] == true) {
// User writes into the intermediate buffer, then we convert into _data.
doStopStream = callback(m_stream.userBuffer[OUTPUT],
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
} else {
// Formats match: the user callback fills the device buffer directly.
doStopStream = callback(_data,
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
}
// A return value of 2 from the user callback requests an immediate abort.
if (doStopStream == 2) {
abortStream();
return;
}
airtaudio::Api::tickStreamTime();
}
// Static trampoline registered with the java audio interface; dispatches to
// the Android instance passed as _userData.
void airtaudio::api::Android::androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData) {
if (_userData == NULL) {
ATA_INFO("callback event ... NULL pointer");
return;
}
airtaudio::api::Android* myClass = static_cast<airtaudio::api::Android*>(_userData);
// NOTE(review): _frameRate is halved here — presumably the java side reports
// a stereo sample count rather than a frame count; confirm against the caller.
myClass->callBackEvent(_data, _frameRate/2);
}
// Open the java audio device and configure the m_stream conversion state.
// Only OUTPUT mode is supported on this backend. The device side is fixed to
// interleaved stereo SINT16 at a 256-frame buffer; a conversion buffer is
// allocated whenever the user format/channel-count/interleaving differs.
// @return true when the device was opened successfully.
bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != OUTPUT) {
ATA_ERROR("Can not start a device input or duplex for Android ...");
return false;
}
m_stream.userFormat = _format;
m_stream.nUserChannels[_mode] = _channels;
ewol::Context& tmpContext = ewol::getContext();
bool ret = false;
// The 4th argument selects the java-side sample depth (0 = 8-bit, 1 = 16-bit).
if (_format == SINT8) {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
} else {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
}
m_stream.bufferSize = 256;
m_stream.sampleRate = _sampleRate;
m_stream.doByteSwap[_mode] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to bu update later ...
m_stream.deviceFormat[_mode] = SINT16;
m_stream.nDeviceChannels[_mode] = 2;
m_stream.deviceInterleaved[_mode] = true;
// A conversion pass is needed when format, channel count or interleaving
// differ between the user side and the (fixed) device side.
m_stream.doConvertBuffer[_mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
&& m_stream.nUserChannels[_mode] > 1) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.doConvertBuffer[_mode] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
// NOTE(review): on allocation failure this only logs — the stream is still
// reported open with ret unchanged; verify that is intended.
if (m_stream.userBuffer[_mode] == NULL) {
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
return ret;
}
#endif

View File

@@ -1,53 +0,0 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_ANDROID_H__) && defined(__ANDROID_JAVA__)
#define __AIRTAUDIO_API_ANDROID_H__
namespace airtaudio {
namespace api {
//! Android backend using the java audio interface (compiled under __ANDROID_JAVA__).
class Android: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
Android();
virtual ~Android();
//! @return the backend identifier (ANDROID_JAVA).
airtaudio::api::type getCurrentApi() {
return airtaudio::api::ANDROID_JAVA;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std::vector<airtaudio::DeviceInfo> m_devices; //!< Device list built once in the constructor.
void saveDeviceInfo();
// Open the java audio device and configure stream conversion state.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
private:
// Per-buffer processing: run the user callback and convert to device format.
void callBackEvent(void* _data,
int32_t _frameRate);
// Static trampoline called from the java side; _userData is the instance.
static void androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData);
};
};
};
#endif

View File

@@ -1,51 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_ASIO_H__) && defined(__WINDOWS_ASIO__)
#define __AIRTAUDIO_API_ASIO_H__
namespace airtaudio {
namespace api {
//! ASIO backend for Windows (compiled under __WINDOWS_ASIO__).
class Asio: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
Asio();
virtual ~Asio();
//! @return the backend identifier (WINDOWS_ASIO).
airtaudio::api::type getCurrentApi() {
// Fix: qualify the enumerator with the api namespace, consistent with
// every other backend header (api::LINUX_ALSA, api::MACOSX_CORE, ...).
return airtaudio::api::WINDOWS_ASIO;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
//! @return the stream latency in frames.
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
bool callbackEvent(long _bufferIndex);
private:
std::vector<airtaudio::DeviceInfo> m_devices; //!< Cached device descriptions.
void saveDeviceInfo();
bool m_coInitialized; //!< True when COM was initialized by this object.
// Open and configure the ASIO device; @return true on success.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
};
};
};
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,56 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_CORE_H__) && defined(__MACOSX_CORE__)
#define __AIRTAUDIO_API_CORE_H__
#include <CoreAudio/AudioHardware.h>
namespace airtaudio {
namespace api {
//! CoreAudio backend for Mac OS X (compiled under __MACOSX_CORE__).
class Core: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
Core();
virtual ~Core();
//! @return the backend identifier (MACOSX_CORE).
airtaudio::api::type getCurrentApi() {
return airtaudio::api::MACOSX_CORE;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
//! @return index of the system default output device.
uint32_t getDefaultOutputDevice();
//! @return index of the system default input device.
uint32_t getDefaultInputDevice();
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
//! @return the stream latency in frames.
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
bool callbackEvent(AudioDeviceID _deviceId,
const AudioBufferList *_inBufferList,
const AudioBufferList *_outBufferList);
private:
// Open and configure the CoreAudio device; @return true on success.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
// Translate a CoreAudio OSStatus into a human-readable string for logging.
static const char* getErrorCode(OSStatus _code);
};
};
};
#endif

View File

@@ -1,53 +0,0 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_CORE_IOS_H__) && defined(__IOS_CORE__)
#define __AIRTAUDIO_API_CORE_IOS_H__
namespace airtaudio {
namespace api {
class CoreIosPrivate;
//! CoreAudio (RemoteIO AudioUnit) backend for iOS (compiled under __IOS_CORE__).
class CoreIos: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
CoreIos();
virtual ~CoreIos();
//! @return the backend identifier (IOS_CORE).
airtaudio::api::type getCurrentApi() {
return airtaudio::api::IOS_CORE;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std::vector<airtaudio::DeviceInfo> m_devices; //!< Static device list built in the constructor.
void saveDeviceInfo();
// Configure the RemoteIO audio unit for the requested mode; @return true on success.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
public:
// Per-buffer processing invoked from the render callback.
void callBackEvent(void* _data,
int32_t _frameRate);
private:
CoreIosPrivate* m_private; //!< Hides the AudioUnit handle from this header.
};
};
};
#endif

View File

@@ -1,307 +0,0 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#ifdef __IOS_CORE__
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <limits.h>
// Factory entry point: build a new CoreIos backend instance for the front-end.
airtaudio::Api* airtaudio::api::CoreIos::Create(void) {
ATA_INFO("Create CoreIos device ... ");
airtaudio::api::CoreIos* instance = new airtaudio::api::CoreIos();
return instance;
}
#define kOutputBus 0
#define kInputBus 1
namespace airtaudio {
namespace api {
//! Private implementation holder: keeps the AudioToolbox handle out of
//! the public CoreIos header (pimpl).
class CoreIosPrivate {
public:
AudioComponentInstance audioUnit; //!< RemoteIO audio unit instance.
};
};
};
// Constructor: iOS exposes a fixed pair of devices (one stereo output, one
// stereo input), both SINT16 at 48 kHz.
airtaudio::api::CoreIos::CoreIos(void) :
m_private(new airtaudio::api::CoreIosPrivate) {
ATA_INFO("new CoreIos");
int32_t deviceCount = 2;
// Fix: log the actual variable instead of a duplicated literal '2'
// (deviceCount was otherwise unused).
ATA_ERROR("Get count devices : " << deviceCount);
airtaudio::DeviceInfo tmp;
// Add default output format :
tmp.name = "out";
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 2;
tmp.inputChannels = 0;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = true;
tmp.isDefaultInput = false;
tmp.nativeFormats = SINT16;
m_devices.push_back(tmp);
// add default input format:
tmp.name = "in";
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 0;
tmp.inputChannels = 2;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = false;
tmp.isDefaultInput = true;
tmp.nativeFormats = SINT16;
m_devices.push_back(tmp);
ATA_INFO("Create CoreIOs interface (end)");
}
// Destructor: tear down the RemoteIO audio unit and release the pimpl.
airtaudio::api::CoreIos::~CoreIos(void) {
ATA_INFO("Destroy CoreIOs interface");
AudioUnitUninitialize(m_private->audioUnit);
delete m_private;
m_private = NULL;
}
// Always 2 on iOS: the fixed "out" and "in" devices built in the constructor.
uint32_t airtaudio::api::CoreIos::getDeviceCount(void) {
//ATA_INFO("Get device count:"<< m_devices.size());
return m_devices.size();
}
// Return the cached description of one device.
// NOTE(review): _device is not bounds-checked; callers must stay below
// getDeviceCount().
airtaudio::DeviceInfo airtaudio::api::CoreIos::getDeviceInfo(uint32_t _device) {
//ATA_INFO("Get device info ...");
return m_devices[_device];
}
enum airtaudio::errorType airtaudio::api::CoreIos::closeStream(void) {
ATA_INFO("Close Stream");
// Can not close the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
// Can not close the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return airtaudio::errorNone;
}
// Per-buffer processing: invoke the user callback to produce _frameRate frames
// of output, converting from the user format to the device format when the
// stream was configured with doConvertBuffer (see probeDeviceOpen()).
// @param _data Device output buffer to fill.
// @param _frameRate Number of frames requested for this buffer.
void airtaudio::api::CoreIos::callBackEvent(void* _data,
int32_t _frameRate) {
#if 0
// Debug aid (disabled): fill the buffer with a sine wave instead of
// calling the user callback.
static double value=0;
int16_t* vals = (int16_t*)_data;
for (int32_t iii=0; iii<_frameRate; ++iii) {
*vals++ = (int16_t)(sin(value) * 32760.0);
*vals++ = (int16_t)(sin(value) * 32760.0);
value += 0.09;
if (value >= M_PI*2.0) {
value -= M_PI*2.0;
}
}
return;
#endif
int32_t doStopStream = 0;
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
double streamTime = getStreamTime();
airtaudio::streamStatus status = 0;
if (m_stream.doConvertBuffer[OUTPUT] == true) {
// User writes into the intermediate buffer, then we convert into _data.
doStopStream = callback(m_stream.userBuffer[OUTPUT],
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
} else {
// Formats match: the user callback fills the device buffer directly.
doStopStream = callback(_data,
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
}
// A return value of 2 from the user callback requests an immediate abort.
if (doStopStream == 2) {
abortStream();
return;
}
airtaudio::Api::tickStreamTime();
}
// RemoteIO render callback: forwards every requested AudioBuffer to the
// CoreIos instance stored in _userData.
static OSStatus playbackCallback(void *_userData,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
uint32_t inBusNumber,
uint32_t inNumberFrames,
AudioBufferList *ioData) {
if (_userData == NULL) {
ATA_ERROR("callback event ... NULL pointer");
return -1;
}
airtaudio::api::CoreIos* myClass = static_cast<airtaudio::api::CoreIos*>(_userData);
// get all requested buffer :
// Fix: mNumberBuffers is unsigned — iterate with an unsigned index to avoid
// a signed/unsigned comparison.
for (uint32_t iii=0; iii < ioData->mNumberBuffers; iii++) {
AudioBuffer buffer = ioData->mBuffers[iii];
// Byte size -> frame count for interleaved stereo int16_t samples.
int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << inBusNumber);
myClass->callBackEvent(buffer.mData, numberFrame);
}
return noErr;
}
// Open the iOS output path: configure the airtaudio conversion state, then
// create and initialize a RemoteIO AudioUnit (stereo SINT16 @ 48 kHz) whose
// render callback is playbackCallback(). Only OUTPUT mode is supported.
// @return true when the internal configuration succeeded (AudioUnit errors
// are logged but do not change the return value — see NOTE below).
bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != OUTPUT) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
bool ret = true;
// configure Airtaudio internal configuration:
m_stream.userFormat = _format;
m_stream.nUserChannels[_mode] = _channels;
m_stream.bufferSize = 8192;
m_stream.sampleRate = _sampleRate;
m_stream.doByteSwap[_mode] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to be update later ...
m_stream.deviceFormat[_mode] = SINT16;
m_stream.nDeviceChannels[_mode] = 2;
m_stream.deviceInterleaved[_mode] = true;
// A conversion pass is needed when format, channel count or interleaving
// differ between the user side and the (fixed) device side.
m_stream.doConvertBuffer[_mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
&& m_stream.nUserChannels[_mode] > 1) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.doConvertBuffer[_mode] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[_mode] == NULL) {
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
// Configure IOs interface:
// NOTE(review): every AudioUnit error below is logged but does not flip
// 'ret' — the function still returns true; verify that is intended.
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not create an audio intance...");
}
uint32_t flag = 1;
// Enable IO for playback
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (status != 0) {
ATA_ERROR("can not request audio autorisation...");
}
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 48000.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1; //
audioFormat.mChannelsPerFrame = 2; // stereo
audioFormat.mBitsPerChannel = sizeof(short) * 8;
audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mReserved = 0;
// Apply format
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (status != 0) {
ATA_ERROR("can not set stream properties...");
}
// Set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
if (status != 0) {
ATA_ERROR("can not set Callback...");
}
// Initialise
status = AudioUnitInitialize(m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not initialize...");
}
return ret;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,54 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_DS_H__) && defined(__WINDOWS_DS__)
#define __AIRTAUDIO_API_DS_H__
namespace airtaudio {
namespace api {
//! DirectSound backend for Windows (compiled under __WINDOWS_DS__).
class Ds: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
Ds();
virtual ~Ds();
//! @return the backend identifier (WINDOWS_DS).
airtaudio::api::type getCurrentApi() {
return airtaudio::api::WINDOWS_DS;
}
uint32_t getDeviceCount();
//! @return index of the system default output device.
uint32_t getDefaultOutputDevice();
//! @return index of the system default input device.
uint32_t getDefaultInputDevice();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
//! @return the stream latency in frames.
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
bool m_coInitialized; //!< True when COM was initialized by this object.
bool m_buffersRolling; //!< True once the DirectSound buffers are cycling.
long m_duplexPrerollBytes; //!< Bytes to preroll before starting a duplex stream.
std::vector<struct DsDevice> dsDevices; //!< Enumerated DirectSound devices.
// Open and configure the DirectSound device; @return true on success.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
};
};
};
#endif

View File

@@ -1,61 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__AIRTAUDIO_DUMMY__)
#include <airtaudio/api/Dummy.h>
#include <airtaudio/debug.h>
// Factory entry point: build a new Dummy backend instance for the front-end.
airtaudio::Api* airtaudio::api::Dummy::Create() {
airtaudio::api::Dummy* instance = new airtaudio::api::Dummy();
return instance;
}
// Constructor: immediately emits a warning — this backend is a placeholder
// used when no real audio API is compiled in.
airtaudio::api::Dummy::Dummy() {
m_errorText = "airtaudio::api::Dummy: This class provides no functionality.";
error(airtaudio::errorWarning);
}
// The dummy backend exposes no devices.
uint32_t airtaudio::api::Dummy::getDeviceCount() {
return 0;
}
rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
()_device;
rtaudio::DeviceInfo info;
return info;
}
// All stream operations are no-ops on the dummy backend: they report
// success without doing anything.
enum airtaudio::errorType airtaudio::api::Dummy::closeStream() {
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::Dummy::startStream() {
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::Dummy::stopStream() {
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::Dummy::abortStream() {
return airtaudio::errorNone;
}
// Opening a device always fails on the dummy backend (no devices exist);
// all parameters are ignored.
bool airtaudio::api::Dummy::probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options) {
return false;
}
#endif

View File

@@ -1,43 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_DUMMY_H__) && defined(__AIRTAUDIO_DUMMY__)
#define __AIRTAUDIO_API_DUMMY_H__
#include <airtaudio/Interface.h>
namespace airtaudio {
namespace api {
//! Placeholder backend with no functionality, used when no real audio API
//! is compiled in (__AIRTAUDIO_DUMMY__).
class Dummy: public airtaudio::Api {
public:
// Factory used by the generic front-end to instantiate this backend.
static airtaudio::Api* Create();
public:
Dummy();
//! @return the backend identifier (RTAUDIO_DUMMY).
airtaudio::api::type getCurrentApi() {
return airtaudio::api::RTAUDIO_DUMMY;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream();
enum airtaudio::errorType startStream();
enum airtaudio::errorType stopStream();
enum airtaudio::errorType abortStream();
private:
// Always fails: the dummy backend cannot open any device.
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
};
};
};
#endif

View File

@@ -1,748 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__UNIX_JACK__)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <string.h>
// Factory entry point: build a new Jack backend instance for the front-end.
airtaudio::Api* airtaudio::api::Jack::Create() {
airtaudio::api::Jack* instance = new airtaudio::api::Jack();
return instance;
}
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X. It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server. The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl. Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started. In
// particular,
//
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4. Once the server is running, it
// is not possible to override these values. If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// RtApiJack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started. When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
// A structure to hold various information related to the Jack API
// implementation.
struct JackHandle {
jack_client_t *client; //!< Connection to the JACK server.
jack_port_t **ports[2]; //!< Registered ports, indexed by stream direction.
std::string deviceName[2]; //!< Device (client) name per direction.
bool xrun[2]; //!< Over/underrun flag per direction, set by jackXrun().
std::condition_variable condition; //!< Signals stream-drain completion.
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default state: no client, no ports, no xrun, not draining.
JackHandle() :
client(0),
drainCounter(0),
internalDrain(false) {
ports[0] = 0;
ports[1] = 0;
xrun[0] = false;
xrun[1] = false;
}
};
// Constructor: connection to the JACK server is deferred until a stream
// is actually probed/opened.
airtaudio::api::Jack::Jack() {
// Nothing to do here.
}
// Destructor: make sure any open stream is shut down before the object dies.
airtaudio::api::Jack::~Jack() {
if (m_stream.state != STREAM_CLOSED) {
closeStream();
}
}
// Count JACK "devices" (distinct client name prefixes among all registered
// ports). Returns 0 when the JACK server is not running.
uint32_t airtaudio::api::Jack::getDeviceCount() {
// See if we can become a jack client.
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = NULL;
jack_client_t *client = jack_client_open("RtApiJackCount", options, status);
if (client == NULL) {
return 0;
}
const char **ports;
std::string port, previousPort;
uint32_t nChannels = 0, nDevices = 0;
ports = jack_get_ports(client, NULL, NULL, 0);
if (ports) {
// Parse the port names up to the first colon (:).
// Port names look like "client:port"; each new client prefix is a device.
size_t iColon = 0;
do {
port = (char *) ports[ nChannels ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon + 1);
if (port != previousPort) {
nDevices++;
previousPort = port;
}
}
} while (ports[++nChannels]);
free(ports);
}
jack_client_close(client);
return nDevices;
}
// Describe one JACK "device" (client name prefix): name, server sample rate,
// and channel counts derived from the client's input/output ports.
// On any failure the returned info has probed == false.
airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) {
airtaudio::DeviceInfo info;
info.probed = false;
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
jack_status_t *status = NULL;
jack_client_t *client = jack_client_open("RtApiJackInfo", options, status);
if (client == NULL) {
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: Jack server not found or connection error!");
// TODO : airtaudio::errorWarning;
return info;
}
const char **ports;
std::string port, previousPort;
uint32_t nPorts = 0, nDevices = 0;
ports = jack_get_ports(client, NULL, NULL, 0);
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nPorts ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon);
if (port != previousPort) {
if (nDevices == _device) {
info.name = port;
}
nDevices++;
previousPort = port;
}
}
} while (ports[++nPorts]);
free(ports);
}
if (_device >= nDevices) {
jack_client_close(client);
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: device ID is invalid!");
// TODO : airtaudio::errorInvalidUse;
return info;
}
// Get the current jack server sample rate.
// JACK fixes the rate server-wide, so only one value is reported.
info.sampleRates.clear();
info.sampleRates.push_back(jack_get_sample_rate(client));
// Count the available ports containing the client name as device
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsInput);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
}
free(ports);
info.outputChannels = nChannels;
}
// Jack "output ports" equal RtAudio input channels.
nChannels = 0;
ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsOutput);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
}
free(ports);
info.inputChannels = nChannels;
}
if (info.outputChannels == 0 && info.inputChannels == 0) {
jack_client_close(client);
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: error determining Jack input/output channels!");
// TODO : airtaudio::errorWarning;
return info;
}
// If device opens for both playback and capture, we determine the channels.
if (info.outputChannels > 0 && info.inputChannels > 0) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Jack always uses 32-bit floats.
info.nativeFormats = airtaudio::FLOAT32;
// Jack doesn't provide default devices so we'll use the first available one.
if ( _device == 0
&& info.outputChannels > 0) {
info.isDefaultOutput = true;
}
if ( _device == 0
&& info.inputChannels > 0) {
info.isDefaultInput = true;
}
jack_client_close(client);
info.probed = true;
return info;
}
// Trampoline from the JACK C process-callback to the C++ Jack object.
// Returns 0 to keep the stream running, non-zero to stop it.
static int32_t jackCallbackHandler(jack_nframes_t _nframes, void *_infoPointer) {
airtaudio::CallbackInfo* callbackInfo = (airtaudio::CallbackInfo*)_infoPointer;
airtaudio::api::Jack* self = (airtaudio::api::Jack*)callbackInfo->object;
bool keepRunning = self->callbackEvent((uint64_t)_nframes);
return keepRunning ? 0 : 1;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
// Thread entry point used by jackShutdown(): closes the stream from outside
// the JACK callback context (see the comment block above).
static void jackCloseStream(void *_ptr) {
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_ptr;
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
object->closeStream();
}
// JACK server shutdown handler: spawn a thread to close the stream.
static void jackShutdown(void* _infoPointer) {
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer;
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
// Check current stream state. If stopped, then we'll assume this
// was called as a result of a call to airtaudio::api::Jack::stopStream (the
// deactivation of a client handle causes this function to be called).
// If not, we'll assume the Jack server is shutting down or some
// other problem occurred and we should close the stream.
if (object->isStreamRunning() == false) {
return;
}
// NOTE(review): the std::thread object is intentionally never joined or
// deleted (it must outlive this handler) — the small allocation is leaked.
new std::thread(jackCloseStream, info);
ATA_ERROR("RtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!");
}
static int32_t jackXrun(void* _infoPointer) {
JackHandle* handle = (JackHandle*)_infoPointer;
if (handle->ports[0]) {
handle->xrun[0] = true;
}
if (handle->ports[1]) {
handle->xrun[1] = true;
}
return 0;
}
/**
 * @brief Probe and open one direction (OUTPUT or INPUT) of a JACK stream.
 * @param _device Index of the JACK "device" (ports grouped by client-name prefix).
 * @param _mode Direction being configured.
 * @param _channels Number of channels requested by the user.
 * @param _firstChannel Channel offset inside the device's port list.
 * @param _sampleRate Requested rate; must equal the JACK server rate.
 * @param _format User-side sample format (JACK itself is always FLOAT32).
 * @param _bufferSize Out: receives the JACK server period size.
 * @param _options Optional stream options (stream name, interleaving flag).
 * @return true on success, false otherwise.
 */
bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
                                           airtaudio::api::StreamMode _mode,
                                           uint32_t _channels,
                                           uint32_t _firstChannel,
                                           uint32_t _sampleRate,
                                           airtaudio::format _format,
                                           uint32_t* _bufferSize,
                                           airtaudio::StreamOptions* _options) {
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	// Look for jack server and try to become a client (only do once per stream).
	jack_client_t *client = 0;
	if (    _mode == OUTPUT
	     || (    _mode == INPUT
	          && m_stream.mode != OUTPUT)) {
		jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
		jack_status_t *status = NULL;
		if (_options && !_options->streamName.empty()) {
			client = jack_client_open(_options->streamName.c_str(), jackoptions, status);
		} else {
			client = jack_client_open("RtApiJack", jackoptions, status);
		}
		if (client == 0) {
			ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: Jack server not found or connection error!");
			return false;
		}
	} else {
		// The handle must have been created on an earlier pass.
		client = handle->client;
	}
	const char **ports;
	std::string port, previousPort, deviceName;
	uint32_t nPorts = 0, nDevices = 0;
	// Enumerate every port and derive "devices" from the part of the port
	// name before the first colon (the owning client's name).
	ports = jack_get_ports(client, NULL, NULL, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == _device) {
						deviceName = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: device ID is invalid!");
		return false;
	}
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	uint64_t flag = JackPortIsInput;
	if (_mode == INPUT) flag = JackPortIsOutput;
	ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
	}
	// Compare the jack ports for specified client to the requested number of channels.
	if (nChannels < (_channels + _firstChannel)) {
		ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
		return false;
	}
	// Check the jack server sample rate.
	uint32_t jackRate = jack_get_sample_rate(client);
	if (_sampleRate != jackRate) {
		jack_client_close(client);
		ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
		return false;
	}
	m_stream.sampleRate = jackRate;
	// Get the latency of the JACK port.
	ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
	// BUGFIX: guard against a NULL port list before indexing it.
	if (    ports != NULL
	     && ports[ _firstChannel ]) {
		// Added by Ge Wang
		jack_latency_callback_mode_t cbmode = (_mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
		// the range (usually the min and max are equal)
		jack_latency_range_t latrange; latrange.min = latrange.max = 0;
		// get the latency range
		jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
		// be optimistic, use the min!
		m_stream.latency[_mode] = latrange.min;
		//m_stream.latency[_mode] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
	}
	free(ports); // free(NULL) is a no-op, so this is safe either way.
	// The jack server always uses 32-bit floating-point data.
	m_stream.deviceFormat[_mode] = FLOAT32;
	m_stream.userFormat = _format;
	if (_options && _options->flags & NONINTERLEAVED) {
		m_stream.userInterleaved = false;
	} else {
		m_stream.userInterleaved = true;
	}
	// Jack always uses non-interleaved buffers.
	m_stream.deviceInterleaved[_mode] = false;
	// Jack always provides host byte-ordered data.
	m_stream.doByteSwap[_mode] = false;
	// Get the buffer size. The buffer size and number of buffers
	// (periods) is set when the jack server is started.
	m_stream.bufferSize = (int) jack_get_buffer_size(client);
	*_bufferSize = m_stream.bufferSize;
	m_stream.nDeviceChannels[_mode] = _channels;
	m_stream.nUserChannels[_mode] = _channels;
	// Set flags for buffer conversion.
	m_stream.doConvertBuffer[_mode] = false;
	if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
		m_stream.doConvertBuffer[_mode] = true;
	}
	if (    m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
	     && m_stream.nUserChannels[_mode] > 1) {
		m_stream.doConvertBuffer[_mode] = true;
	}
	// Allocate our JackHandle structure for the stream.
	if (handle == 0) {
		handle = new JackHandle;
		if (handle == NULL) {
			ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating JackHandle memory.");
			goto error;
		}
		m_stream.apiHandle = (void *) handle;
		handle->client = client;
	}
	handle->deviceName[_mode] = deviceName;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
	m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
	if (m_stream.userBuffer[_mode] == NULL) {
		ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating user buffer memory.");
		goto error;
	}
	if (m_stream.doConvertBuffer[_mode]) {
		bool makeBuffer = true;
		if (_mode == OUTPUT) {
			bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
		} else { // _mode == INPUT
			bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
			// For duplex streams, reuse the existing (larger) device buffer.
			if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
				if (bufferBytes < bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
			m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_stream.deviceBuffer == NULL) {
				ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating device buffer memory.");
				goto error;
			}
		}
	}
	// Allocate memory for the Jack ports (channels) identifiers.
	handle->ports[_mode] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
	if (handle->ports[_mode] == NULL) {
		ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating port memory.");
		goto error;
	}
	m_stream.device[_mode] = _device;
	m_stream.channelOffset[_mode] = _firstChannel;
	m_stream.state = STREAM_STOPPED;
	m_stream.callbackInfo.object = (void *) this;
	if (    m_stream.mode == OUTPUT
	     && _mode == INPUT) {
		// We had already set up the stream for output.
		m_stream.mode = DUPLEX;
	} else {
		m_stream.mode = _mode;
		jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo);
		// BUGFIX: pass the handle itself, not the address of the local
		// pointer — jackXrun casts its argument directly to JackHandle*, and
		// &handle would dangle as soon as this function returns.
		jack_set_xrun_callback(handle->client, jackXrun, (void *) handle);
		jack_on_shutdown(handle->client, jackShutdown, (void *) &m_stream.callbackInfo);
	}
	// Register our ports.
	char label[64];
	if (_mode == OUTPUT) {
		for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
			snprintf(label, 64, "outport %d", i);
			handle->ports[0][i] = jack_port_register(handle->client,
			                                         (const char *)label,
			                                         JACK_DEFAULT_AUDIO_TYPE,
			                                         JackPortIsOutput,
			                                         0);
		}
	} else {
		for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
			snprintf(label, 64, "inport %d", i);
			handle->ports[1][i] = jack_port_register(handle->client,
			                                         (const char *)label,
			                                         JACK_DEFAULT_AUDIO_TYPE,
			                                         JackPortIsInput,
			                                         0);
		}
	}
	// Setup the buffer conversion information structure. We don't use
	// buffers to do channel offsets, so we override that parameter
	// here.
	if (m_stream.doConvertBuffer[_mode]) {
		setConvertInfo(_mode, 0);
	}
	return true;
error:
	// Unwind everything allocated in this call before failing.
	if (handle) {
		jack_client_close(handle->client);
		if (handle->ports[0]) {
			free(handle->ports[0]);
		}
		if (handle->ports[1]) {
			free(handle->ports[1]);
		}
		delete handle;
		m_stream.apiHandle = NULL;
	}
	for (int32_t iii=0; iii<2; ++iii) {
		if (m_stream.userBuffer[iii]) {
			free(m_stream.userBuffer[iii]);
			m_stream.userBuffer[iii] = NULL;
		}
	}
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = NULL;
	}
	return false;
}
enum airtaudio::errorType airtaudio::api::Jack::closeStream() {
if (m_stream.state == STREAM_CLOSED) {
ATA_ERROR("airtaudio::api::Jack::closeStream(): no open stream to close!");
return airtaudio::errorWarning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
if (handle != NULL) {
if (m_stream.state == STREAM_RUNNING) {
jack_deactivate(handle->client);
}
jack_client_close(handle->client);
}
if (handle != NULL) {
if (handle->ports[0]) {
free(handle->ports[0]);
}
if (handle->ports[1]) {
free(handle->ports[1]);
}
delete handle;
m_stream.apiHandle = NULL;
}
for (int32_t i=0; i<2; i++) {
if (m_stream.userBuffer[i]) {
free(m_stream.userBuffer[i]);
m_stream.userBuffer[i] = NULL;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = NULL;
}
m_stream.mode = UNINITIALIZED;
m_stream.state = STREAM_CLOSED;
return airtaudio::errorNone;
}
/**
 * @brief Activate the JACK client and connect our registered ports to the
 * physical ports of the selected device, then mark the stream as running.
 * @return airtaudio::errorNone on success, airtaudio::errorFail when no
 * valid stream exists, airtaudio::errorWarning when already running,
 * otherwise airtaudio::errorSystemError.
 */
enum airtaudio::errorType airtaudio::api::Jack::startStream() {
	if (verifyStream() != airtaudio::errorNone) {
		return airtaudio::errorFail;
	}
	if (m_stream.state == STREAM_RUNNING) {
		ATA_ERROR("airtaudio::api::Jack::startStream(): the stream is already running!");
		return airtaudio::errorWarning;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	int32_t result = jack_activate(handle->client);
	if (result) {
		ATA_ERROR("airtaudio::api::Jack::startStream(): unable to activate JACK client!");
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (    m_stream.mode == OUTPUT
	     || m_stream.mode == DUPLEX) {
		// Preset result to non-zero so an early goto reports a system error.
		result = 1;
		ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
		if (ports == NULL) {
			ATA_ERROR("airtaudio::api::Jack::startStream(): error determining available JACK input ports!");
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_stream.channelOffset[0] + i ])
				result = jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[ m_stream.channelOffset[0] + i ]);
			if (result) {
				free(ports);
				ATA_ERROR("airtaudio::api::Jack::startStream(): error connecting output ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	if (    m_stream.mode == INPUT
	     || m_stream.mode == DUPLEX) {
		result = 1;
		ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput);
		if (ports == NULL) {
			ATA_ERROR("airtaudio::api::Jack::startStream(): error determining available JACK output ports!");
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_stream.channelOffset[1] + i ]) {
				result = jack_connect(handle->client, ports[ m_stream.channelOffset[1] + i ], jack_port_name(handle->ports[1][i]));
			}
			if (result) {
				free(ports);
				ATA_ERROR("airtaudio::api::Jack::startStream(): error connecting input ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain bookkeeping before entering the running state.
	handle->drainCounter = 0;
	handle->internalDrain = false;
	m_stream.state = STREAM_RUNNING;
unlock:
	if (result == 0) {
		return airtaudio::errorNone;
	}
	return airtaudio::errorSystemError;
}
/**
 * @brief Drain pending output (if any), then deactivate the JACK client.
 * @return airtaudio::errorNone on success, airtaudio::errorFail when no
 * valid stream exists, airtaudio::errorWarning when already stopped.
 */
enum airtaudio::errorType airtaudio::api::Jack::stopStream() {
	if (verifyStream() != airtaudio::errorNone) {
		return airtaudio::errorFail;
	}
	if (m_stream.state == STREAM_STOPPED) {
		ATA_ERROR("airtaudio::api::Jack::stopStream(): the stream is already stopped!");
		return airtaudio::errorWarning;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	if (    m_stream.mode == OUTPUT
	     || m_stream.mode == DUPLEX) {
		// drainCounter == 0 means no drain was requested yet: ask the process
		// callback to output silence (see callbackEvent(), which increments the
		// counter each period and notifies once it exceeds 3), then wait here.
		if (handle->drainCounter == 0) {
			handle->drainCounter = 2;
			std::unique_lock<std::mutex> lck(m_stream.mutex);
			// NOTE(review): wait() has no predicate, so a spurious wakeup would
			// end the drain early — confirm whether a predicate on the drain
			// state should be added here.
			handle->condition.wait(lck);
		}
	}
	jack_deactivate(handle->client);
	m_stream.state = STREAM_STOPPED;
	return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::Jack::abortStream() {
if (verifyStream() != airtaudio::errorNone) {
return airtaudio::errorFail;
}
if (m_stream.state == STREAM_STOPPED) {
ATA_ERROR("airtaudio::api::Jack::abortStream(): the stream is already stopped!");
return airtaudio::errorWarning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
handle->drainCounter = 2;
return stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void *_ptr) {
airtaudio::CallbackInfo *info = (airtaudio::CallbackInfo *) _ptr;
airtaudio::api::Jack *object = (airtaudio::api::Jack *) info->object;
object->stopStream();
}
/**
 * @brief Per-period processing, invoked from the JACK process callback.
 * Runs the user callback and moves audio between the user buffers and the
 * per-channel JACK port buffers.
 * @param _nframes Number of frames in this period (must match bufferSize).
 * @return false on a fatal inconsistency, true otherwise.
 */
bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
	if (    m_stream.state == STREAM_STOPPED
	     || m_stream.state == STREAM_STOPPING) {
		return true;
	}
	if (m_stream.state == STREAM_CLOSED) {
		// BUGFIX: these two messages wrongly referenced RtApiCore
		// (copy/paste from the CoreAudio backend).
		ATA_ERROR("airtaudio::api::Jack::callbackEvent(): the stream is closed ... this shouldn't happen!");
		return false;
	}
	if (m_stream.bufferSize != _nframes) {
		ATA_ERROR("airtaudio::api::Jack::callbackEvent(): the JACK buffer size has changed ... cannot process!");
		return false;
	}
	CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo;
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	// Check if we were draining the stream and signal is finished.
	if (handle->drainCounter > 3) {
		m_stream.state = STREAM_STOPPING;
		if (handle->internalDrain == true) {
			// The user callback requested the stop: perform it from a helper
			// thread. BUGFIX: detach a temporary instead of leaking a
			// heap-allocated std::thread.
			std::thread(jackStopStream, info).detach();
		} else {
			// stopStream() is blocked on this condition variable: wake it up.
			handle->condition.notify_one();
		}
		return true;
	}
	// Invoke user callback first, to get fresh output data.
	if (handle->drainCounter == 0) {
		airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) info->callback;
		double streamTime = getStreamTime();
		airtaudio::streamStatus status = 0;
		// Report (and clear) any xrun flagged by the JACK xrun callback.
		if (m_stream.mode != INPUT && handle->xrun[0] == true) {
			status |= OUTPUT_UNDERFLOW;
			handle->xrun[0] = false;
		}
		if (m_stream.mode != OUTPUT && handle->xrun[1] == true) {
			status |= INPUT_OVERFLOW;
			handle->xrun[1] = false;
		}
		int32_t cbReturnValue = callback(m_stream.userBuffer[0],
		                                 m_stream.userBuffer[1],
		                                 m_stream.bufferSize,
		                                 streamTime,
		                                 status,
		                                 info->userData);
		if (cbReturnValue == 2) {
			// 2 == abort the stream immediately.
			m_stream.state = STREAM_STOPPING;
			handle->drainCounter = 2;
			std::thread(jackStopStream, info).detach();
			return true;
		}
		else if (cbReturnValue == 1) {
			// 1 == stop after draining the output.
			handle->drainCounter = 1;
			handle->internalDrain = true;
		}
	}
	jack_default_audio_sample_t *jackbuffer;
	uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		if (handle->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		} else if (m_stream.doConvertBuffer[0]) {
			convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
			for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_stream.deviceBuffer[i*bufferBytes], bufferBytes);
			}
		} else { // no buffer conversion
			for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_stream.userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (handle->drainCounter) {
			handle->drainCounter++;
			goto unlock;
		}
	}
	if (    m_stream.mode == INPUT
	     || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[1]) {
			for (uint32_t i=0; i<m_stream.nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_stream.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_stream.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	airtaudio::Api::tickStreamTime();
	return true;
}
#endif

View File

@@ -1,48 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_JACK_H__) && defined(__UNIX_JACK__)
#define __AIRTAUDIO_API_JACK_H__
namespace airtaudio {
namespace api {
/**
 * @brief airtaudio backend implemented on top of the JACK audio
 * connection kit (only compiled when __UNIX_JACK__ is defined).
 */
class Jack: public airtaudio::Api {
	public:
		//! Factory used by the generic back-end instantiation code.
		static airtaudio::Api* Create();
	public:
		Jack();
		virtual ~Jack();
		//! @return the identifier of this back-end implementation.
		airtaudio::api::type getCurrentApi() {
			return airtaudio::api::UNIX_JACK;
		}
		//! @return the number of JACK "devices" (distinct port-name prefixes).
		uint32_t getDeviceCount();
		//! @return the description of device @p _device.
		airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
		enum airtaudio::errorType closeStream();
		enum airtaudio::errorType startStream();
		enum airtaudio::errorType stopStream();
		enum airtaudio::errorType abortStream();
		long getStreamLatency();
		// This function is intended for internal use only. It must be
		// public because it is called by the internal callback handler,
		// which is not a member of RtAudio. External use of this function
		// will most likely produce highly undesireable results!
		bool callbackEvent(uint64_t _nframes);
	private:
		//! Open and configure one direction of the stream.
		bool probeDeviceOpen(uint32_t _device,
		                     airtaudio::api::StreamMode _mode,
		                     uint32_t _channels,
		                     uint32_t _firstChannel,
		                     uint32_t _sampleRate,
		                     airtaudio::format _format,
		                     uint32_t *_bufferSize,
		                     airtaudio::StreamOptions *_options);
};
};
};
#endif

View File

@@ -1,848 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__LINUX_OSS__)
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include "soundcard.h"
#include <errno.h>
#include <math.h>
// Factory used by the generic back-end instantiation code.
airtaudio::Api* airtaudio::api::Oss::Create() {
	airtaudio::Api* instance = new airtaudio::api::Oss();
	return instance;
}
static void *ossCallbackHandler(void* _ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
int32_t id[2]; // device ids
bool xrun[2];
bool triggered;
std::condition_variable runnable;
OssHandle():
triggered(false) {
id[0] = 0;
id[1] = 0;
xrun[0] = false;
xrun[1] = false;
}
};
// Default constructor: device discovery happens on demand, so there is
// nothing to initialize here.
airtaudio::api::Oss::Oss() {
	// Nothing to do here.
}
// Destructor: make sure any stream still open is properly closed.
airtaudio::api::Oss::~Oss() {
	if (m_stream.state == STREAM_CLOSED) {
		return;
	}
	closeStream();
}
/**
 * @brief Count the audio devices known to the OSS mixer.
 * @return Number of devices, or 0 when the mixer cannot be queried.
 */
uint32_t airtaudio::api::Oss::getDeviceCount() {
	uint32_t deviceCount = 0;
	int32_t mixerFd = open("/dev/mixer", O_RDWR, 0);
	if (mixerFd == -1) {
		ATA_ERROR("airtaudio::api::Oss::getDeviceCount: error opening '/dev/mixer'.");
		return deviceCount;
	}
	oss_sysinfo sysinfo;
	if (ioctl(mixerFd, SNDCTL_SYSINFO, &sysinfo) != -1) {
		deviceCount = sysinfo.numaudios;
	} else {
		// SNDCTL_SYSINFO is only available from OSS v4 onward.
		ATA_ERROR("airtaudio::api::Oss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.");
	}
	close(mixerFd);
	return deviceCount;
}
/**
 * @brief Fill a DeviceInfo structure for OSS device number @p _device.
 * @param _device Device index as reported by getDeviceCount().
 * @return The populated structure; info.probed stays false on any error.
 */
airtaudio::DeviceInfo airtaudio::api::Oss::getDeviceInfo(uint32_t _device) {
	// BUGFIX: this must be airtaudio::DeviceInfo (the return type) — the old
	// "rtaudio::" qualifier was a leftover of the RtAudio port.
	airtaudio::DeviceInfo info;
	info.probed = false;
	int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
	if (mixerfd == -1) {
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error opening '/dev/mixer'.");
		return info;
	}
	oss_sysinfo sysinfo;
	int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
	if (result == -1) {
		close(mixerfd);
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.");
		return info;
	}
	unsigned nDevices = sysinfo.numaudios;
	if (nDevices == 0) {
		close(mixerfd);
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: no devices found!");
		return info;
	}
	if (_device >= nDevices) {
		close(mixerfd);
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: device ID is invalid!");
		return info;
	}
	oss_audioinfo ainfo;
	ainfo.dev = _device;
	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
	close(mixerfd);
	if (result == -1) {
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.");
		// CONSISTENCY FIX: the ported API has no error() member; follow the
		// convention used by the Jack implementation.
		// TODO : airtaudio::errorWarning;
		return info;
	}
	// Probe channels
	if (ainfo.caps & PCM_CAP_OUTPUT) {
		info.outputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_INPUT) {
		info.inputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_DUPLEX) {
		if (    info.outputChannels > 0
		     && info.inputChannels > 0
		     && ainfo.caps & PCM_CAP_DUPLEX) {
			info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
		}
	}
	// Probe data formats ... do for input
	// CONSISTENCY FIX: use the airtaudio format constants (cf. the Jack
	// implementation, which uses airtaudio::FLOAT32) instead of the RTAUDIO_*
	// leftovers from the RtAudio port.
	uint64_t mask = ainfo.iformats;
	if (    mask & AFMT_S16_LE
	     || mask & AFMT_S16_BE) {
		info.nativeFormats |= airtaudio::SINT16;
	}
	if (mask & AFMT_S8) {
		info.nativeFormats |= airtaudio::SINT8;
	}
	if (    mask & AFMT_S32_LE
	     || mask & AFMT_S32_BE) {
		info.nativeFormats |= airtaudio::SINT32;
	}
	if (mask & AFMT_FLOAT) {
		info.nativeFormats |= airtaudio::FLOAT32;
	}
	if (    mask & AFMT_S24_LE
	     || mask & AFMT_S24_BE) {
		info.nativeFormats |= airtaudio::SINT24;
	}
	// Check that we have at least one supported format
	if (info.nativeFormats == 0) {
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.");
		return info;
	}
	// Probe the supported sample rates.
	info.sampleRates.clear();
	if (ainfo.nrates) {
		// The driver reports an explicit list of supported rates: keep the
		// intersection with our own candidate table.
		for (uint32_t i=0; i<ainfo.nrates; i++) {
			for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
				if (ainfo.rates[i] == SAMPLE_RATES[k]) {
					info.sampleRates.push_back(SAMPLE_RATES[k]);
					break;
				}
			}
		}
	} else {
		// Check min and max rate values;
		for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
			if (    ainfo.min_rate <= (int) SAMPLE_RATES[k]
			     && ainfo.max_rate >= (int) SAMPLE_RATES[k]) {
				info.sampleRates.push_back(SAMPLE_RATES[k]);
			}
		}
	}
	if (info.sampleRates.size() == 0) {
		ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").");
	} else {
		info.probed = true;
		info.name = ainfo.name;
	}
	return info;
}
bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
rtaudio::format _format,
uint32_t* _bufferSize,
rtaudio::StreamOptions* _options) {
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error opening '/dev/mixer'.");
return false;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.");
return false;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: no devices found!");
return false;
}
if (_device >= nDevices) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device ID is invalid!");
return false;
}
oss_audioinfo ainfo;
ainfo.dev = _device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.");
return false;
}
// Check if device supports input or output
if ( ( _mode == OUTPUT
&& !(ainfo.caps & PCM_CAP_OUTPUT))
|| ( _mode == INPUT
&& !(ainfo.caps & PCM_CAP_INPUT))) {
if (_mode == OUTPUT) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.");
} else {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.");
}
return false;
}
int32_t flags = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
if (_mode == OUTPUT) {
flags |= O_WRONLY;
} else { // _mode == INPUT
if ( m_stream.mode == OUTPUT
&& m_stream.device[0] == _device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(handle->id[0]);
handle->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_DUPLEX)) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.");
return false;
}
// Check that the number previously set channels is the same.
if (m_stream.nUserChannels[0] != _channels) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
return false;
}
flags |= O_RDWR;
} else {
flags |= O_RDONLY;
}
}
// Set exclusive access if specified.
if ( _options != NULL
&& _options->flags & RTAUDIO_HOG_DEVICE) {
flags |= O_EXCL;
}
// Try to open the device.
int32_t fd;
fd = open(ainfo.devnode, flags, 0);
if (fd == -1) {
if (errno == EBUSY) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") is busy.");
} else {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error opening device (" << ainfo.name << ").");
}
return false;
}
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, NULL);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return false;
}
}
*/
// Check the device channel support.
m_stream.nUserChannels[_mode] = _channels;
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.");
return false;
}
// Set the number of channels.
int32_t deviceChannels = _channels + _firstChannel;
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
if ( result == -1
|| deviceChannels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").");
return false;
}
m_stream.nDeviceChannels[_mode] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
if (result == -1) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.");
return false;
}
// Determine how to set the device format.
m_stream.userFormat = _format;
int32_t deviceFormat = -1;
m_stream.doByteSwap[_mode] = false;
if (_format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
}
} else if (_format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
m_stream.doByteSwap[_mode] = true;
}
} else if (_format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
m_stream.doByteSwap[_mode] = true;
}
} else if (_format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
m_stream.doByteSwap[_mode] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
} else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
} else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
m_stream.doByteSwap[_mode] = true;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
m_stream.doByteSwap[_mode] = true;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
m_stream.doByteSwap[_mode] = true;
} else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
}
}
if (m_stream.deviceFormat[_mode] == 0) {
// This really shouldn't happen ...
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.");
return false;
}
// Set the data format.
int32_t temp = deviceFormat;
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
if ( result == -1
|| deviceFormat != temp) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").");
return false;
}
// Attempt to set the buffer size. According to OSS, the minimum
// number of buffers is two. The supposed minimum buffer size is 16
// bytes, so that will be our lower bound. The argument to this
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels;
if (ossBufferBytes < 16) {
ossBufferBytes = 16;
}
int32_t buffers = 0;
if (_options != NULL) {
buffers = _options->numberOfBuffers;
}
if ( _options != NULL
&& _options->flags & RTAUDIO_MINIMIZE_LATENCY) {
buffers = 2;
}
if (buffers < 2) {
buffers = 3;
}
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
if (result == -1) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").");
return false;
}
m_stream.nBuffers = buffers;
// Save buffer size (in sample frames).
*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels);
m_stream.bufferSize = *_bufferSize;
// Set the sample rate.
int32_t srate = _sampleRate;
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
if (result == -1) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
return false;
}
// Verify the sample rate setup worked.
if (abs(srate - _sampleRate) > 100) {
close(fd);
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
return false;
}
m_stream.sampleRate = _sampleRate;
if ( _mode == INPUT
&& m_stream._mode == OUTPUT
&& m_stream.device[0] == _device) {
// We're doing duplex setup here.
m_stream.deviceFormat[0] = m_stream.deviceFormat[1];
m_stream.nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
m_stream.userInterleaved = true;
m_stream.deviceInterleaved[_mode] = true;
if (_options && _options->flags & RTAUDIO_NONINTERLEAVED) {
m_stream.userInterleaved = false;
}
// Set flags for buffer conversion
m_stream.doConvertBuffer[_mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
&& m_stream.nUserChannels[_mode] > 1) {
m_stream.doConvertBuffer[_mode] = true;
}
// Allocate the stream handles if necessary and then save.
if (m_stream.apiHandle == 0) {
handle = new OssHandle;
if handle == NULL) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating OssHandle memory.");
goto error;
}
m_stream.apiHandle = (void *) handle;
} else {
handle = (OssHandle *) m_stream.apiHandle;
}
handle->id[_mode] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[_mode] == NULL) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating user buffer memory.");
goto error;
}
if (m_stream.doConvertBuffer[_mode]) {
bool makeBuffer = true;
bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
if (_mode == INPUT) {
if ( m_stream._mode == OUTPUT
&& m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
}
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_stream.deviceBuffer == NULL) {
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating device buffer memory.");
goto error;
}
}
}
m_stream.device[_mode] = _device;
m_stream.state = STREAM_STOPPED;
// Setup the buffer conversion information structure.
if (m_stream.doConvertBuffer[_mode]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
if (m_stream.mode == OUTPUT && _mode == INPUT) {
// We had already set up an output stream.
m_stream.mode = DUPLEX;
if (m_stream.device[0] == _device) {
handle->id[0] = fd;
}
} else {
m_stream.mode = _mode;
// Setup callback thread.
m_stream.callbackInfo.object = (void *) this;
m_stream.callbackInfo.isRunning = true;
m_stream.callbackInfo.thread = new std::thread(ossCallbackHandler, &m_stream.callbackInfo);
if (m_stream.callbackInfo.thread == NULL) {
m_stream.callbackInfo.isRunning = false;
ATA_ERROR("airtaudio::api::Oss::error creating callback thread!");
goto error;
}
}
return true;
error:
if (handle) {
if (handle->id[0]) {
close(handle->id[0]);
}
if (handle->id[1]) {
close(handle->id[1]);
}
delete handle;
m_stream.apiHandle = 0;
}
for (int32_t i=0; i<2; i++) {
if (m_stream.userBuffer[i]) {
free(m_stream.userBuffer[i]);
m_stream.userBuffer[i] = 0;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
return false;
}
// Close the OSS stream: stop the callback thread, halt the device(s),
// release file descriptors and all internal buffers.
// Returns errorWarning if no stream is open, errorNone otherwise.
enum airtaudio::errorType airtaudio::api::Oss::closeStream() {
	if (m_stream.state == STREAM_CLOSED) {
		ATA_ERROR("airtaudio::api::Oss::closeStream(): no open stream to close!");
		return airtaudio::errorWarning;
	}
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	// Signal the callback loop to exit, then wake it if it is parked on the
	// condition variable (a stopped stream blocks in callbackEvent()).
	m_stream.callbackInfo.isRunning = false;
	m_stream.mutex.lock();
	if (m_stream.state == STREAM_STOPPED) {
		// NOTE(review): handle is dereferenced here but only NULL-checked
		// further below — confirm apiHandle is always non-null for an open stream.
		handle->runnable.notify_one();
	}
	m_stream.mutex.unlock();
	// Wait for the callback thread to finish before tearing anything down.
	m_stream.callbackInfo.thread->join();
	if (m_stream.state == STREAM_RUNNING) {
		// Halt whichever descriptor is driving the stream (output fd for
		// OUTPUT/DUPLEX, input fd otherwise).
		if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
			ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		} else {
			ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		}
		m_stream.state = STREAM_STOPPED;
	}
	if (handle) {
		// Close both descriptors when present (duplex may use two).
		if (handle->id[0]) {
			close(handle->id[0]);
		}
		if (handle->id[1]) {
			close(handle->id[1]);
		}
		delete handle;
		m_stream.apiHandle = 0;
	}
	// Free per-direction user buffers and the shared device buffer.
	for (int32_t i=0; i<2; i++) {
		if (m_stream.userBuffer[i]) {
			free(m_stream.userBuffer[i]);
			m_stream.userBuffer[i] = 0;
		}
	}
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = 0;
	}
	m_stream.mode = UNINITIALIZED;
	m_stream.state = STREAM_CLOSED;
	return airtaudio::errorNone;
}
// Start a stopped OSS stream and wake the callback thread.
// Returns errorFail if no valid stream, errorWarning if already running,
// errorNone on success.
enum airtaudio::errorType airtaudio::api::Oss::startStream() {
	if (verifyStream() != airtaudio::errorNone) {
		return airtaudio::errorFail;
	}
	if (m_stream.state == STREAM_RUNNING) {
		ATA_ERROR("airtaudio::api::Oss::startStream(): the stream is already running!");
		return airtaudio::errorWarning;
	}
	m_stream.mutex.lock();
	m_stream.state = STREAM_RUNNING;
	// No need to do anything else here ... OSS automatically starts
	// when fed samples.
	m_stream.mutex.unlock();
	// Wake the callback thread parked in callbackEvent().
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	handle->runnable.notify_one();
	// Fix: the function previously fell off the end without returning a
	// value (undefined behavior for a non-void function).
	return airtaudio::errorNone;
}
// Stop a running OSS stream after draining the output with silence.
// Returns errorNone on success, errorWarning on non-fatal conditions,
// errorSystemError if an ioctl fails.
enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
	if (verifyStream() != airtaudio::errorNone) {
		return airtaudio::errorFail;
	}
	if (m_stream.state == STREAM_STOPPED) {
		ATA_ERROR("airtaudio::api::Oss::stopStream(): the stream is already stopped!");
		// Fix: was a bare 'return;' in a value-returning function.
		return airtaudio::errorWarning;
	}
	m_stream.mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_stream.state == STREAM_STOPPED) {
		m_stream.mutex.unlock();
		// Fix: was a bare 'return;' in a value-returning function.
		return airtaudio::errorNone;
	}
	int32_t result = 0;
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	if (    m_stream.mode == OUTPUT
	     || m_stream.mode == DUPLEX) {
		// Flush the output with zeros a few times so the device drains
		// cleanly before being halted.
		char *buffer;
		int32_t samples;
		airtaudio::format format;
		if (m_stream.doConvertBuffer[0]) {
			buffer = m_stream.deviceBuffer;
			samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
			format = m_stream.deviceFormat[0];
		} else {
			buffer = m_stream.userBuffer[0];
			samples = m_stream.bufferSize * m_stream.nUserChannels[0];
			format = m_stream.userFormat;
		}
		memset(buffer, 0, samples * formatBytes(format));
		for (uint32_t i=0; i<m_stream.nBuffers+1; i++) {
			result = write(handle->id[0], buffer, samples * formatBytes(format));
			if (result == -1) {
				ATA_ERROR("airtaudio::api::Oss::stopStream: audio write error.");
				// Fix: previously returned while still holding m_stream.mutex.
				m_stream.mutex.unlock();
				return airtaudio::errorWarning;
			}
		}
		result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("airtaudio::api::Oss::stopStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").");
			goto unlock;
		}
		handle->triggered = false;
	}
	if (    m_stream.mode == INPUT
	     || (    m_stream.mode == DUPLEX
	          && handle->id[0] != handle->id[1])) {
		result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("airtaudio::api::Oss::stopStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_stream.state = STREAM_STOPPED;
	m_stream.mutex.unlock();
	if (result != -1) {
		return airtaudio::errorNone;
	}
	return airtaudio::errorSystemError;
}
// Abort a running OSS stream immediately (no output drain).
// Returns errorNone on success, errorWarning on non-fatal conditions,
// errorSystemError if an ioctl fails.
enum airtaudio::errorType airtaudio::api::Oss::abortStream() {
	if (verifyStream() != airtaudio::errorNone) {
		return airtaudio::errorFail;
	}
	if (m_stream.state == STREAM_STOPPED) {
		ATA_ERROR("airtaudio::api::Oss::abortStream(): the stream is already stopped!");
		return airtaudio::errorWarning;
	}
	m_stream.mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_stream.state == STREAM_STOPPED) {
		m_stream.mutex.unlock();
		// Fix: was a bare 'return;' in a value-returning function.
		return airtaudio::errorNone;
	}
	int32_t result = 0;
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("airtaudio::api::Oss::abortStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").");
			goto unlock;
		}
		handle->triggered = false;
	}
	// In duplex mode the input may share the output descriptor; only halt
	// the input fd when it is distinct.
	if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) {
		result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("airtaudio::api::Oss::abortStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_stream.state = STREAM_STOPPED;
	m_stream.mutex.unlock();
	if (result != -1) {
		return airtaudio::errorNone;
	}
	return airtaudio::errorSystemError;
}
void airtaudio::api::Oss::callbackEvent() {
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
if (m_stream.state == STREAM_STOPPED) {
std::unique_lock<std::mutex> lck(m_stream.mutex);
handle->runnable.wait(lck);
if (m_stream.state != STREAM_RUNNING) {
return;
}
}
if (m_stream.state == STREAM_CLOSED) {
ATA_ERROR("airtaudio::api::Oss::callbackEvent(): the stream is closed ... this shouldn't happen!");
return airtaudio::errorWarning;
}
// Invoke user callback to get fresh output data.
int32_t doStopStream = 0;
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
double streamTime = getStreamTime();
rtaudio::streamStatus status = 0;
if ( m_stream.mode != INPUT
&& handle->xrun[0] == true) {
status |= RTAUDIO_OUTPUT_UNDERFLOW;
handle->xrun[0] = false;
}
if ( m_stream.mode != OUTPUT
&& handle->xrun[1] == true) {
status |= RTAUDIO_INPUT_OVERFLOW;
handle->xrun[1] = false;
}
doStopStream = callback(m_stream.userBuffer[0],
m_stream.userBuffer[1],
m_stream.bufferSize,
streamTime,
status,
m_stream.callbackInfo.userData);
if (doStopStream == 2) {
this->abortStream();
return;
}
m_stream.mutex.lock();
// The state might change while waiting on a mutex.
if (m_stream.state == STREAM_STOPPED) {
goto unlock;
}
int32_t result;
char *buffer;
int32_t samples;
airtaudio::format format;
if ( m_stream.mode == OUTPUT
|| m_stream.mode == DUPLEX) {
// Setup parameters and do buffer conversion if necessary.
if (m_stream.doConvertBuffer[0]) {
buffer = m_stream.deviceBuffer;
convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
format = m_stream.deviceFormat[0];
} else {
buffer = m_stream.userBuffer[0];
samples = m_stream.bufferSize * m_stream.nUserChannels[0];
format = m_stream.userFormat;
}
// Do byte swapping if necessary.
if (m_stream.doByteSwap[0]) {
byteSwapBuffer(buffer, samples, format);
}
if ( m_stream.mode == DUPLEX
&& handle->triggered == false) {
int32_t trig = 0;
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
result = write(handle->id[0], buffer, samples * formatBytes(format));
trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
handle->triggered = true;
} else {
// Write samples to device.
result = write(handle->id[0], buffer, samples * formatBytes(format));
}
if (result == -1) {
// We'll assume this is an underrun, though there isn't a
// specific means for determining that.
handle->xrun[0] = true;
ATA_ERROR("airtaudio::api::Oss::callbackEvent: audio write error.");
//error(airtaudio::errorWarning);
// Continue on to input section.
}
}
if ( m_stream.mode == INPUT
|| m_stream.mode == DUPLEX) {
// Setup parameters.
if (m_stream.doConvertBuffer[1]) {
buffer = m_stream.deviceBuffer;
samples = m_stream.bufferSize * m_stream.nDeviceChannels[1];
format = m_stream.deviceFormat[1];
} else {
buffer = m_stream.userBuffer[1];
samples = m_stream.bufferSize * m_stream.nUserChannels[1];
format = m_stream.userFormat;
}
// Read samples from device.
result = read(handle->id[1], buffer, samples * formatBytes(format));
if (result == -1) {
// We'll assume this is an overrun, though there isn't a
// specific means for determining that.
handle->xrun[1] = true;
ATA_ERROR("airtaudio::api::Oss::callbackEvent: audio read error.");
goto unlock;
}
// Do byte swapping if necessary.
if (m_stream.doByteSwap[1]) {
byteSwapBuffer(buffer, samples, format);
}
// Do buffer conversion if necessary.
if (m_stream.doConvertBuffer[1]) {
convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
}
}
unlock:
m_stream.mutex.unlock();
airtaudio::Api::tickStreamTime();
if (doStopStream == 1) {
this->stopStream();
}
}
static void ossCallbackHandler(void* _ptr) {
CallbackInfo* info = (CallbackInfo*)_ptr;
RtApiOss* object = (RtApiOss*)info->object;
bool *isRunning = &info->isRunning;
while (*isRunning == true) {
object->callbackEvent();
}
}
#endif

View File

@@ -1,47 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_OSS_H__) && defined(__LINUX_OSS__)
#define __AIRTAUDIO_API_OSS_H__
namespace airtaudio {
namespace api {
		/**
		 * @brief OSS (Open Sound System) backend implementation of the
		 * airtaudio API (Linux only, compiled when __LINUX_OSS__ is defined).
		 */
		class Oss: public airtaudio::Api {
			public:
				// Factory used by the interface layer to instantiate this backend.
				static airtaudio::Api* Create();
			public:
				Oss();
				virtual ~Oss();
				// Identify this backend to the generic interface.
				airtaudio::api::type getCurrentApi() {
					return airtaudio::api::LINUX_OSS;
				}
				// Number of OSS devices detected on the system.
				uint32_t getDeviceCount();
				// Capabilities (channels, sample rates, formats) of one device.
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				enum airtaudio::errorType closeStream();
				enum airtaudio::errorType startStream();
				enum airtaudio::errorType stopStream();
				enum airtaudio::errorType abortStream();
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesirable results!
				void callbackEvent();
			private:
				// Open one direction of a stream on the given device; returns
				// false (after cleanup) on any failure.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
};
};
#endif

View File

@@ -1,442 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__LINUX_PULSE__)
#include <unistd.h>
#include <limits.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
// Code written by Peter Meerwald, pmeerw@pmeerw.net
// and Tristan Matthews.
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
// Factory entry point used by the interface layer to build the PulseAudio backend.
airtaudio::Api* airtaudio::api::Pulse::Create() {
	airtaudio::api::Pulse* backend = new airtaudio::api::Pulse();
	return backend;
}
// Sample rates accepted by this PulseAudio backend; the list is
// zero-terminated (iterated with 'for (...; *sr; ++sr)').
static const uint32_t SUPPORTED_SAMPLERATES[] = {
	8000,
	16000,
	22050,
	32000,
	44100,
	48000,
	96000,
	0
};
// Pairs an airtaudio sample format with its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
	airtaudio::format airtaudio_format;
	pa_sample_format_t pa_format;
};
// Supported format translation table; terminated by the {0, PA_SAMPLE_INVALID}
// sentinel (both fields are checked by the lookup loop in probeDeviceOpen).
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{airtaudio::SINT16, PA_SAMPLE_S16LE},
	{airtaudio::SINT32, PA_SAMPLE_S32LE},
	{airtaudio::FLOAT32, PA_SAMPLE_FLOAT32LE},
	{0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// connections, the callback thread and its wakeup condition.
struct PulseAudioHandle {
	pa_simple *s_play; //!< playback connection (NULL if not opened)
	pa_simple *s_rec; //!< record connection (NULL if not opened)
	std::thread* thread; //!< callback thread (created in probeDeviceOpen)
	std::condition_variable runnable_cv; //!< wakes the callback thread on start
	bool runnable; //!< true while the callback loop may process audio
	PulseAudioHandle() :
	  s_play(0),
	  s_rec(0),
	  thread(0), // fix: 'thread' was left uninitialized by the constructor
	  runnable(false) {
	}
};
// Destructor: make sure an open stream is torn down before the object dies.
airtaudio::api::Pulse::~Pulse() {
	if (m_stream.state == STREAM_CLOSED) {
		return;
	}
	closeStream();
}
// PulseAudio is exposed as a single logical device (the sound server).
uint32_t airtaudio::api::Pulse::getDeviceCount() {
	static const uint32_t pulseDeviceCount = 1;
	return pulseDeviceCount;
}
// Describe the single virtual PulseAudio device: stereo in/out, a fixed set
// of sample rates and the three formats the backend can translate.
airtaudio::DeviceInfo airtaudio::api::Pulse::getDeviceInfo(uint32_t _device) {
	airtaudio::DeviceInfo info;
	info.probed = true;
	info.name = "PulseAudio";
	info.isDefaultOutput = true;
	info.isDefaultInput = true;
	info.outputChannels = 2;
	info.inputChannels = 2;
	info.duplexChannels = 2;
	// Copy the zero-terminated rate table into the device description.
	for (int32_t iii=0; SUPPORTED_SAMPLERATES[iii] != 0; ++iii) {
		info.sampleRates.push_back(SUPPORTED_SAMPLERATES[iii]);
	}
	info.nativeFormats = SINT16 | SINT32 | FLOAT32;
	return info;
}
static void pulseaudio_callback(void* _user) {
airtaudio::CallbackInfo *cbi = static_cast<airtaudio::CallbackInfo *>(_user);
airtaudio::api::Pulse *context = static_cast<airtaudio::api::Pulse*>(cbi->object);
volatile bool *isRunning = &cbi->isRunning;
while (*isRunning) {
context->callbackEvent();
}
}
// Close the PulseAudio stream: stop and join the callback thread, release
// the server connections and free the user buffers.
// Always returns errorNone.
enum airtaudio::errorType airtaudio::api::Pulse::closeStream() {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	// Ask the callback loop to exit before joining it.
	m_stream.callbackInfo.isRunning = false;
	if (pah) {
		m_stream.mutex.lock();
		if (m_stream.state == STREAM_STOPPED) {
			// Wake a callback thread parked on the condition variable so it
			// can observe isRunning == false and exit.
			pah->runnable = true;
			pah->runnable_cv.notify_one();;
		}
		m_stream.mutex.unlock();
		pah->thread->join();
		if (pah->s_play) {
			// Discard any queued output before tearing down the connection.
			pa_simple_flush(pah->s_play, NULL);
			pa_simple_free(pah->s_play);
		}
		if (pah->s_rec) {
			pa_simple_free(pah->s_rec);
		}
		delete pah;
		m_stream.apiHandle = NULL;
	}
	// Free the per-direction user buffers (index 0 = output, 1 = input).
	if (m_stream.userBuffer[0] != NULL) {
		free(m_stream.userBuffer[0]);
		m_stream.userBuffer[0] = NULL;
	}
	if (m_stream.userBuffer[1] != NULL) {
		free(m_stream.userBuffer[1]);
		m_stream.userBuffer[1] = NULL;
	}
	m_stream.state = STREAM_CLOSED;
	m_stream.mode = UNINITIALIZED;
	return airtaudio::errorNone;
}
// One iteration of the PulseAudio callback loop: fetch fresh data from the
// user callback, then write to and/or read from the server, converting
// between user and device layouts when required.
void airtaudio::api::Pulse::callbackEvent() {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	if (m_stream.state == STREAM_STOPPED) {
		// Park until startStream()/closeStream() signals us.
		std::unique_lock<std::mutex> lck(m_stream.mutex);
		while (!pah->runnable) {
			pah->runnable_cv.wait(lck);
		}
		if (m_stream.state != STREAM_RUNNING) {
			// Fix: an explicit m_stream.mutex.unlock() here was a double
			// unlock — 'lck' already releases the mutex on destruction (UB).
			return;
		}
	}
	if (m_stream.state == STREAM_CLOSED) {
		ATA_ERROR("airtaudio::api::Pulse::callbackEvent(): the stream is closed ... this shouldn't happen!");
		return;
	}
	// Invoke the user callback to produce/consume one buffer of audio.
	airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
	double streamTime = getStreamTime();
	airtaudio::streamStatus status = 0;
	int32_t doStopStream = callback(m_stream.userBuffer[OUTPUT],
	                                m_stream.userBuffer[INPUT],
	                                m_stream.bufferSize,
	                                streamTime,
	                                status,
	                                m_stream.callbackInfo.userData);
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	m_stream.mutex.lock();
	void *pulse_in = m_stream.doConvertBuffer[INPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[INPUT];
	void *pulse_out = m_stream.doConvertBuffer[OUTPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[OUTPUT];
	if (m_stream.state != STREAM_RUNNING) {
		goto unlock;
	}
	int32_t pa_error;
	size_t bytes;
	if (    m_stream.mode == OUTPUT
	     || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[OUTPUT]) {
			convertBuffer(m_stream.deviceBuffer,
			              m_stream.userBuffer[OUTPUT],
			              m_stream.convertInfo[OUTPUT]);
			bytes = m_stream.nDeviceChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[OUTPUT]);
		} else {
			bytes = m_stream.nUserChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
		}
		if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
			ATA_ERROR("airtaudio::api::Pulse::callbackEvent: audio write error, " << pa_strerror(pa_error) << ".");
			// Fix: previously returned while still holding m_stream.mutex.
			m_stream.mutex.unlock();
			return;
		}
	}
	if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[INPUT]) {
			bytes = m_stream.nDeviceChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[INPUT]);
		} else {
			bytes = m_stream.nUserChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
		}
		if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
			ATA_ERROR("airtaudio::api::Pulse::callbackEvent: audio read error, " << pa_strerror(pa_error) << ".");
			// Fix: previously returned while still holding m_stream.mutex.
			m_stream.mutex.unlock();
			return;
		}
		if (m_stream.doConvertBuffer[INPUT]) {
			convertBuffer(m_stream.userBuffer[INPUT],
			              m_stream.deviceBuffer,
			              m_stream.convertInfo[INPUT]);
		}
	}
unlock:
	m_stream.mutex.unlock();
	airtaudio::Api::tickStreamTime();
	if (doStopStream == 1) {
		stopStream();
		return;
	}
	return;
}
enum airtaudio::errorType airtaudio::api::Pulse::startStream() {
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
if (m_stream.state == STREAM_CLOSED) {
ATA_ERROR("airtaudio::api::Pulse::startStream(): the stream is not open!");
return airtaudio::errorInvalidUse;
}
if (m_stream.state == STREAM_RUNNING) {
ATA_ERROR("airtaudio::api::Pulse::startStream(): the stream is already running!");
return airtaudio::errorWarning;
}
m_stream.mutex.lock();
m_stream.state = STREAM_RUNNING;
pah->runnable = true;
pah->runnable_cv.notify_one();
m_stream.mutex.unlock();
return airtaudio::errorNone;
}
// Stop a running PulseAudio stream, draining any queued output first.
// Returns errorNone on success, errorInvalidUse/errorWarning for bad state,
// errorSystemError if the drain fails.
enum airtaudio::errorType airtaudio::api::Pulse::stopStream() {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	if (m_stream.state == STREAM_CLOSED) {
		ATA_ERROR("airtaudio::api::Pulse::stopStream(): the stream is not open!");
		return airtaudio::errorInvalidUse;
	}
	if (m_stream.state == STREAM_STOPPED) {
		ATA_ERROR("airtaudio::api::Pulse::stopStream(): the stream is already stopped!");
		return airtaudio::errorWarning;
	}
	m_stream.mutex.lock();
	// Fix: the state was previously written *before* taking the mutex (and
	// then redundantly re-written at the end) — a data race with the
	// callback thread, which reads m_stream.state under this same lock.
	m_stream.state = STREAM_STOPPED;
	if (pah && pah->s_play) {
		int32_t pa_error;
		// Let queued samples play out before reporting the stream stopped.
		if (pa_simple_drain(pah->s_play, &pa_error) < 0) {
			ATA_ERROR("airtaudio::api::Pulse::stopStream: error draining output device, " << pa_strerror(pa_error) << ".");
			m_stream.mutex.unlock();
			return airtaudio::errorSystemError;
		}
	}
	m_stream.mutex.unlock();
	return airtaudio::errorNone;
}
// Abort a running PulseAudio stream immediately, discarding queued output.
// Returns errorNone on success, errorInvalidUse/errorWarning for bad state,
// errorSystemError if the flush fails.
enum airtaudio::errorType airtaudio::api::Pulse::abortStream() {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle);
	if (m_stream.state == STREAM_CLOSED) {
		ATA_ERROR("airtaudio::api::Pulse::abortStream(): the stream is not open!");
		return airtaudio::errorInvalidUse;
	}
	if (m_stream.state == STREAM_STOPPED) {
		ATA_ERROR("airtaudio::api::Pulse::abortStream(): the stream is already stopped!");
		return airtaudio::errorWarning;
	}
	m_stream.mutex.lock();
	// Fix: the state was previously written *before* taking the mutex (and
	// then redundantly re-written at the end) — a data race with the
	// callback thread, which reads m_stream.state under this same lock.
	m_stream.state = STREAM_STOPPED;
	if (pah && pah->s_play) {
		int32_t pa_error;
		// Abort discards pending samples instead of draining them.
		if (pa_simple_flush(pah->s_play, &pa_error) < 0) {
			ATA_ERROR("airtaudio::api::Pulse::abortStream: error flushing output device, " << pa_strerror(pa_error) << ".");
			m_stream.mutex.unlock();
			return airtaudio::errorSystemError;
		}
	}
	m_stream.mutex.unlock();
	return airtaudio::errorNone;
}
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate parameters, allocate user/device buffers, connect to the server
// and (on first open) spawn the shared callback thread.
// Returns true on success; false after releasing everything on failure.
bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
                                            airtaudio::api::StreamMode _mode,
                                            uint32_t _channels,
                                            uint32_t _firstChannel,
                                            uint32_t _sampleRate,
                                            airtaudio::format _format,
                                            uint32_t *_bufferSize,
                                            airtaudio::StreamOptions *_options) {
	PulseAudioHandle *pah = 0;
	uint64_t bufferBytes = 0;
	pa_sample_spec ss;
	// Pulse exposes exactly one virtual device (index 0).
	if (_device != 0) {
		return false;
	}
	if (_mode != INPUT && _mode != OUTPUT) {
		return false;
	}
	if (_channels != 1 && _channels != 2) {
		ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported number of channels.");
		return false;
	}
	ss.channels = _channels;
	// Channel offsets are not supported by this backend.
	if (_firstChannel != 0) {
		return false;
	}
	// Validate the sample rate against the zero-terminated table.
	bool sr_found = false;
	for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
		if (_sampleRate == *sr) {
			sr_found = true;
			m_stream.sampleRate = _sampleRate;
			ss.rate = _sampleRate;
			break;
		}
	}
	if (!sr_found) {
		ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported sample rate.");
		return false;
	}
	// Map the requested airtaudio format to a PulseAudio sample format.
	bool sf_found = 0;
	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
	     sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
	     ++sf) {
		if (_format == sf->airtaudio_format) {
			sf_found = true;
			m_stream.userFormat = sf->airtaudio_format;
			ss.format = sf->pa_format;
			break;
		}
	}
	if (!sf_found) {
		ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported sample format.");
		return false;
	}
	// Set interleaving parameters.
	if (_options && _options->flags & NONINTERLEAVED) {
		m_stream.userInterleaved = false;
	} else {
		m_stream.userInterleaved = true;
	}
	m_stream.deviceInterleaved[_mode] = true;
	m_stream.nBuffers = 1;
	m_stream.doByteSwap[_mode] = false;
	// Conversion is only needed to de-interleave multi-channel user data.
	m_stream.doConvertBuffer[_mode] = _channels > 1 && !m_stream.userInterleaved;
	m_stream.deviceFormat[_mode] = m_stream.userFormat;
	m_stream.nUserChannels[_mode] = _channels;
	m_stream.nDeviceChannels[_mode] = _channels + _firstChannel;
	m_stream.channelOffset[_mode] = 0;
	// Allocate necessary internal buffers.
	bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
	m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
	if (m_stream.userBuffer[_mode] == NULL) {
		ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating user buffer memory.");
		goto error;
	}
	m_stream.bufferSize = *_bufferSize;
	if (m_stream.doConvertBuffer[_mode]) {
		bool makeBuffer = true;
		bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
		if (_mode == INPUT) {
			// In duplex the device buffer is shared: reuse it when the
			// existing output buffer is already large enough.
			if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
				if (bufferBytes <= bytesOut) makeBuffer = false;
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
			m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_stream.deviceBuffer == NULL) {
				ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_stream.device[_mode] = _device;
	// Setup the buffer conversion information structure.
	if (m_stream.doConvertBuffer[_mode]) {
		setConvertInfo(_mode, _firstChannel);
	}
	if (!m_stream.apiHandle) {
		// NOTE(review): this local 'PulseAudioHandle *pah' shadows the outer
		// one; the outer pointer is refreshed from apiHandle just below, so
		// behavior is correct but the shadowing is easy to misread.
		PulseAudioHandle *pah = new PulseAudioHandle;
		if (!pah) {
			ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating memory for handle.");
			goto error;
		}
		m_stream.apiHandle = pah;
	}
	pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	// NOTE(review): the variable 'error' coexists with the 'error:' label;
	// legal C++ (different name spaces) but confusing — consider renaming.
	int32_t error;
	switch (_mode) {
		case INPUT:
			pah->s_rec = pa_simple_new(NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error);
			if (!pah->s_rec) {
				ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error connecting input to PulseAudio server.");
				goto error;
			}
			break;
		case OUTPUT:
			pah->s_play = pa_simple_new(NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error);
			if (!pah->s_play) {
				ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error connecting output to PulseAudio server.");
				goto error;
			}
			break;
		default:
			goto error;
	}
	// Track the combined mode: opening the second direction yields DUPLEX;
	// opening the same direction twice is an error.
	if (m_stream.mode == UNINITIALIZED) {
		m_stream.mode = _mode;
	} else if (m_stream.mode == _mode) {
		goto error;
	}else {
		m_stream.mode = DUPLEX;
	}
	// Spawn the callback thread on the first successful open only.
	if (!m_stream.callbackInfo.isRunning) {
		m_stream.callbackInfo.object = this;
		m_stream.callbackInfo.isRunning = true;
		pah->thread = new std::thread(pulseaudio_callback, (void *)&m_stream.callbackInfo);
		if (pah->thread == NULL) {
			ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error creating thread.");
			goto error;
		}
	}
	m_stream.state = STREAM_STOPPED;
	return true;
error:
	// NOTE(review): the handle is deleted only when isRunning is set, so a
	// handle allocated before the thread started leaks here — and deleting
	// it while the thread IS running looks unsafe; verify this condition.
	if (pah && m_stream.callbackInfo.isRunning) {
		delete pah;
		m_stream.apiHandle = 0;
	}
	for (int32_t i=0; i<2; i++) {
		if (m_stream.userBuffer[i]) {
			free(m_stream.userBuffer[i]);
			m_stream.userBuffer[i] = 0;
		}
	}
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = 0;
	}
	return false;
}
#endif

View File

@@ -1,48 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_PULSE_H__) && defined(__LINUX_PULSE__)
#define __AIRTAUDIO_API_PULSE_H__
namespace airtaudio {
namespace api {
		/**
		 * @brief PulseAudio backend implementation of the airtaudio API
		 * (compiled when __LINUX_PULSE__ is defined).
		 */
		class Pulse: public airtaudio::Api {
			public:
				// Factory used by the interface layer to instantiate this backend.
				static airtaudio::Api* Create();
			public:
				virtual ~Pulse();
				// Identify this backend to the generic interface.
				airtaudio::api::type getCurrentApi() {
					return airtaudio::api::LINUX_PULSE;
				}
				// Pulse exposes a single logical device (the server).
				uint32_t getDeviceCount();
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				enum airtaudio::errorType closeStream();
				enum airtaudio::errorType startStream();
				enum airtaudio::errorType stopStream();
				enum airtaudio::errorType abortStream();
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesirable results!
				void callbackEvent();
			private:
				// Cached device descriptions (filled by saveDeviceInfo).
				std::vector<airtaudio::DeviceInfo> m_devices;
				void saveDeviceInfo();
				// Open one direction of a stream; returns false (after cleanup)
				// on any failure.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
};
};
#endif

View File

@@ -1,105 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if 0
#include <airtaudio/base.h>
// Stream a human-readable name for an error code.
std::ostream& airtaudio::operator <<(std::ostream& _os, enum errorType _obj) {
	if (_obj == errorNone) {
		_os << "errorNone";
	} else if (_obj == errorFail) {
		_os << "errorFail";
	} else if (_obj == errorWarning) {
		_os << "errorWarning";
	} else if (_obj == errorInputNull) {
		_os << "errorInputNull";
	} else if (_obj == errorInvalidUse) {
		_os << "errorInvalidUse";
	} else if (_obj == errorSystemError) {
		_os << "errorSystemError";
	} else {
		_os << "UNKNOW...";
	}
	return _os;
}
// Stream a human-readable name for a sample format flag.
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::format& _obj) {
	if (_obj == SINT8) {
		_os << "SINT8";
	} else if (_obj == SINT16) {
		_os << "SINT16";
	} else if (_obj == SINT24) {
		_os << "SINT24";
	} else if (_obj == SINT32) {
		_os << "SINT32";
	} else if (_obj == FLOAT32) {
		_os << "FLOAT32";
	} else if (_obj == FLOAT64) {
		_os << "FLOAT64";
	} else {
		_os << "UNKNOW...";
	}
	return _os;
}
// Stream a human-readable name for a stream option flag.
// NOTE(review): streamFlags values may be OR'ed together; a combined value
// matches no case and falls through to the default branch.
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj) {
	switch(_obj) {
		case NONINTERLEAVED:
			_os << "NONINTERLEAVED";
			break;
		case MINIMIZE_LATENCY:
			_os << "MINIMIZE_LATENCY";
			break;
		case HOG_DEVICE:
			_os << "HOG_DEVICE";
			break;
		case SCHEDULE_REALTIME:
			_os << "SCHEDULE_REALTIME";
			break;
		case ALSA_USE_DEFAULT:
			_os << "ALSA_USE_DEFAULT";
			break;
		default:
			_os << "UNKNOW...";
			break;
	}
	return _os;
}
// Stream a human-readable name for a stream over/underflow status flag.
// NOTE(review): both flags can be set at once (duplex xrun); that combined
// value falls through to the default branch.
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj) {
	switch(_obj) {
		case INPUT_OVERFLOW:
			_os << "INPUT_OVERFLOW";
			break;
		case OUTPUT_UNDERFLOW:
			_os << "OUTPUT_UNDERFLOW";
			break;
		default:
			_os << "UNKNOW...";
			break;
	}
	return _os;
}
#endif

View File

@@ -1,204 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_ERROR_H__
#define __AIRTAUDIO_ERROR_H__
#include <thread>
#include <condition_variable>
#include <mutex>
// define fixed-width integer types: uintXX_t and intXX_t
#define __STDC_LIMIT_MACROS
// note: on Android, including this overwrites the min/max macros
#include <stdint.h>
#if defined(HAVE_GETTIMEOFDAY)
#include <sys/time.h>
#endif
//#include <etk/Stream.h>
namespace airtaudio {
//! Defined RtError types.
enum errorType {
errorNone, //!< No error
errorFail, //!< An error occure in the operation
errorWarning, //!< A non-critical error.
errorInputNull, //!< null input or internal errror
errorInvalidUse, //!< The function was called incorrectly.
errorSystemError //!< A system error occured.
};
// airtaudio version
static const std::string VERSION("4.0.12");
/**
 * @brief Debug operator to display the current element as human-readable text.
*/
//std::ostream& operator <<(std::ostream& _os, enum errorType _obj);
/**
* @typedef typedef uint64_t format;
* @brief airtaudio data format type.
*
* Support for signed integers and floats. Audio data fed to/from an
* airtaudio stream is assumed to ALWAYS be in host byte order. The
* internal routines will automatically take care of any necessary
* byte-swapping between the host format and the soundcard. Thus,
* endian-ness is not a concern in the following format definitions.
*
* - \e SINT8: 8-bit signed integer.
* - \e SINT16: 16-bit signed integer.
* - \e SINT24: 24-bit signed integer.
* - \e SINT32: 32-bit signed integer.
* - \e FLOAT32: Normalized between plus/minus 1.0.
* - \e FLOAT64: Normalized between plus/minus 1.0.
*/
typedef uint64_t format;
static const format SINT8 = 0x1; // 8-bit signed integer.
static const format SINT16 = 0x2; // 16-bit signed integer.
static const format SINT24 = 0x4; // 24-bit signed integer.
static const format SINT32 = 0x8; // 32-bit signed integer.
static const format FLOAT32 = 0x10; // Normalized between plus/minus 1.0.
static const format FLOAT64 = 0x20; // Normalized between plus/minus 1.0.
/**
 * @brief Debug operator to display the current element as human-readable text.
*/
//std::ostream& operator <<(std::ostream& _os, const airtaudio::format& _obj);
/**
* @typedef typedef uint64_t streamFlags;
* @brief RtAudio stream option flags.
*
* The following flags can be OR'ed together to allow a client to
* make changes to the default stream behavior:
*
* - \e NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
* - \e MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
* - \e HOG_DEVICE: Attempt grab device for exclusive use.
* - \e ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
*
* By default, RtAudio streams pass and receive audio data from the
* client in an interleaved format. By passing the
* RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
* data will instead be presented in non-interleaved buffers. In
* this case, each buffer argument in the RtAudioCallback function
* will point to a single array of data, with \c nFrames samples for
* each channel concatenated back-to-back. For example, the first
* sample of data for the second channel would be located at index \c
* nFrames (assuming the \c buffer pointer was recast to the correct
* data type for the stream).
*
* Certain audio APIs offer a number of parameters that influence the
* I/O latency of a stream. By default, RtAudio will attempt to set
* these parameters internally for robust (glitch-free) performance
* (though some APIs, like Windows Direct Sound, make this difficult).
* By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
* function, internal stream settings will be influenced in an attempt
* to minimize stream latency, though possibly at the expense of stream
* performance.
*
* If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
* open the input and/or output stream device(s) for exclusive use.
* Note that this is not possible with all supported audio APIs.
*
* If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
* to select realtime scheduling (round-robin) for the callback thread.
*
* If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
* open the "default" PCM device when using the ALSA API. Note that this
* will override any specified input or output device id.
*/
//! Bit-mask type: OR together the flag constants below and pass the result to openStream().
typedef uint32_t streamFlags;
static const streamFlags NONINTERLEAVED = 0x1; // Use non-interleaved buffers (default = interleaved).
static const streamFlags MINIMIZE_LATENCY = 0x2; // Attempt to set stream parameters for lowest possible latency.
static const streamFlags HOG_DEVICE = 0x4; // Attempt grab device and prevent use by others.
static const streamFlags SCHEDULE_REALTIME = 0x8; // Try to select realtime scheduling for callback thread.
static const streamFlags ALSA_USE_DEFAULT = 0x10; // Use the "default" PCM device (ALSA only).
/**
* @brief Debug operator To display the curent element in a Human redeable information
*/
//std::ostream& operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj);
/**
* @typedef typedef uint64_t rtaudio::streamStatus;
* @brief RtAudio stream status (over- or underflow) flags.
*
* Notification of a stream over- or underflow is indicated by a
* non-zero stream \c status argument in the RtAudioCallback function.
* The stream status can be one of the following two options,
* depending on whether the stream is open for output and/or input:
*
* - \e RTAUDIO_INPUT_OVERFLOW: Input data was discarded because of an overflow condition at the driver.
* - \e RTAUDIO_OUTPUT_UNDERFLOW: The output buffer ran low, likely producing a break in the output sound.
*/
//! Bit-mask type: non-zero status in the callback signals an xrun condition (see the two flags below).
typedef uint32_t streamStatus;
static const streamStatus INPUT_OVERFLOW = 0x1; // Input data was discarded because of an overflow condition at the driver.
static const streamStatus OUTPUT_UNDERFLOW = 0x2; // The output buffer ran low, likely causing a gap in the output sound.
/**
* @brief Debug operator To display the curent element in a Human redeable information
*/
//std::ostream& operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj);
/**
* @brief RtAudio callback function prototype.
*
* All RtAudio clients must create a function of type RtAudioCallback
* to read and/or write data from/to the audio stream. When the
* underlying audio system is ready for new input or output data, this
* function will be invoked.
*
* @param _outputBuffer For output (or duplex) streams, the client
* should write \c nFrames of audio sample frames into this
* buffer. This argument should be recast to the datatype
* specified when the stream was opened. For input-only
* streams, this argument will be NULL.
*
* @param _inputBuffer For input (or duplex) streams, this buffer will
* hold \c nFrames of input audio sample frames. This
* argument should be recast to the datatype specified when the
* stream was opened. For output-only streams, this argument
* will be NULL.
*
* @param _nFrames The number of sample frames of input or output
* data in the buffers. The actual buffer size in bytes is
* dependent on the data type and number of channels in use.
*
* @param _streamTime The number of seconds that have elapsed since the
* stream was started.
*
* @param _status If non-zero, this argument indicates a data overflow
* or underflow condition for the stream. The particular
* condition can be determined by comparison with the
* streamStatus flags.
*
* @param _userData A pointer to optional data provided by the client
* when opening the stream (default = NULL).
*
* To continue normal stream operation, the RtAudioCallback function
* should return a value of zero. To stop the stream and drain the
* output buffer, the function should return a value of one. To abort
* the stream immediately, the client should return a value of two.
*/
// Plain function-pointer callback; contract documented in the comment block above:
// return 0 to continue, 1 to stop after draining the output, 2 to abort immediately.
typedef int32_t (*AirTAudioCallback)(void *_outputBuffer,
void *_inputBuffer,
uint32_t _nFrames,
double _streamTime,
airtaudio::streamStatus _status,
void *_userData);
}
#include <airtaudio/DeviceInfo.h>
#include <airtaudio/StreamOptions.h>
#include <airtaudio/StreamParameters.h>
#endif

View File

@@ -1,14 +0,0 @@
/**
* @author Edouard DUPIN
*
* @copyright 2011, Edouard DUPIN, all right reserved
*
* @license BSD 3 clauses (see license file)
*/
#include <airtaudio/debug.h>
int32_t airtaudio::getLogId() {
	// Lazily register the "airtaudio" module with elog exactly once and
	// hand back the cached instance identifier on every later call.
	static int32_t s_instanceId = etk::log::registerInstance("airtaudio");
	return s_instanceId;
}

View File

@@ -1,52 +0,0 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_INT24_T_H__
#define __AIRTAUDIO_INT24_T_H__
#pragma pack(push, 1)
class int24_t {
	protected:
		// Packed little-endian storage: c3[0] is the least significant byte.
		uint8_t c3[3];
	public:
		// Default constructor intentionally leaves the value uninitialized
		// (POD-like, so raw audio buffers can be cast to int24_t arrays).
		int24_t() {}
		// Assign from a 32-bit integer, keeping only the low 24 bits.
		int24_t& operator = (const int32_t& i) {
			c3[0] = (i & 0x000000ff);
			c3[1] = (i & 0x0000ff00) >> 8;
			c3[2] = (i & 0x00ff0000) >> 16;
			return *this;
		}
		int24_t(const int24_t& v) {
			*this = v;
		}
		// FIX: converting constructor from int32_t was missing — without it,
		// int24_t(someInt) was ambiguous between the int8/int16/float/double
		// constructors (upstream RtAudio's S24 provides this constructor).
		int24_t(const int32_t& i) {
			*this = i;
		}
		int24_t(const double& d) {
			*this = (int32_t)d;
		}
		int24_t(const float& f) {
			*this = (int32_t)f;
		}
		int24_t(const int16_t& s) {
			*this = (int32_t)s;
		}
		int24_t(const int8_t& c) {
			*this = (int32_t)c;
		}
		// Expand to a sign-extended 32-bit integer (now const-qualified so
		// it can be called on const samples).
		int32_t asInt() const {
			int32_t i = c3[0] | (c3[1] << 8) | (c3[2] << 16);
			if (i & 0x800000) {
				i |= ~0xffffff;
			}
			return i;
		}
};
#pragma pack(pop)
#endif

View File

@@ -0,0 +1,13 @@
/**
* @author Edouard DUPIN
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
/**
 * @brief Shared constants for the Orchestra Java audio glue.
 */
public interface OrchestraConstants {
// Number of audio frames per processing chunk, per channel; the interface
// classes multiply this by their channel count to size the real PCM buffer.
public static final int BUFFER_SIZE = 512;
}

View File

@@ -0,0 +1,119 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.media.AudioRecord;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;
/**
 * @brief Audio capture worker: reads PCM chunks from an android.media.AudioRecord
 * on a dedicated thread and forwards them to the native layer through
 * OrchestraNative.record().
 */
public class OrchestraInterfaceInput implements Runnable, OrchestraConstants {
private Thread m_thread = null;
// Unique stream id allocated by OrchestraManager (-1 = not assigned).
private int m_uid = -1;
// Bridge used to push recorded samples down to C++.
private OrchestraNative m_orchestraNativeHandle;
// m_stop: user asked for a stop; m_suspend: Android activity is paused.
// Either flag makes the run() loop exit.
private boolean m_stop = false;
private boolean m_suspend = false;
private AudioRecord m_audio = null;
private int m_sampleRate = 48000;
private int m_nbChannel = 2;
// Sample format id coming from the native side (only 16-bit PCM is used in run()).
private int m_format = 1;
// Total buffer size in shorts = BUFFER_SIZE frames * channel count.
private int m_bufferSize = BUFFER_SIZE;
/**
 * @param _id Unique stream identifier.
 * @param _instance Native bridge instance.
 * @param _idDevice Requested device id — currently unused here; TODO confirm intended.
 * @param _sampleRate Capture rate in Hz.
 * @param _nbChannel Channel count (1 = mono, otherwise stereo is used).
 * @param _format Sample format id (stored but run() always records 16-bit PCM).
 */
public OrchestraInterfaceInput(int _id, OrchestraNative _instance, int _idDevice, int _sampleRate, int _nbChannel, int _format) {
Log.d("InterfaceInput", "new: Input");
m_uid = _id;
m_orchestraNativeHandle = _instance;
m_stop = false;
m_suspend = false;
m_sampleRate = _sampleRate;
m_nbChannel = _nbChannel;
m_format = _format;
m_bufferSize = BUFFER_SIZE * m_nbChannel;
}
public int getUId() {
return m_uid;
}
/**
 * @brief Thread body: open the AudioRecord, loop reading chunks and pushing
 * them to native code until m_stop or m_suspend is raised, then release.
 */
public void run() {
Log.e("InterfaceInput", "RUN (start)");
// channelConfig is only kept for the commented-out getMinBufferSize() call below.
int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
// we keep the minimum buffer size, otherwite the delay is too big ...
// TODO : int bufferSize = AudioRecord.getMinBufferSize(m_sampleRate, channelConfig, audioFormat);
int config = 0;
if (m_nbChannel == 1) {
config = AudioFormat.CHANNEL_IN_MONO;
} else {
config = AudioFormat.CHANNEL_IN_STEREO;
}
// Create a streaming AudioTrack for music playback
short[] streamBuffer = new short[m_bufferSize];
m_audio = new AudioRecord(MediaRecorder.AudioSource.MIC,
m_sampleRate,
config,
audioFormat,
m_bufferSize);
m_audio.startRecording();
while ( m_stop == false
&& m_suspend == false) {
// Stream PCM data into the local buffer
m_audio.read(streamBuffer, 0, m_bufferSize);
// Send it to C++ (chunk count = frames, hence the division by channel count)
m_orchestraNativeHandle.record(m_uid, streamBuffer, m_bufferSize/m_nbChannel);
}
m_audio.stop();
m_audio = null;
streamBuffer = null;
Log.e("InterfaceInput", "RUN (stop)");
}
/**
 * @brief Start capturing: spawns a new worker thread unless the activity is suspended.
 */
public void autoStart() {
m_stop=false;
if (m_suspend == false) {
Log.e("InterfaceInput", "Create thread");
m_thread = new Thread(this);
Log.e("InterfaceInput", "start thread");
m_thread.start();
Log.e("InterfaceInput", "start thread (done)");
}
}
/**
 * @brief Request the capture loop to exit; no-op when no AudioRecord is live.
 * NOTE(review): the thread reference is dropped without join() — the loop
 * exits asynchronously on its next iteration.
 */
public void autoStop() {
if(m_audio == null) {
return;
}
m_stop=true;
m_thread = null;
/*
try {
super.join();
} catch(InterruptedException e) { }
*/
}
/**
 * @brief Activity came back to foreground: restart the worker thread
 * if a stop was not explicitly requested.
 */
public void activityResume() {
m_suspend = false;
if (m_stop == false) {
Log.i("InterfaceInput", "Resume audio stream : " + m_uid);
m_thread = new Thread(this);
m_thread.start();
}
}
/**
 * @brief Activity went to background: raise the suspend flag so the
 * capture loop terminates (thread is not joined here either).
 */
public void activityPause() {
if(m_audio == null) {
return;
}
m_suspend = true;
Log.i("InterfaceInput", "Pause audio stream : " + m_uid);
m_thread = null;
}
}

View File

@@ -0,0 +1,108 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.media.AudioTrack;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.util.Log;
/**
 * @brief Audio playback worker: pulls PCM chunks from the native layer via
 * OrchestraNative.playback() and writes them to an android.media.AudioTrack.
 * Unlike the input side, this class extends Thread directly.
 */
public class OrchestraInterfaceOutput extends Thread implements OrchestraConstants {
// Unique stream id allocated by OrchestraManager (-1 = not assigned).
private int m_uid = -1;
private OrchestraNative m_orchestraNativeHandle;
// m_stop controls the run() loop; m_suspend is declared but not used here.
private boolean m_stop = false;
private boolean m_suspend = false;
private AudioTrack m_audio = null;
private int m_sampleRate = 48000;
private int m_nbChannel = 2;
// Sample format id from the native side (run() always plays 16-bit PCM).
private int m_format = 1;
// Total buffer size in shorts = BUFFER_SIZE frames * channel count.
private int m_bufferSize = BUFFER_SIZE;
/**
 * @param _id Unique stream identifier.
 * @param _instance Native bridge used to fetch samples to play.
 * @param _idDevice Requested device id — currently unused here; TODO confirm intended.
 * @param _sampleRate Playback rate in Hz.
 * @param _nbChannel Channel count (1 = mono, 4 = quad, otherwise stereo).
 * @param _format Sample format id (stored only).
 */
public OrchestraInterfaceOutput(int _id, OrchestraNative _instance, int _idDevice, int _sampleRate, int _nbChannel, int _format) {
Log.d("InterfaceOutput", "new: output");
m_uid = _id;
m_orchestraNativeHandle = _instance;
m_stop = true;
m_sampleRate = _sampleRate;
m_nbChannel = _nbChannel;
m_format = _format;
m_bufferSize = BUFFER_SIZE * m_nbChannel;
}
public int getUId() {
return m_uid;
}
/**
 * @brief Thread body: open the AudioTrack, loop fetching chunks from C++
 * and writing them to the track until m_stop is raised, then flush/release.
 */
public void run() {
Log.e("InterfaceOutput", "RUN (start)");
// channelConfig is only kept for the commented-out getMinBufferSize() call below.
int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
// we keep the minimum buffer size, otherwite the delay is too big ...
//int bufferSize = AudioTrack.getMinBufferSize(m_sampleRate, channelConfig, audioFormat);
int config = 0;
if (m_nbChannel == 1) {
config = AudioFormat.CHANNEL_OUT_MONO;
} else if (m_nbChannel == 4) {
config = AudioFormat.CHANNEL_OUT_QUAD;
} else {
config = AudioFormat.CHANNEL_OUT_STEREO;
}
// Create a streaming AudioTrack for music playback
short[] streamBuffer = new short[m_bufferSize];
m_audio = new AudioTrack(AudioManager.STREAM_MUSIC,
m_sampleRate,
config,
audioFormat,
m_bufferSize,
AudioTrack.MODE_STREAM);
m_audio.play();
//m_audio.setPositionNotificationPeriod(2048);
while (m_stop == false) {
// Fill buffer with PCM data from C++ (chunk count = frames, hence /m_nbChannel)
m_orchestraNativeHandle.playback(m_uid, streamBuffer, m_bufferSize/m_nbChannel);
// Stream PCM data into the music AudioTrack
m_audio.write(streamBuffer, 0, m_bufferSize);
}
m_audio.flush();
m_audio.stop();
m_audio = null;
streamBuffer = null;
Log.e("InterfaceOutput", "RUN (stop)");
}
/**
 * @brief Start playback by launching this Thread.
 * NOTE(review): a Java Thread can only be start()ed once — restarting after
 * a stop would need a new instance; confirm callers never restart.
 */
public void autoStart() {
m_stop=false;
this.start();
}
/**
 * @brief Request the playback loop to exit and wait for the thread to finish.
 */
public void autoStop() {
if(m_audio == null) {
return;
}
m_stop=true;
try {
super.join();
} catch(InterruptedException e) { }
}
/**
 * @brief Activity back in foreground: resume the paused AudioTrack.
 */
public void activityResume() {
if (m_audio != null) {
Log.i("InterfaceOutput", "Resume audio stream : " + m_uid);
m_audio.play();
}
}
/**
 * @brief Activity in background: pause the AudioTrack (the run() loop keeps
 * spinning; only the hardware output is paused).
 */
public void activityPause() {
if(m_audio == null) {
return;
}
// second null check is redundant with the guard above (kept as-is).
if (m_audio != null) {
Log.i("InterfaceOutput", "Pause audio stream : " + m_uid);
m_audio.pause();
}
}
}

View File

@@ -0,0 +1,262 @@
/**
* @author Edouard DUPIN
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.util.Log;
import java.util.Vector;
//import org.musicdsp.orchestra.Constants;
//import org.musicdsp.orchestra.ManagerCallback;
//import org.musicdsp.orchestra.Orchestra;
//import org.musicdsp.orchestra.InterfaceOutput;
//import org.musicdsp.orchestra.InterfaceInput;
/**
* @brief Class :
*
*/
/**
 * @brief Central Java-side registry of audio streams for the Android backend.
 * Allocates unique stream ids, owns the input/output interface lists, exposes
 * the device catalogue to native code and relays Activity lifecycle events.
 *
 * Fixes over the previous revision:
 *  - closeDevice() logged "Can not start device ..." on failure (wrong verb);
 *  - the four copy/pasted search loops are factored into findInput()/findOutput();
 *  - removed the dead 'if (iface != null)' check after 'new' (never null in Java).
 */
public class OrchestraManager implements OrchestraManagerCallback, OrchestraConstants {
	private OrchestraNative m_orchestraHandle;
	// Next unique stream id to allocate.
	private int m_uid = 0;
	private Vector<OrchestraInterfaceOutput> m_outputList;
	private Vector<OrchestraInterfaceInput> m_inputList;
	public OrchestraManager() {
		// set the java evironement in the C sources :
		m_orchestraHandle = new OrchestraNative(this);
		m_outputList = new Vector<OrchestraInterfaceOutput>();
		m_inputList = new Vector<OrchestraInterfaceInput>();
	}
	/** @brief Find an opened input stream by unique id (null when absent). */
	private OrchestraInterfaceInput findInput(int _uniqueID) {
		if (m_inputList == null) {
			return null;
		}
		for (int iii=0; iii<m_inputList.size(); iii++) {
			OrchestraInterfaceInput elem = m_inputList.get(iii);
			if (elem == null) {
				Log.e("Manager", "Null input element: " + iii);
				continue;
			}
			if (elem.getUId() == _uniqueID) {
				return elem;
			}
		}
		return null;
	}
	/** @brief Find an opened output stream by unique id (null when absent). */
	private OrchestraInterfaceOutput findOutput(int _uniqueID) {
		if (m_outputList == null) {
			return null;
		}
		for (int iii=0; iii<m_outputList.size(); iii++) {
			OrchestraInterfaceOutput elem = m_outputList.get(iii);
			if (elem == null) {
				Log.e("Manager", "Null output element: " + iii);
				continue;
			}
			if (elem.getUId() == _uniqueID) {
				return elem;
			}
		}
		return null;
	}
	/** @brief Number of devices exposed to native code (fixed: speaker + microphone). */
	public int getDeviceCount() {
		Log.e("Manager", "Get device List");
		return 2;
	}
	/** @brief JSON-like description of a device: 0 = speaker, 1 = microphone, else empty. */
	public String getDeviceProperty(int _idDevice) {
		if (_idDevice == 0) {
			return "{\n"
			     + " name:'speaker',\n"
			     + " type:'output',\n"
			     + " sample-rate:[8000,16000,24000,32000,48000,96000],\n"
			     + " channels:['front-left','front-right'],\n"
			     + " format:['int16'],\n"
			     + " default:true\n"
			     + "}";
		} else if (_idDevice == 1) {
			return "{\n"
			     + " name:'microphone',\n"
			     + " type:'input',\n"
			     + " sample-rate:[8000,16000,24000,32000,48000,96000],\n"
			     + " channels:['front-left','front-right'],\n"
			     + " format:['int16'],\n"
			     + " default:true\n"
			     + "}";
		} else {
			return "{}";
		}
	}
	/** @brief Create an output stream; returns its unique id. */
	public int openDeviceOutput(int _idDevice, int _freq, int _nbChannel, int _format) {
		int newUid = m_uid++;
		OrchestraInterfaceOutput iface = new OrchestraInterfaceOutput(newUid, m_orchestraHandle, _idDevice, _freq, _nbChannel, _format);
		Log.e("Manager", "Open device Output: " + _idDevice + " with m_uid=" + newUid);
		m_outputList.add(iface);
		Log.e("Manager", "Added element count=" + m_outputList.size());
		return newUid;
	}
	/** @brief Create an input stream; returns its unique id. */
	public int openDeviceInput(int _idDevice, int _freq, int _nbChannel, int _format) {
		int newUid = m_uid++;
		OrchestraInterfaceInput iface = new OrchestraInterfaceInput(newUid, m_orchestraHandle, _idDevice, _freq, _nbChannel, _format);
		Log.e("Manager", "Open device Input: " + _idDevice + " with m_uid=" + newUid);
		m_inputList.add(iface);
		return newUid;
	}
	/** @brief Remove the stream with the given id from the registry. */
	public boolean closeDevice(int _uniqueID) {
		Log.e("Manager", "Close device : " + _uniqueID);
		if (_uniqueID<0) {
			Log.e("Manager", "Can not Close device with m_uid: " + _uniqueID);
			return false;
		}
		OrchestraInterfaceInput input = findInput(_uniqueID);
		if (input != null) {
			m_inputList.remove(input);
			return true;
		}
		OrchestraInterfaceOutput output = findOutput(_uniqueID);
		if (output != null) {
			m_outputList.remove(output);
			return true;
		}
		// FIX: previous message wrongly said "start" on the close path.
		Log.e("Manager", "Can not close device with m_uid: " + _uniqueID + " Element does not exist ...");
		return false;
	}
	/** @brief Start the stream with the given id. */
	public boolean start(int _uniqueID) {
		Log.e("Manager", "start device : " + _uniqueID);
		if (_uniqueID<0) {
			Log.e("Manager", "Can not start device with m_uid: " + _uniqueID);
			return false;
		}
		OrchestraInterfaceInput input = findInput(_uniqueID);
		if (input != null) {
			input.autoStart();
			return true;
		}
		OrchestraInterfaceOutput output = findOutput(_uniqueID);
		if (output != null) {
			output.autoStart();
			return true;
		}
		Log.e("Manager", "Can not start device with UID: " + _uniqueID + " Element does not exist ...");
		return false;
	}
	/** @brief Stop the stream with the given id. */
	public boolean stop(int _uniqueID) {
		Log.e("Manager", "stop device : " + _uniqueID);
		if (_uniqueID<0) {
			Log.e("Manager", "Can not stop device with UID: " + _uniqueID);
			return false;
		}
		OrchestraInterfaceInput input = findInput(_uniqueID);
		if (input != null) {
			input.autoStop();
			return true;
		}
		OrchestraInterfaceOutput output = findOutput(_uniqueID);
		if (output != null) {
			output.autoStop();
			return true;
		}
		Log.e("Manager", "Can not stop device with UID: " + _uniqueID + " Element does not exist ...");
		return false;
	}
	// ---- Activity lifecycle relays ----
	public void onCreate() {
		Log.w("Manager", "onCreate ...");
		// nothing to do ...
	}
	public void onStart() {
		Log.w("Manager", "onStart ...");
		// nothing to do ...
	}
	public void onRestart() {
		Log.w("Manager", "onRestart ...");
		// nothing to do ...
	}
	/** @brief Forward the resume event to every registered stream. */
	public void onResume() {
		Log.w("Manager", "onResume ...");
		if (m_inputList != null) {
			for (OrchestraInterfaceInput elem : m_inputList) {
				if (elem != null) {
					elem.activityResume();
				}
			}
		}
		if (m_outputList != null) {
			for (OrchestraInterfaceOutput elem : m_outputList) {
				if (elem != null) {
					elem.activityResume();
				}
			}
		}
	}
	/** @brief Forward the pause event to every registered stream. */
	public void onPause() {
		Log.w("Manager", "onPause ...");
		if (m_inputList != null) {
			for (OrchestraInterfaceInput elem : m_inputList) {
				if (elem != null) {
					elem.activityPause();
				}
			}
		}
		if (m_outputList != null) {
			for (OrchestraInterfaceOutput elem : m_outputList) {
				if (elem != null) {
					elem.activityPause();
				}
			}
		}
	}
	public void onStop() {
		Log.w("Manager", "onStop ...");
	}
	public void onDestroy() {
		Log.w("Manager", "onDestroy ...");
	}
}

View File

@@ -0,0 +1,19 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
/**
 * @brief Contract the audio manager offers to the native (JNI) side.
 * Implemented by OrchestraManager; presumably invoked from C++ through the
 * handle registered via OrchestraNative.NNsetJavaManager() — confirm in JNI code.
 */
public interface OrchestraManagerCallback {
/** @return number of audio devices available. */
public int getDeviceCount();
/** @return JSON-like textual description of the given device. */
public String getDeviceProperty(int _idDevice);
/** @return unique stream id, or a negative value on failure. */
public int openDeviceInput(int _idDevice, int _sampleRate, int _nbChannel, int _format);
/** @return unique stream id, or a negative value on failure. */
public int openDeviceOutput(int _idDevice, int _sampleRate, int _nbChannel, int _format);
/** @return true when the stream existed and was removed. */
public boolean closeDevice(int _uniqueID);
/** @return true when the stream existed and was started. */
public boolean start(int _uniqueID);
/** @return true when the stream existed and was stopped. */
public boolean stop(int _uniqueID);
}

View File

@@ -0,0 +1,43 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import java.lang.UnsatisfiedLinkError;
import java.lang.RuntimeException;
import android.util.Log;
/**
 * @brief Thin JNI bridge between the Java audio glue and the native orchestra
 * backend. The NN*-prefixed methods are implemented in C++; constructing this
 * class registers the Java manager with the native side.
 */
public class OrchestraNative {
/**
 * @brief Register the Java manager callback with the native library.
 * @throws RuntimeException when the JNI symbols are not loaded.
 */
public <T extends OrchestraManagerCallback> OrchestraNative(T _managerInstance) {
try {
NNsetJavaManager(_managerInstance);
} catch (java.lang.UnsatisfiedLinkError e) {
Log.e("Orchestra", "JNI binding not present ...");
throw new RuntimeException("Orchestra binding not present ...");
}
Log.d("Orchestra", "new ...");
}
/** @brief Unregister the Java manager from the native side. */
public void setManagerRemove() {
NNsetJavaManagerRemove();
}
/** @brief Ask C++ to fill _bufferData with _nbChunk frames to play on flow _flowId. */
public void playback(int _flowId, short[] _bufferData, int _nbChunk) {
NNPlayback(_flowId, _bufferData, _nbChunk);
}
/** @brief Push _nbChunk recorded frames from _bufferData to C++ for flow _flowId. */
public void record(int _flowId, short[] _bufferData, int _nbChunk) {
NNRecord(_flowId, _bufferData, _nbChunk);
}
private native <T extends OrchestraManagerCallback> void NNsetJavaManager(T _managerInstance);
private native void NNsetJavaManagerRemove();
private native void NNPlayback(int _flowId, short[] _bufferData, int _nbChunk);
private native void NNRecord(int _flowId, short[] _bufferData, int _nbChunk);
}

481
audio/orchestra/Api.cpp Normal file
View File

@@ -0,0 +1,481 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
// Static variable definitions.
/**
 * @brief Shared list of sample rates (in Hz) a backend may probe for support.
 * @return Reference to a process-wide immutable list.
 *
 * FIX: the previous lazy `if (list.size() == 0) push_back(...)` initialization
 * was not thread-safe (two callers could fill the vector concurrently). A
 * C++11 function-local static with a braced initializer is initialized exactly
 * once, thread-safely. Also drops the stray ';' after the function body.
 */
const std::vector<uint32_t>& audio::orchestra::genericSampleRate() {
	static const std::vector<uint32_t> list = {
		4000, 5512, 8000, 9600, 11025, 16000,
		22050, 32000, 44100, 48000, 64000,
		88200, 96000, 128000, 176400, 192000, 256000
	};
	return list;
}
// Construct an Api with no open stream: callback and conversion buffer unset,
// state 'closed', mode unknown. 11111 appears to be the "no device selected"
// sentinel (the same value is re-applied in clearStreamInfo()) — confirm.
audio::orchestra::Api::Api() :
m_callback(nullptr),
m_deviceBuffer(nullptr) {
m_device[0] = 11111;
m_device[1] = 11111;
m_state = audio::orchestra::state::closed;
m_mode = audio::orchestra::mode_unknow;
}
// No resources are released at this level; concrete backends clean up their own.
audio::orchestra::Api::~Api() {
}
// Base implementation: record the wall-clock start time and reset the elapsed
// duration counter. Backend subclasses presumably extend this to actually
// start their stream — confirm against the backend implementations.
enum audio::orchestra::error audio::orchestra::Api::startStream() {
ATA_VERBOSE("Start Stream");
m_startTime = audio::Time::now();
m_duration = std::chrono::microseconds(0);
return audio::orchestra::error_none;
}
/**
 * @brief Validate user parameters, then open the requested output and/or input
 * side through the backend's open()/openName() and install the callback.
 * @param _oParams Output stream parameters (nullptr = no output side).
 * @param _iParams Input stream parameters (nullptr = no input side).
 * @param _format Sample format requested by the user.
 * @param _sampleRate Requested sample rate in Hz.
 * @param _bufferFrames In/out: requested then granted buffer size in frames.
 * @param _callback User callback invoked for each audio buffer.
 * @param _options Extra stream options forwarded to the backend.
 * @return error_none on success, error_invalidUse for bad arguments,
 *         error_systemError when the backend open fails.
 */
enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra::StreamParameters* _oParams,
audio::orchestra::StreamParameters* _iParams,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options) {
// One Api instance drives at most one stream at a time.
if (m_state != audio::orchestra::state::closed) {
ATA_ERROR("a stream is already open!");
return audio::orchestra::error_invalidUse;
}
if ( _oParams != nullptr
&& _oParams->nChannels < 1) {
ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one.");
return audio::orchestra::error_invalidUse;
}
if ( _iParams != nullptr
&& _iParams->nChannels < 1) {
ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one.");
return audio::orchestra::error_invalidUse;
}
if ( _oParams == nullptr
&& _iParams == nullptr) {
ATA_ERROR("input and output StreamParameters structures are both nullptr!");
return audio::orchestra::error_invalidUse;
}
// getFormatBytes() returning 0 marks an unknown/unsupported sample format.
if (audio::getFormatBytes(_format) == 0) {
ATA_ERROR("'format' parameter value is undefined.");
return audio::orchestra::error_invalidUse;
}
uint32_t nDevices = getDeviceCount();
uint32_t oChannels = 0;
if (_oParams != nullptr) {
oChannels = _oParams->nChannels;
// A device may be selected by id OR by name; an out-of-range id is only an
// error when no name is provided as fallback.
if ( _oParams->deviceId >= nDevices
&& _oParams->deviceName == "") {
ATA_ERROR("output device parameter value is invalid.");
return audio::orchestra::error_invalidUse;
}
}
uint32_t iChannels = 0;
if (_iParams != nullptr) {
iChannels = _iParams->nChannels;
if ( _iParams->deviceId >= nDevices
&& _iParams->deviceName == "") {
ATA_ERROR("input device parameter value is invalid.");
return audio::orchestra::error_invalidUse;
}
}
// Reset all bookkeeping before the backend fills it in.
clearStreamInfo();
bool result;
if (oChannels > 0) {
// deviceId == -1 means "resolve by name" (openName), otherwise open by id.
if (_oParams->deviceId == -1) {
result = openName(_oParams->deviceName,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
} else {
result = open(_oParams->deviceId,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
}
if (result == false) {
ATA_ERROR("system ERROR");
return audio::orchestra::error_systemError;
}
}
if (iChannels > 0) {
if (_iParams->deviceId == -1) {
result = openName(_iParams->deviceName,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
} else {
result = open(_iParams->deviceId,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
}
if (result == false) {
// Roll back the already-opened output side before reporting failure.
if (oChannels > 0) {
closeStream();
}
ATA_ERROR("system error");
return audio::orchestra::error_systemError;
}
}
m_callback = _callback;
//_options.numberOfBuffers = m_nBuffers;
m_state = audio::orchestra::state::stopped;
return audio::orchestra::error_none;
}
// Default input device id; base implementation always reports device 0.
uint32_t audio::orchestra::Api::getDefaultInputDevice() {
// Should be implemented in subclasses if possible.
return 0;
}
// Default output device id; base implementation always reports device 0.
uint32_t audio::orchestra::Api::getDefaultOutputDevice() {
// Should be implemented in subclasses if possible.
return 0;
}
// Base close is a no-op that reports success; backends override it to
// actually release their stream resources.
enum audio::orchestra::error audio::orchestra::Api::closeStream() {
ATA_VERBOSE("Close Stream");
// MUST be implemented in subclasses!
return audio::orchestra::error_none;
}
// Base open always fails (returns false): every concrete backend must
// override this to configure its device.
bool audio::orchestra::Api::open(uint32_t /*device*/,
audio::orchestra::mode /*mode*/,
uint32_t /*channels*/,
uint32_t /*firstChannel*/,
uint32_t /*sampleRate*/,
audio::format /*format*/,
uint32_t * /*bufferSize*/,
const audio::orchestra::StreamOptions& /*options*/) {
// MUST be implemented in subclasses!
return false;
}
// Advance the logical stream clock by exactly one processed buffer:
// duration += bufferSize / sampleRate, computed in nanoseconds with
// integer arithmetic to avoid floating-point drift.
void audio::orchestra::Api::tickStreamTime() {
//ATA_WARNING("tick : size=" << m_bufferSize << " rate=" << m_sampleRate << " time=" << audio::Duration((int64_t(m_bufferSize) * int64_t(1000000000)) / int64_t(m_sampleRate)).count());
//ATA_WARNING(" one element=" << audio::Duration((int64_t(1000000000)) / int64_t(m_sampleRate)).count());
m_duration += audio::Duration((int64_t(m_bufferSize) * int64_t(1000000000)) / int64_t(m_sampleRate));
}
/**
 * @brief Total stream latency in frames: output latency plus input latency,
 * each counted only when the corresponding direction is active.
 * @return 0 when no stream is open.
 */
long audio::orchestra::Api::getStreamLatency() {
	if (verifyStream() != audio::orchestra::error_none) {
		return 0;
	}
	long total = 0;
	const bool isDuplex = (m_mode == audio::orchestra::mode_duplex);
	if (isDuplex || m_mode == audio::orchestra::mode_output) {
		total += m_latency[0];
	}
	if (isDuplex || m_mode == audio::orchestra::mode_input) {
		total += m_latency[1];
	}
	return total;
}
// Current logical stream time = start time + accumulated buffer duration
// (advanced by tickStreamTime()); default-constructed Time when no stream is open.
audio::Time audio::orchestra::Api::getStreamTime() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::Time();
}
return m_startTime + m_duration;
}
// Sample rate of the open stream in Hz; 0 when no stream is open.
uint32_t audio::orchestra::Api::getStreamSampleRate() {
if (verifyStream() != audio::orchestra::error_none) {
return 0;
}
return m_sampleRate;
}
// Guard used by the accessors above: error_invalidUse when no stream is open,
// error_none otherwise.
enum audio::orchestra::error audio::orchestra::Api::verifyStream() {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("a stream is not open!");
return audio::orchestra::error_invalidUse;
}
return audio::orchestra::error_none;
}
// Reset every piece of per-stream bookkeeping to its "no stream" value.
// Called before a backend open() fills the fields in (see openStream()).
void audio::orchestra::Api::clearStreamInfo() {
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state::closed;
m_sampleRate = 0;
m_bufferSize = 0;
m_nBuffers = 0;
m_userFormat = audio::format_unknow;
m_startTime = audio::Time();
m_duration = audio::Duration(0);
m_deviceBuffer = nullptr;
m_callback = nullptr;
// Index 0 = output side, index 1 = input side (matches modeToIdTable usage).
for (int32_t iii=0; iii<2; ++iii) {
// 11111 is the "no device selected" sentinel (same value as the constructor).
m_device[iii] = 11111;
m_doConvertBuffer[iii] = false;
m_deviceInterleaved[iii] = true;
m_doByteSwap[iii] = false;
m_nUserChannels[iii] = 0;
m_nDeviceChannels[iii] = 0;
m_channelOffset[iii] = 0;
m_deviceFormat[iii] = audio::format_unknow;
m_latency[iii] = 0;
m_userBuffer[iii].clear();
m_convertInfo[iii].channels = 0;
m_convertInfo[iii].inJump = 0;
m_convertInfo[iii].outJump = 0;
m_convertInfo[iii].inFormat = audio::format_unknow;
m_convertInfo[iii].outFormat = audio::format_unknow;
m_convertInfo[iii].inOffset.clear();
m_convertInfo[iii].outOffset.clear();
}
}
/**
 * @brief Pre-compute the copy plan (jumps and per-channel offsets) used by
 * convertBuffer() for one direction of the stream.
 * @param _mode mode_input (device -> user) or output (user -> device).
 * @param _firstChannel First device channel the user channels map onto.
 */
void audio::orchestra::Api::setConvertInfo(audio::orchestra::mode _mode, uint32_t _firstChannel) {
int32_t idTable = audio::orchestra::modeToIdTable(_mode);
if (_mode == audio::orchestra::mode_input) { // convert device to user buffer
m_convertInfo[idTable].inJump = m_nDeviceChannels[1];
m_convertInfo[idTable].outJump = m_nUserChannels[1];
m_convertInfo[idTable].inFormat = m_deviceFormat[1];
m_convertInfo[idTable].outFormat = m_userFormat;
} else { // convert user to device buffer
m_convertInfo[idTable].inJump = m_nUserChannels[0];
m_convertInfo[idTable].outJump = m_nDeviceChannels[0];
m_convertInfo[idTable].inFormat = m_userFormat;
m_convertInfo[idTable].outFormat = m_deviceFormat[0];
}
// Only as many channels as both sides actually have are copied.
if (m_convertInfo[idTable].inJump < m_convertInfo[idTable].outJump) {
m_convertInfo[idTable].channels = m_convertInfo[idTable].inJump;
} else {
m_convertInfo[idTable].channels = m_convertInfo[idTable].outJump;
}
// Set up the interleave/deinterleave offsets.
// Non-interleaved layout: each channel is a contiguous run of m_bufferSize
// samples, so the channel offset is kkk * m_bufferSize and the per-frame
// jump on that side collapses to 1.
if (m_deviceInterleaved[idTable] == false) {
if (_mode == audio::orchestra::mode_input) {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk * m_bufferSize);
m_convertInfo[idTable].outOffset.push_back(kkk);
m_convertInfo[idTable].inJump = 1;
}
} else {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk);
m_convertInfo[idTable].outOffset.push_back(kkk * m_bufferSize);
m_convertInfo[idTable].outJump = 1;
}
}
} else { // no (de)interleaving
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk);
m_convertInfo[idTable].outOffset.push_back(kkk);
}
}
// Add channel offset.
// Shift the device-side offsets so the user's first channel lands on
// _firstChannel: by +_firstChannel samples when interleaved, by whole
// channel blocks (+_firstChannel * m_bufferSize) when non-interleaved.
if (_firstChannel > 0) {
if (m_deviceInterleaved[idTable]) {
if (_mode == audio::orchestra::mode_output) {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].outOffset[kkk] += _firstChannel;
}
} else {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset[kkk] += _firstChannel;
}
}
} else {
if (_mode == audio::orchestra::mode_output) {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].outOffset[kkk] += (_firstChannel * m_bufferSize);
}
} else {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset[kkk] += (_firstChannel * m_bufferSize);
}
}
}
}
}
void audio::orchestra::Api::convertBuffer(char *_outBuffer, char *_inBuffer, audio::orchestra::ConvertInfo &_info) {
// This function does format conversion, input/output channel compensation, and
// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
// the lower three bytes of a 32-bit integer.
// Clear our device buffer when in/out duplex device channels are different
if ( _outBuffer == m_deviceBuffer
&& m_mode == audio::orchestra::mode_duplex
&& m_nDeviceChannels[0] < m_nDeviceChannels[1]) {
memset(_outBuffer, 0, m_bufferSize * _info.outJump * audio::getFormatBytes(_info.outFormat));
}
switch (audio::getFormatBytes(_info.outFormat)) {
case 1:
{
uint8_t *out = reinterpret_cast<uint8_t*>(_outBuffer);
uint8_t *in = reinterpret_cast<uint8_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 2:
{
uint16_t *out = reinterpret_cast<uint16_t*>(_outBuffer);
uint16_t *in = reinterpret_cast<uint16_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 4:
{
uint32_t *out = reinterpret_cast<uint32_t*>(_outBuffer);
uint32_t *in = reinterpret_cast<uint32_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 8:
{
uint64_t *out = reinterpret_cast<uint64_t*>(_outBuffer);
uint64_t *in = reinterpret_cast<uint64_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
}
}
/**
 * @brief Reverse the byte order of every sample in _buffer, in place
 * (little-endian <-> big-endian).
 * @param _buffer Raw sample data.
 * @param _samples Number of samples (not bytes) to swap.
 * @param _format Sample format, which fixes the per-sample width; unknown
 *        formats are left untouched.
 */
void audio::orchestra::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, audio::format _format) {
	char val;
	char *ptr = _buffer;
	if (_format == audio::format_int16) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap the 2 bytes of the sample, then advance one sample.
			val = ptr[0];
			ptr[0] = ptr[1];
			ptr[1] = val;
			ptr += 2;
		}
	} else if ( _format == audio::format_int32
	         || _format == audio::format_float) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Reverse the 4 bytes of the sample.
			val = ptr[0];
			ptr[0] = ptr[3];
			ptr[3] = val;
			val = ptr[1];
			ptr[1] = ptr[2];
			ptr[2] = val;
			ptr += 4;
		}
	} else if (_format == audio::format_int24) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap 1st and 3rd bytes of the packed 3-byte sample.
			val = ptr[0];
			ptr[0] = ptr[2];
			ptr[2] = val;
			// FIX: advance a full 3-byte sample. The previous code advanced
			// only 2 bytes, so from the second sample on, the swap straddled
			// sample boundaries and corrupted the buffer (upstream RtAudio
			// advances 3 here; int24 samples are 3 packed bytes, cf. int24_t).
			ptr += 3;
		}
	} else if (_format == audio::format_double) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Reverse the 8 bytes of the sample.
			val = ptr[0];
			ptr[0] = ptr[7];
			ptr[7] = val;
			val = ptr[1];
			ptr[1] = ptr[6];
			ptr[6] = val;
			val = ptr[2];
			ptr[2] = ptr[5];
			ptr[5] = val;
			val = ptr[3];
			ptr[3] = ptr[4];
			ptr[4] = val;
			ptr += 8;
		}
	}
}

175
audio/orchestra/Api.h Normal file
View File

@@ -0,0 +1,175 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <sstream>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/type.h>
#include <audio/orchestra/state.h>
#include <audio/orchestra/mode.h>
#include <audio/Time.h>
#include <audio/Duration.h>
#include <ememory/memory.h>
namespace audio {
namespace orchestra {
const std::vector<uint32_t>& genericSampleRate();
/**
* @brief airtaudio callback function prototype.
* @param _inputBuffer For input (or duplex) streams, this buffer will hold _nbChunk of input audio chunk (nullptr if no data).
* @param _timeInput Timestamp of the first buffer sample (recording time).
* @param _outputBuffer For output (or duplex) streams, the client should write _nbChunk of audio chunk into this buffer (nullptr if no data).
* @param _timeOutput Timestamp of the first buffer sample (playing time).
* @param _nbChunk The number of chunk of input or output chunk in the buffer (same size).
* @param _status List of error that occured in the laps of time.
*/
typedef std::function<int32_t (const void* _inputBuffer,
const audio::Time& _timeInput,
void* _outputBuffer,
const audio::Time& _timeOutput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status)> AirTAudioCallback;
// A protected structure used for buffer conversion.
class ConvertInfo {
public:
int32_t channels;
int32_t inJump;
int32_t outJump;
enum audio::format inFormat;
enum audio::format outFormat;
std::vector<int> inOffset;
std::vector<int> outOffset;
};
class Api : public ememory::EnableSharedFromThis<Api>{
protected:
std::string m_name;
public:
Api();
virtual ~Api();
void setName(const std::string& _name) {
m_name = _name;
}
virtual const std::string& getCurrentApi() = 0;
virtual uint32_t getDeviceCount() = 0;
virtual audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
// TODO : Check API ...
virtual bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
return false;
}
virtual uint32_t getDefaultInputDevice();
virtual uint32_t getDefaultOutputDevice();
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters* _outputParameters,
audio::orchestra::StreamParameters* _inputParameters,
audio::format _format,
uint32_t _sampleRate,
uint32_t* _nbChunk,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options);
virtual enum audio::orchestra::error closeStream();
virtual enum audio::orchestra::error startStream();
virtual enum audio::orchestra::error stopStream() = 0;
virtual enum audio::orchestra::error abortStream() = 0;
long getStreamLatency();
uint32_t getStreamSampleRate();
virtual audio::Time getStreamTime();
bool isStreamOpen() const {
return m_state != audio::orchestra::state::closed;
}
bool isStreamRunning() const {
return m_state == audio::orchestra::state::running;
}
protected:
mutable std::mutex m_mutex;
audio::orchestra::AirTAudioCallback m_callback;
uint32_t m_device[2]; // Playback and record, respectively.
enum audio::orchestra::mode m_mode; // audio::orchestra::mode_output, audio::orchestra::mode_input, or audio::orchestra::mode_duplex.
enum audio::orchestra::state m_state; // STOPPED, RUNNING, or CLOSED
std::vector<char> m_userBuffer[2]; // Playback and record, respectively.
char *m_deviceBuffer;
bool m_doConvertBuffer[2]; // Playback and record, respectively.
bool m_deviceInterleaved[2]; // Playback and record, respectively.
bool m_doByteSwap[2]; // Playback and record, respectively.
uint32_t m_sampleRate; // TODO : Rename frequency
uint32_t m_bufferSize;
uint32_t m_nBuffers;
uint32_t m_nUserChannels[2]; // Playback and record, respectively. // TODO : set only one config (open inout with the same number of channels (limitation)
uint32_t m_nDeviceChannels[2]; // Playback and record channels, respectively.
uint32_t m_channelOffset[2]; // Playback and record, respectively.
uint64_t m_latency[2]; // Playback and record, respectively.
enum audio::format m_userFormat; // TODO : Remove this ==> use can only open in the Harware format ...
enum audio::format m_deviceFormat[2]; // Playback and record, respectively.
audio::orchestra::ConvertInfo m_convertInfo[2];
//audio::Time
audio::Time m_startTime; //!< start time of the stream (restart at every stop, pause ...)
audio::Duration m_duration; //!< duration from wich the stream is started
/**
* @brief api-specific method that attempts to open a device
* with the given parameters. This function MUST be implemented by
* all subclasses. If an error is encountered during the probe, a
* "warning" message is reported and false is returned. A
* successful probe is indicated by a return value of true.
*/
virtual bool open(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual bool openName(const std::string& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) { return false; }
/**
* @brief Increment the stream time.
*/
void tickStreamTime();
/**
* @brief Clear an RtApiStream structure.
*/
void clearStreamInfo();
/**
* @brief Check the current stream status
*/
enum audio::orchestra::error verifyStream();
/**
* @brief Protected method used to perform format, channel number, and/or interleaving
* conversions between the user and device buffers.
*/
void convertBuffer(char *_outBuffer,
char *_inBuffer,
audio::orchestra::ConvertInfo& _info);
/**
* @brief Perform byte-swapping on buffers.
*/
void byteSwapBuffer(char *_buffer,
uint32_t _samples,
enum audio::format _format);
/**
* @brief Sets up the parameters for buffer conversion.
*/
void setConvertInfo(enum audio::orchestra::mode _mode,
uint32_t _firstChannel);
public:
virtual bool isMasterOf(ememory::SharedPtr<audio::orchestra::Api> _api) {
return false;
};
};
}
}

View File

View File

@@ -0,0 +1,62 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/DeviceInfo.h>
#include <etk/stdTools.h>
#include <iostream>
void audio::orchestra::DeviceInfo::display(int32_t _tabNumber) const {
std::string space;
for (int32_t iii=0; iii<_tabNumber; ++iii) {
space += " ";
}
if (isCorrect == false) {
ATA_PRINT(space + "NOT CORRECT INFORAMATIONS");
return;
}
ATA_PRINT(space + "mode=" << (input==true?"input":"output"));
ATA_PRINT(space + "name=" << name);
if (desc.size() != 0) {
ATA_PRINT(space + "desc=" << desc);
}
ATA_PRINT(space + "channel" << (channels.size()>1?"s":"") << "=" << channels.size() << " : " << channels);
ATA_PRINT(space + "rate" << (sampleRates.size()>1?"s":"") << "=" << sampleRates);
ATA_PRINT(space + "native Format" << (nativeFormats.size()>1?"s":"") << ": " << nativeFormats);
ATA_PRINT(space + "default=" << (isDefault==true?"true":"false"));
}
void audio::orchestra::DeviceInfo::clear() {
isCorrect = false;
input = false;
name = "";
desc = "";
channels.clear();
sampleRates.clear();
nativeFormats.clear();
isDefault = false;
}
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj) {
_os << "{";
if (_obj.isCorrect == false) {
_os << "NOT CORRECT INFORAMATIONS";
} else {
_os << "name=" << _obj.name << ", ";
if (_obj.desc.size() != 0) {
_os << "description=" << _obj.desc << ", ";
}
_os << "channels=" << _obj.channels << ", ";
_os << "default=" << _obj.isDefault << ", ";
_os << "rates=" << _obj.sampleRates << ", ";
_os << "native Format: " << _obj.nativeFormats;
}
_os << "}";
return _os;
}

View File

@@ -0,0 +1,49 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <audio/format.h>
#include <audio/channel.h>
namespace audio {
namespace orchestra {
/**
* @brief The public device information structure for returning queried values.
*/
class DeviceInfo {
public:
bool isCorrect; //!< the information is correct (the system can return information incorect).
bool input; //!< true if the device in an input; false: output.
std::string name; //!< Character string device identifier.
std::string desc; //!< description of the device
std::vector<audio::channel> channels; //!< Channels interfaces.
std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
std::vector<audio::format> nativeFormats; //!< Bit mask of supported data formats.
bool isDefault; //! is default input/output
// Default constructor.
DeviceInfo() :
isCorrect(false),
input(false),
name(),
desc(),
channels(),
sampleRates(),
nativeFormats(),
isDefault(false) {}
/**
* @brief Display the current information of the device (on console)
*/
void display(int32_t _tabNumber = 1) const;
/**
* @brief Clear all internal data
*/
void clear();
};
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj);
}
}

View File

@@ -0,0 +1,9 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/debug.h>

22
audio/orchestra/Flags.h Normal file
View File

@@ -0,0 +1,22 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
namespace audio {
namespace orchestra {
class Flags {
public:
bool m_minimizeLatency; // Simple example ==> TODO ...
Flags() :
m_minimizeLatency(false) {
// nothing to do ...
}
};
}
}

View File

@@ -0,0 +1,187 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <audio/orchestra/api/Alsa.h>
#include <audio/orchestra/api/Android.h>
#include <audio/orchestra/api/Asio.h>
#include <audio/orchestra/api/Core.h>
#include <audio/orchestra/api/CoreIos.h>
#include <audio/orchestra/api/Ds.h>
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/api/Jack.h>
#include <audio/orchestra/api/Pulse.h>
std::vector<std::string> audio::orchestra::Interface::getListApi() {
std::vector<std::string> apis;
// The order here will control the order of RtAudio's API search in
// the constructor.
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
apis.push_back(m_apiAvaillable[iii].first);
}
return apis;
}
void audio::orchestra::Interface::openApi(const std::string& _api) {
m_api.reset();
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
ATA_INFO("try open " << m_apiAvaillable[iii].first);
if (_api == m_apiAvaillable[iii].first) {
ATA_INFO(" ==> call it");
m_api = m_apiAvaillable[iii].second();
if (m_api != nullptr) {
return;
}
}
}
// TODO : An error occured ...
ATA_ERROR("Error in open API ...");
}
audio::orchestra::Interface::Interface() :
m_api(nullptr) {
ATA_DEBUG("Add interface:");
#if defined(ORCHESTRA_BUILD_JACK)
addInterface(audio::orchestra::typeJack, audio::orchestra::api::Jack::create);
#endif
#if defined(ORCHESTRA_BUILD_ALSA)
addInterface(audio::orchestra::typeAlsa, audio::orchestra::api::Alsa::create);
#endif
#if defined(ORCHESTRA_BUILD_PULSE)
addInterface(audio::orchestra::typePulse, audio::orchestra::api::Pulse::create);
#endif
#if defined(ORCHESTRA_BUILD_ASIO)
addInterface(audio::orchestra::typeAsio, audio::orchestra::api::Asio::create);
#endif
#if defined(ORCHESTRA_BUILD_DS)
addInterface(audio::orchestra::typeDs, audio::orchestra::api::Ds::create);
#endif
#if defined(ORCHESTRA_BUILD_MACOSX_CORE)
addInterface(audio::orchestra::typeCoreOSX, audio::orchestra::api::Core::create);
#endif
#if defined(ORCHESTRA_BUILD_IOS_CORE)
addInterface(audio::orchestra::typeCoreIOS, audio::orchestra::api::CoreIos::create);
#endif
#if defined(ORCHESTRA_BUILD_JAVA)
addInterface(audio::orchestra::typeJava, audio::orchestra::api::Android::create);
#endif
#if defined(ORCHESTRA_BUILD_DUMMY)
addInterface(audio::orchestra::typeDummy, audio::orchestra::api::Dummy::create);
#endif
}
void audio::orchestra::Interface::addInterface(const std::string& _api, ememory::SharedPtr<Api> (*_callbackCreate)()) {
m_apiAvaillable.push_back(std::pair<std::string, ememory::SharedPtr<Api> (*)()>(_api, _callbackCreate));
}
enum audio::orchestra::error audio::orchestra::Interface::clear() {
ATA_INFO("Clear API ...");
if (m_api == nullptr) {
ATA_WARNING("Interface NOT started!");
return audio::orchestra::error_none;
}
m_api.reset();
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::Interface::instanciate(const std::string& _api) {
ATA_INFO("Instanciate API ...");
if (m_api != nullptr) {
ATA_WARNING("Interface already started!");
return audio::orchestra::error_none;
}
if (_api != audio::orchestra::typeUndefined) {
ATA_INFO("API specified : " << _api);
// Attempt to open the specified API.
openApi(_api);
if (m_api != nullptr) {
if (m_api->getDeviceCount() != 0) {
ATA_INFO(" ==> api open");
}
return audio::orchestra::error_none;
}
// No compiled support for specified API value. Issue a debug
// warning and continue as if no API was specified.
ATA_ERROR("API NOT Supported '" << _api << "' not in " << getListApi());
return audio::orchestra::error_fail;
}
ATA_INFO("Auto choice API :");
// Iterate through the compiled APIs and return as soon as we find
// one with at least one device or we reach the end of the list.
std::vector<std::string> apis = getListApi();
ATA_INFO(" find : " << apis.size() << " apis.");
for (size_t iii=0; iii<apis.size(); ++iii) {
ATA_INFO("try open ...");
openApi(apis[iii]);
if(m_api == nullptr) {
ATA_ERROR(" ==> can not create ...");
continue;
}
if (m_api->getDeviceCount() != 0) {
ATA_INFO(" ==> api open");
break;
} else {
ATA_INFO(" ==> Interface exist, but have no devices: " << m_api->getDeviceCount());
}
}
if (m_api != nullptr) {
return audio::orchestra::error_none;
}
ATA_ERROR("API NOT Supported '" << _api << "' not in " << getListApi());
return audio::orchestra::error_fail;
}
audio::orchestra::Interface::~Interface() {
ATA_INFO("Remove interface");
m_api.reset();
}
enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orchestra::StreamParameters* _outputParameters,
audio::orchestra::StreamParameters* _inputParameters,
audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options) {
if (m_api == nullptr) {
return audio::orchestra::error_inputNull;
}
return m_api->openStream(_outputParameters,
_inputParameters,
_format,
_sampleRate,
_bufferFrames,
_callback,
_options);
}
bool audio::orchestra::Interface::isMasterOf(audio::orchestra::Interface& _interface) {
if (m_api == nullptr) {
ATA_ERROR("Current Master API is nullptr ...");
return false;
}
if (_interface.m_api == nullptr) {
ATA_ERROR("Current Slave API is nullptr ...");
return false;
}
if (m_api->getCurrentApi() != _interface.m_api->getCurrentApi()) {
ATA_ERROR("Can not link 2 Interface with not the same Low level type (?)");//" << _interface.m_adac->getCurrentApi() << " != " << m_adac->getCurrentApi() << ")");
return false;
}
if (m_api->getCurrentApi() != audio::orchestra::typeAlsa) {
ATA_ERROR("Link 2 device together work only if the interafec is ?");// << audio::orchestra::type::alsa << " not for " << m_api->getCurrentApi());
return false;
}
return m_api->isMasterOf(_interface.m_api);
}

307
audio/orchestra/Interface.h Normal file
View File

@@ -0,0 +1,307 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <string>
#include <vector>
#include <audio/orchestra/base.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/Api.h>
namespace audio {
namespace orchestra {
/**
* @brief audio::orchestra::Interface class declaration.
*
* audio::orchestra::Interface is a "controller" used to select an available audio i/o
* interface. It presents a common API for the user to call but all
* functionality is implemented by the class RtApi and its
* subclasses. RtAudio creates an instance of an RtApi subclass
* based on the user's API choice. If no choice is made, RtAudio
* attempts to make a "logical" API selection.
*/
class Interface {
protected:
std::vector<std::pair<std::string, ememory::SharedPtr<Api> (*)()> > m_apiAvaillable;
protected:
ememory::SharedPtr<audio::orchestra::Api> m_api;
public:
void setName(const std::string& _name) {
if (m_api == nullptr) {
return;
}
m_api->setName(_name);
}
/**
* @brief Get the list of all availlable API in the system.
* @return the list of all APIs
*/
std::vector<std::string> getListApi();
/**
* @brief Add an interface of the Possible List.
* @param[in] _api Type of the interface.
* @param[in] _callbackCreate API creation callback.
*/
void addInterface(const std::string& _api, ememory::SharedPtr<Api> (*_callbackCreate)());
/**
* @brief The class constructor.
* @note the creating of the basic instance is done by Instanciate
*/
Interface();
/**
* @brief The destructor.
*
* If a stream is running or open, it will be stopped and closed
* automatically.
*/
virtual ~Interface();
/**
* @brief Clear the current Interface
*/
enum audio::orchestra::error clear();
/**
* @brief Create an interface instance
*/
enum audio::orchestra::error instanciate(const std::string& _api = audio::orchestra::typeUndefined);
/**
* @return the audio API specifier for the current instance of airtaudio.
*/
const std::string& getCurrentApi() {
if (m_api == nullptr) {
return audio::orchestra::typeUndefined;
}
return m_api->getCurrentApi();
}
/**
* @brief A public function that queries for the number of audio devices available.
*
* This function performs a system query of available devices each time it
* is called, thus supporting devices connected \e after instantiation. If
* a system error occurs during processing, a warning will be issued.
*/
uint32_t getDeviceCount() {
if (m_api == nullptr) {
return 0;
}
return m_api->getDeviceCount();
}
/**
* @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
* If an invalid argument is provided, an RtError (type = INVALID_USE)
* will be thrown. If a device is busy or otherwise unavailable, the
* structure member "probed" will have a value of "false" and all
* other members are undefined. If the specified device is the
* current default input or output device, the corresponding
* "isDefault" member will have a value of "true".
*
* @return An audio::orchestra::DeviceInfo structure for a specified device number.
*/
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) {
if (m_api == nullptr) {
return audio::orchestra::DeviceInfo();
}
return m_api->getDeviceInfo(_device);
}
audio::orchestra::DeviceInfo getDeviceInfo(const std::string& _deviceName) {
if (m_api == nullptr) {
return audio::orchestra::DeviceInfo();
}
audio::orchestra::DeviceInfo info;
m_api->getNamedDeviceInfo(_deviceName, info);
return info;
}
/**
* @brief A function that returns the index of the default output device.
*
* If the underlying audio API does not provide a "default
* device", or if no devices are available, the return value will be
* 0. Note that this is a valid device identifier and it is the
* client's responsibility to verify that a device is available
* before attempting to open a stream.
*/
uint32_t getDefaultOutputDevice() {
if (m_api == nullptr) {
return 0;
}
return m_api->getDefaultOutputDevice();
}
/**
* @brief A function that returns the index of the default input device.
*
* If the underlying audio API does not provide a "default
* device", or if no devices are available, the return value will be
* 0. Note that this is a valid device identifier and it is the
* client's responsibility to verify that a device is available
* before attempting to open a stream.
*/
uint32_t getDefaultInputDevice() {
if (m_api == nullptr) {
return 0;
}
return m_api->getDefaultInputDevice();
}
/**
* @brief A public function for opening a stream with the specified parameters.
*
* An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
* opened with the specified parameters or an error occurs during
* processing. An RtError (type = INVALID_USE) is thrown if any
* invalid device ID or channel number parameters are specified.
* @param _outputParameters Specifies output stream parameters to use
* when opening a stream, including a device ID, number of channels,
* and starting channel number. For input-only streams, this
* argument should be nullptr. The device ID is an index value between
* 0 and getDeviceCount() - 1.
* @param _inputParameters Specifies input stream parameters to use
* when opening a stream, including a device ID, number of channels,
* and starting channel number. For output-only streams, this
* argument should be nullptr. The device ID is an index value between
* 0 and getDeviceCount() - 1.
* @param _format An audio::format specifying the desired sample data format.
* @param _sampleRate The desired sample rate (sample frames per second).
* @param _bufferFrames A pointer to a value indicating the desired
* internal buffer size in sample frames. The actual value
* used by the device is returned via the same pointer. A
* value of zero can be specified, in which case the lowest
* allowable value is determined.
* @param _callback A client-defined function that will be invoked
* when input data is available and/or output data is needed.
* @param _options An optional pointer to a structure containing various
* global stream options, including a list of OR'ed audio::orchestra::streamFlags
* and a suggested number of stream buffers that can be used to
* control stream latency. More buffers typically result in more
* robust performance, though at a cost of greater latency. If a
* value of zero is specified, a system-specific median value is
* chosen. If the airtaudio_MINIMIZE_LATENCY flag bit is set, the
* lowest allowable value is used. The actual value used is
* returned via the structure argument. The parameter is API dependent.
* @param _errorCallback A client-defined function that will be invoked
* when an error has occured.
*/
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters *_outputParameters,
audio::orchestra::StreamParameters *_inputParameters,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options = audio::orchestra::StreamOptions());
/**
* @brief A function that closes a stream and frees any associated stream memory.
*
* If a stream is not open, this function issues a warning and
* returns (no exception is thrown).
*/
enum audio::orchestra::error closeStream() {
if (m_api == nullptr) {
return audio::orchestra::error_inputNull;
}
return m_api->closeStream();
}
/**
* @brief A function that starts a stream.
*
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
* during processing. An RtError (type = INVALID_USE) is thrown if a
* stream is not open. A warning is issued if the stream is already
* running.
*/
enum audio::orchestra::error startStream() {
if (m_api == nullptr) {
return audio::orchestra::error_inputNull;
}
return m_api->startStream();
}
/**
* @brief Stop a stream, allowing any samples remaining in the output queue to be played.
*
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
* during processing. An RtError (type = INVALID_USE) is thrown if a
* stream is not open. A warning is issued if the stream is already
* stopped.
*/
enum audio::orchestra::error stopStream() {
if (m_api == nullptr) {
return audio::orchestra::error_inputNull;
}
return m_api->stopStream();
}
/**
* @brief Stop a stream, discarding any samples remaining in the input/output queue.
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
* during processing. An RtError (type = INVALID_USE) is thrown if a
* stream is not open. A warning is issued if the stream is already
* stopped.
*/
enum audio::orchestra::error abortStream() {
if (m_api == nullptr) {
return audio::orchestra::error_inputNull;
}
return m_api->abortStream();
}
/**
* @return true if a stream is open and false if not.
*/
bool isStreamOpen() const {
if (m_api == nullptr) {
return false;
}
return m_api->isStreamOpen();
}
/**
* @return true if the stream is running and false if it is stopped or not open.
*/
bool isStreamRunning() const {
if (m_api == nullptr) {
return false;
}
return m_api->isStreamRunning();
}
/**
* @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
* @return the number of elapsed seconds since the stream was started.
*/
audio::Time getStreamTime() {
if (m_api == nullptr) {
return audio::Time();
}
return m_api->getStreamTime();
}
/**
* @brief The stream latency refers to delay in audio input and/or output
* caused by internal buffering by the audio system and/or hardware.
* For duplex streams, the returned value will represent the sum of
* the input and output latencies. If a stream is not open, an
* RtError (type = INVALID_USE) will be thrown. If the API does not
* report latency, the return value will be zero.
* @return The internal stream latency in sample frames.
*/
long getStreamLatency() {
if (m_api == nullptr) {
return 0;
}
return m_api->getStreamLatency();
}
/**
* @brief On some systems, the sample rate used may be slightly different
* than that specified in the stream parameters. If a stream is not
* open, an RtError (type = INVALID_USE) will be thrown.
* @return Returns actual sample rate in use by the stream.
*/
uint32_t getStreamSampleRate() {
if (m_api == nullptr) {
return 0;
}
return m_api->getStreamSampleRate();
}
bool isMasterOf(audio::orchestra::Interface& _interface);
protected:
void openApi(const std::string& _api);
};
}
}

View File

@@ -0,0 +1,45 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/StreamOptions.h>
#include <etk/stdTools.h>
#include <audio/orchestra/debug.h>
static const char* listValue[] = {
"hardware",
"trigered",
"soft"
};
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj) {
_os << listValue[_obj];
return _os;
}
namespace etk {
template <> bool from_string<enum audio::orchestra::timestampMode>(enum audio::orchestra::timestampMode& _variableRet, const std::string& _value) {
if (_value == "hardware") {
_variableRet = audio::orchestra::timestampMode_Hardware;
return true;
}
if (_value == "trigered") {
_variableRet = audio::orchestra::timestampMode_trigered;
return true;
}
if (_value == "soft") {
_variableRet = audio::orchestra::timestampMode_soft;
return true;
}
return false;
}
template <enum audio::orchestra::timestampMode> std::string to_string(const enum audio::orchestra::timestampMode& _variable) {
return listValue[_variable];
}
}

View File

@@ -0,0 +1,34 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <audio/orchestra/Flags.h>
namespace audio {
namespace orchestra {
enum timestampMode {
timestampMode_Hardware, //!< enable harware timestamp
timestampMode_trigered, //!< get harware triger time stamp and increment with duration
timestampMode_soft, //!< Simulate all timestamp.
};
std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj);
class StreamOptions {
public:
audio::orchestra::Flags flags; //!< A bit-mask of stream flags
uint32_t numberOfBuffers; //!< Number of stream buffers.
std::string streamName; //!< A stream name (currently used only in Jack).
enum timestampMode mode; //!< mode of timestamping data...
// Default constructor.
StreamOptions() :
flags(),
numberOfBuffers(0),
mode(timestampMode_Hardware) {}
};
}
}

View File

@@ -0,0 +1,30 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
namespace audio {
namespace orchestra {
/**
* @brief The structure for specifying input or ouput stream parameters.
*/
class StreamParameters {
public:
int32_t deviceId; //!< Device index (-1 to getDeviceCount() - 1).
std::string deviceName; //!< name of the device (if deviceId==-1 this must not be == "", and the oposite ...)
uint32_t nChannels; //!< Number of channels.
uint32_t firstChannel; //!< First channel index on device (default = 0).
// Default constructor.
StreamParameters() :
deviceId(-1),
nChannels(0),
firstChannel(0) {
}
};
}
}

1607
audio/orchestra/api/Alsa.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,81 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_ALSA
namespace audio {
namespace orchestra {
namespace api {
// Opaque holder of the ALSA-specific state (PIMPL idiom: keeps <alsa/asoundlib.h>
// out of this public header).
class AlsaPrivate;
// ALSA backend implementation of the generic audio::orchestra::Api interface.
class Alsa: public audio::orchestra::Api {
public:
// Factory used by the backend registry to instantiate this API.
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Alsa();
virtual ~Alsa();
// @return the constant identifier of this backend (typeAlsa).
const std::string& getCurrentApi() {
return audio::orchestra::typeAlsa;
}
// @return number of audio devices currently visible through ALSA.
uint32_t getDeviceCount();
private:
// Fill @p _info with the capabilities of the device named @p _deviceName.
// @param _cardId ALSA card identifier (-1 = resolve from the name).
// @param _subdevice ALSA subdevice identifier (-1 = resolve from the name).
// @param _localDeviceId local index of the device found (-1 = unknown).
// @param _input true to probe capture capabilities, false for playback.
// @return true when the device information could be retrieved.
bool getNamedDeviceInfoLocal(const std::string& _deviceName,
audio::orchestra::DeviceInfo& _info,
int32_t _cardId=-1, // Alsa card ID
int32_t _subdevice=-1, // alsa subdevice ID
int32_t _localDeviceId=-1,// local ID of device find
bool _input=false);
public:
// Public wrapper over getNamedDeviceInfoLocal() with all IDs auto-detected.
bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
return getNamedDeviceInfoLocal(_deviceName, _info);
}
// @return the description of the device at index @p _device.
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
// One processing cycle per transfer strategy (read/write, mmap read/write).
void callbackEventOneCycleRead();
void callbackEventOneCycleWrite();
void callbackEventOneCycleMMAPRead();
void callbackEventOneCycleMMAPWrite();
private:
// Trampoline given to the ALSA thread; _userData is the Alsa instance.
static void alsaCallbackEvent(void* _userData);
private:
ememory::SharedPtr<AlsaPrivate> m_private;
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
// Open one direction of a stream by device index (see openName for by-name).
bool open(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
// Open one direction of a stream by ALSA device name.
bool openName(const std::string& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual audio::Time getStreamTime();
public:
// @return true when this instance drives the device that _api is slaved to.
bool isMasterOf(ememory::SharedPtr<audio::orchestra::Api> _api);
};
}
}
}
#endif

View File

@@ -0,0 +1,193 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_JAVA
//#include <ewol/context/Context.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/api/AndroidNativeInterface.h>
#include <audio/orchestra/api/Android.h>
#include <limits.h>
// Factory: build a new Android backend instance and return it as a generic Api.
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Android::create() {
	ATA_INFO("Create Android device ... ");
	audio::orchestra::api::Android* backend = new audio::orchestra::api::Android();
	return ememory::SharedPtr<audio::orchestra::api::Android>(backend);
}
// Constructor: the JAVA-side stream UID is unknown until open() succeeds, hence -1.
audio::orchestra::api::Android::Android() :
  m_uid(-1) {
ATA_INFO("Create Android interface");
}
// Destructor: only logs; no native resource is released here.
// NOTE(review): the JAVA-side device (m_uid) does not appear to be closed
// anywhere in this destructor — confirm closeStream() is called by the owner.
audio::orchestra::api::Android::~Android() {
ATA_INFO("Destroy Android interface");
}
// Device enumeration is fully delegated to the JAVA native bridge.
uint32_t audio::orchestra::api::Android::getDeviceCount() {
	uint32_t nbDevice = audio::orchestra::api::android::getDeviceCount();
	return nbDevice;
}
// Device description is fully delegated to the JAVA native bridge.
audio::orchestra::DeviceInfo audio::orchestra::api::Android::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info = audio::orchestra::api::android::getDeviceInfo(_device);
	return info;
}
// Close the stream.
// NOTE(review): this is a no-op that always reports success — the JAVA-side
// device (android::closeStream(m_uid)) is never invoked here; confirm whether
// the close is performed elsewhere or simply not implemented yet.
enum audio::orchestra::error audio::orchestra::api::Android::closeStream() {
ATA_INFO("Close Stream");
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Android::startStream() {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
// Can not close the stream now...
return audio::orchestra::api::android::startStream(m_uid);
}
enum audio::orchestra::error audio::orchestra::api::Android::stopStream() {
ATA_INFO("Stop stream");
// Can not close the stream now...
return audio::orchestra::api::android::stopStream(m_uid);
}
// Abort the stream.
// NOTE(review): unlike start/stop, this does NOT forward to
// android::abortStream(m_uid) and always reports success — confirm whether the
// JAVA side is expected to stop on its own when the callback returns 2.
enum audio::orchestra::error audio::orchestra::api::Android::abortStream() {
ATA_INFO("Abort Stream");
// Can not close the stream now...
return audio::orchestra::error_none;
}
// Playback entry point, called from the JAVA audio thread (via the native
// bridge) each time the device needs _nbChunk frames written into _dst.
// Runs the user callback, converting from the user format when required.
void audio::orchestra::api::Android::playback(int16_t* _dst, int32_t _nbChunk) {
// clear output buffer:
if (_dst != nullptr) {
// Pre-zero so a partial/failed callback still produces silence, not garbage.
memset(_dst, 0, _nbChunk*audio::getFormatBytes(m_deviceFormat[modeToIdTable(m_mode)])*m_nDeviceChannels[modeToIdTable(m_mode)]);
}
int32_t doStopStream = 0;
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
// Conversion path: the callback fills the user-format staging buffer,
// which is then converted into the device format in _dst.
// NOTE(review): the buffers are indexed with the raw enum here
// (m_userBuffer[m_mode]) but with modeToIdTable(m_mode) elsewhere —
// presumably equivalent for mode_output, but confirm modeToIdTable().
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " userbuffer size = " << m_userBuffer[audio::orchestra::mode_output].size() << "pointer=" << int64_t(&m_userBuffer[audio::orchestra::mode_output][0]));
doStopStream = m_callback(nullptr,
audio::Time(),
&m_userBuffer[m_mode][0],
streamTime,
uint32_t(_nbChunk),
status);
convertBuffer((char*)_dst, (char*)&m_userBuffer[audio::orchestra::mode_output][0], m_convertInfo[audio::orchestra::mode_output]);
} else {
// Direct path: the callback writes straight into the device buffer.
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " pointer=" << int64_t(_dst));
doStopStream = m_callback(nullptr,
audio::Time(),
_dst,
streamTime,
uint32_t(_nbChunk),
status);
}
// A callback return value of 2 requests an immediate stream abort.
if (doStopStream == 2) {
abortStream();
return;
}
// Advance the stream clock by one buffer duration.
audio::orchestra::Api::tickStreamTime();
}
// Record entry point, called from the JAVA audio thread (via the native bridge)
// each time the device captured _nbChunk frames into _dst.
// Converts to the user format when required, then runs the user callback.
void audio::orchestra::api::Android::record(int16_t* _dst, int32_t _nbChunk) {
	int32_t doStopStream = 0;
	audio::Time streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
		// Bug fix: the verbose trace was copy-pasted from playback() — it logged
		// (and took the address of) the OUTPUT user buffer on this capture path,
		// which may be empty here. Log the INPUT buffer with a correct message.
		ATA_VERBOSE("Need record data " << int32_t(_nbChunk) << " userbuffer size = " << m_userBuffer[audio::orchestra::mode_input].size() << "pointer=" << int64_t(&m_userBuffer[audio::orchestra::mode_input][0]));
		// Device format -> user format conversion before the callback sees it.
		convertBuffer((char*)&m_userBuffer[audio::orchestra::mode_input][0], (char*)_dst, m_convertInfo[audio::orchestra::mode_input]);
		doStopStream = m_callback(&m_userBuffer[m_mode][0],
		                          streamTime,
		                          nullptr,
		                          audio::Time(),
		                          uint32_t(_nbChunk),
		                          status);
	} else {
		// Direct path: hand the device buffer straight to the callback.
		ATA_VERBOSE("Need record data " << int32_t(_nbChunk) << " pointer=" << int64_t(_dst));
		doStopStream = m_callback(_dst,
		                          streamTime,
		                          nullptr,
		                          audio::Time(),
		                          uint32_t(_nbChunk),
		                          status);
	}
	// A callback return value of 2 requests an immediate stream abort.
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	// Advance the stream clock by one buffer duration.
	audio::orchestra::Api::tickStreamTime();
}
// Open one direction (_mode) of a stream on the JAVA device at index _device.
// Asks the JAVA bridge to open the device and records the stream UID; then
// configures the (currently hard-coded) device-side format and decides whether
// a user<->device conversion buffer is needed.
// @return true when the JAVA side returned a valid stream UID.
bool audio::orchestra::api::Android::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool ret = false;
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
m_mode = _mode;
m_userFormat = _format;
m_nUserChannels[modeToIdTable(m_mode)] = _channels;
// Pass ourselves to the bridge so JAVA can call back playback()/record().
m_uid = audio::orchestra::api::android::open(_device, m_mode, _channels, _firstChannel, _sampleRate, _format, _bufferSize, _options, ememory::staticPointerCast<audio::orchestra::api::Android>(sharedFromThis()));
if (m_uid < 0) {
ret = false;
} else {
ret = true;
}
// NOTE(review): the buffer size is forced to 256 regardless of what the JAVA
// side wrote into *_bufferSize — confirm this is intentional.
m_bufferSize = 256;
m_sampleRate = _sampleRate;
m_doByteSwap[modeToIdTable(m_mode)] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to bu update later ...
// Device side is assumed int16 / stereo / interleaved (see TODO above).
m_deviceFormat[modeToIdTable(m_mode)] = audio::format_int16;
m_nDeviceChannels[modeToIdTable(m_mode)] = 2;
m_deviceInterleaved[modeToIdTable(m_mode)] = true;
// A conversion buffer is needed when format, channel count, or interleaving
// differ between the user request and the device configuration.
m_doConvertBuffer[modeToIdTable(m_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(m_mode)]) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(m_mode)] < m_nDeviceChannels[modeToIdTable(m_mode)]) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(m_mode)] == false
&& m_nUserChannels[modeToIdTable(m_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(m_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(m_mode)].resize(bufferBytes);
if (m_userBuffer[modeToIdTable(m_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
}
setConvertInfo(m_mode, _firstChannel);
}
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(m_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(m_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(m_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(m_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
return ret;
}
#endif

View File

@@ -0,0 +1,60 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#pragma once
#ifdef ORCHESTRA_BUILD_JAVA
#include <audio/orchestra/Interface.h>
namespace audio {
namespace orchestra {
namespace api {
// Android (JAVA AudioTrack/AudioRecord) backend of audio::orchestra::Api.
// All device access is delegated to the JAVA side through
// audio::orchestra::api::android (see AndroidNativeInterface.h).
class Android: public audio::orchestra::Api {
public:
// Factory used by the backend registry to instantiate this API.
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Android();
virtual ~Android();
// @return the constant identifier of this backend (typeJava).
const std::string& getCurrentApi() {
return audio::orchestra::typeJava;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
// Stream UID allocated by the JAVA side in open(); -1 while no stream is open.
int32_t m_uid;
public:
// @return the JAVA-side stream UID (-1 when not opened).
int32_t getUId() {
return m_uid;
}
private:
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
public:
// Called from the JAVA audio thread when the device needs output data.
void playback(int16_t* _dst, int32_t _nbChunk);
// Called from the JAVA audio thread when the device captured input data.
void record(int16_t* _dst, int32_t _nbChunk);
};
}
}
}
#endif

View File

@@ -0,0 +1,540 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <jni.h>
#include <pthread.h>
#include <mutex>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/error.h>
#include <audio/orchestra/api/AndroidNativeInterface.h>
#include <audio/orchestra/api/Android.h>
/* include auto generated file */
#include <org_musicdsp_orchestra_OrchestraConstants.h>
#include <jvm-basics/jvm-basics.h>
#include <ememory/memory.h>
#include <ejson/ejson.h>
// Bridge between the C++ orchestra backend and the JAVA OrchestraManager.
// Caches the JNI class/method handles once at construction, then exposes
// device enumeration, open/close/start/stop and the audio data callbacks.
// All JAVA calls attach the current thread to the JVM when needed.
class AndroidOrchestraContext {
public:
// Resources retrieved from the JAVA environment:
JNIEnv* m_JavaVirtualMachinePointer; //!< the JVM
jclass m_javaClassOrchestra; //!< main activity class (android ...)
jclass m_javaClassOrchestraCallback;
jobject m_javaObjectOrchestraCallback;
jmethodID m_javaMethodOrchestraActivityAudioGetDeviceCount;
jmethodID m_javaMethodOrchestraActivityAudioGetDeviceProperty;
jmethodID m_javaMethodOrchestraActivityAudioOpenDeviceInput;
jmethodID m_javaMethodOrchestraActivityAudioOpenDeviceOutput;
jmethodID m_javaMethodOrchestraActivityAudioCloseDevice;
jmethodID m_javaMethodOrchestraActivityAudioStart;
jmethodID m_javaMethodOrchestraActivityAudioStop;
jclass m_javaDefaultClassString; //!< default string class
private:
// Resolve the JAVA method @p _name with signature @p _sign on class @p _cls.
// On failure the JVM pointer is cleared so every later call becomes a no-op.
bool safeInitMethodID(jmethodID& _mid, jclass& _cls, const char* _name, const char* _sign) {
_mid = m_JavaVirtualMachinePointer->GetMethodID(_cls, _name, _sign);
if(_mid == nullptr) {
ATA_ERROR("C->java : Can't find the method " << _name);
/* remove access on the virtual machine : */
m_JavaVirtualMachinePointer = nullptr;
return false;
}
return true;
}
// Attach the calling thread to the JVM if it is not already attached.
// *_rstatus receives the GetEnv status so the matching detach can undo
// only an attachment performed here (JNI_EDETACHED).
bool java_attach_current_thread(int *_rstatus) {
ATA_DEBUG("C->java : call java");
if (jvm_basics::getJavaVM() == nullptr) {
ATA_ERROR("C->java : JVM not initialised");
m_JavaVirtualMachinePointer = nullptr;
return false;
}
*_rstatus = jvm_basics::getJavaVM()->GetEnv((void **) &m_JavaVirtualMachinePointer, JNI_VERSION_1_6);
if (*_rstatus == JNI_EDETACHED) {
JavaVMAttachArgs lJavaVMAttachArgs;
lJavaVMAttachArgs.version = JNI_VERSION_1_6;
lJavaVMAttachArgs.name = "EwolNativeThread";
lJavaVMAttachArgs.group = nullptr;
int status = jvm_basics::getJavaVM()->AttachCurrentThread(&m_JavaVirtualMachinePointer, &lJavaVMAttachArgs);
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
if (status != JNI_OK) {
ATA_ERROR("C->java : AttachCurrentThread failed : " << status);
m_JavaVirtualMachinePointer = nullptr;
return false;
}
}
return true;
}
// Detach the calling thread, but only when the matching attach created the
// attachment (status == JNI_EDETACHED).
void java_detach_current_thread(int _status) {
if(_status == JNI_EDETACHED) {
jvm_basics::getJavaVM()->DetachCurrentThread();
m_JavaVirtualMachinePointer = nullptr;
}
}
public:
// Cache the JNI class, every callback method ID, and a global reference on
// the JAVA callback object. Any failure clears the JVM pointer and logs.
AndroidOrchestraContext(JNIEnv* _env, jclass _classBase, jobject _objCallback) :
m_JavaVirtualMachinePointer(nullptr),
m_javaClassOrchestra(0),
m_javaClassOrchestraCallback(0),
m_javaObjectOrchestraCallback(0),
m_javaMethodOrchestraActivityAudioGetDeviceCount(0),
m_javaMethodOrchestraActivityAudioGetDeviceProperty(0),
m_javaMethodOrchestraActivityAudioOpenDeviceInput(0),
m_javaMethodOrchestraActivityAudioOpenDeviceOutput(0),
m_javaMethodOrchestraActivityAudioCloseDevice(0),
m_javaMethodOrchestraActivityAudioStart(0),
m_javaMethodOrchestraActivityAudioStop(0),
m_javaDefaultClassString(0) {
ATA_DEBUG("*******************************************");
ATA_DEBUG("** set JVM Pointer (orchestra) **");
ATA_DEBUG("*******************************************");
m_JavaVirtualMachinePointer = _env;
// get default needed all time elements :
if (m_JavaVirtualMachinePointer == nullptr) {
ATA_ERROR("C->java: NULLPTR jvm interface");
return;
}
ATA_DEBUG("C->java: try load org/musicdsp/orchestra/OrchestraNative class");
m_javaClassOrchestra = m_JavaVirtualMachinePointer->FindClass("org/musicdsp/orchestra/OrchestraNative" );
if (m_javaClassOrchestra == 0) {
ATA_ERROR("C->java : Can't find org/musicdsp/orchestra/OrchestraNative class");
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = nullptr;
return;
}
/* The object field extends Activity and implement OrchestraCallback */
m_javaClassOrchestraCallback = m_JavaVirtualMachinePointer->GetObjectClass(_objCallback);
if(m_javaClassOrchestraCallback == nullptr) {
ATA_ERROR("C->java : Can't find org/musicdsp/orchestra/OrchestraManagerCallback class");
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = nullptr;
return;
}
// Resolve every JAVA-side callback; a missing one is fatal (see CRITICAL below).
bool functionCallbackIsMissing = false;
bool ret= false;
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioGetDeviceCount,
m_javaClassOrchestraCallback,
"getDeviceCount",
"()I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : getDeviceCount");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioGetDeviceProperty,
m_javaClassOrchestraCallback,
"getDeviceProperty",
"(I)Ljava/lang/String;");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : getDeviceProperty");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioOpenDeviceInput,
m_javaClassOrchestraCallback,
"openDeviceInput",
"(IIII)I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : openDeviceInput");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioOpenDeviceOutput,
m_javaClassOrchestraCallback,
"openDeviceOutput",
"(IIII)I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : openDeviceOutput");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioCloseDevice,
m_javaClassOrchestraCallback,
"closeDevice",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : closeDevice");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioStart,
m_javaClassOrchestraCallback,
"start",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : start");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioStop,
m_javaClassOrchestraCallback,
"stop",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : stop");
functionCallbackIsMissing = true;
}
// Keep a global reference: the callback object must outlive this JNI frame.
m_javaObjectOrchestraCallback = _env->NewGlobalRef(_objCallback);
if (m_javaObjectOrchestraCallback == nullptr) {
functionCallbackIsMissing = true;
}
m_javaDefaultClassString = m_JavaVirtualMachinePointer->FindClass("java/lang/String" );
if (m_javaDefaultClassString == 0) {
ATA_ERROR("C->java : Can't find java/lang/String" );
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = nullptr;
functionCallbackIsMissing = true;
}
if (functionCallbackIsMissing == true) {
ATA_CRITICAL(" mission one function ==> system can not work withut it...");
}
}
~AndroidOrchestraContext() {
// TODO ...
}
// Release the global reference taken on the JAVA callback object.
void unInit(JNIEnv* _env) {
_env->DeleteGlobalRef(m_javaObjectOrchestraCallback);
m_javaObjectOrchestraCallback = nullptr;
}
// Ask JAVA how many audio devices are available. Returns 0 on JVM error.
uint32_t getDeviceCount() {
// Request the clipBoard :
ATA_WARNING("C->java : audio get device count");
int status;
if(!java_attach_current_thread(&status)) {
return 0;
}
ATA_DEBUG("Call CallIntMethod ...");
//Call java ...
jint ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioGetDeviceCount);
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
ATA_WARNING(" find " << (uint32_t)ret << " IO");
return (uint32_t)ret;
}
// Fetch the device property string from JAVA (a JSON document) and parse it
// into a DeviceInfo (name, direction, sample rates, channels, formats).
// Returns a default-constructed DeviceInfo on JVM or parse error.
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _idDevice) {
audio::orchestra::DeviceInfo info;
// Request the clipBoard :
ATA_WARNING("C->java : audio get device info " << _idDevice);
int status;
if(!java_attach_current_thread(&status)) {
return info;
}
//Call java ...
jstring returnString = (jstring) m_JavaVirtualMachinePointer->CallObjectMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioGetDeviceProperty, _idDevice);
const char *js = m_JavaVirtualMachinePointer->GetStringUTFChars(returnString, nullptr);
std::string retString(js);
m_JavaVirtualMachinePointer->ReleaseStringUTFChars(returnString, js);
//m_JavaVirtualMachinePointer->DeleteLocalRef(returnString);
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
ATA_WARNING("get device information : " << retString);
ejson::Document doc;
if (doc.parse(retString) == false) {
return info;
}
info.name = doc["name"].toString().get("no-name");
if (doc["type"].toString().get("output") == "output") {
info.input = false;
} else {
info.input = true;
}
ejson::Array list = doc["sample-rate"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.sampleRates.push_back(int32_t(it.toNumber().get(48000)));
}
}
list = doc["channels"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.channels.push_back(audio::getChannelFromString(it.toString().get("???")));
}
}
list = doc["format"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.nativeFormats.push_back(audio::getFormatFromString(it.toString().get("???")));
}
}
info.isDefault = doc["default"].toBoolean().get(false);
info.isCorrect = true;
return info;
}
private:
// Weak references: streams unregister implicitly when their owner dies
// (dead entries are purged in playback()/record()).
std::vector<ememory::WeakPtr<audio::orchestra::api::Android> > m_instanceList; // list of connected handle ...
//AndroidAudioCallback m_audioCallBack;
//void* m_audioCallBackUserData;
public:
// Open an input or output device on the JAVA side and register _instance to
// receive its data callbacks. Returns the JAVA stream UID, or -1 on failure.
// NOTE(review): _firstChannel, _format, *_bufferSize and _options are not
// forwarded to JAVA (format is hard-coded to 1) — confirm intended.
int32_t open(uint32_t _idDevice,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options,
ememory::SharedPtr<audio::orchestra::api::Android> _instance) {
ATA_DEBUG("C->java : audio open device");
int status;
if(!java_attach_current_thread(&status)) {
return -1;
}
//Call java ...
jint ret = false;
if (_mode == audio::orchestra::mode_output) {
ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioOpenDeviceOutput, _idDevice, _sampleRate, _channels, /*_format*/ 1);
} else {
ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioOpenDeviceInput, _idDevice, _sampleRate, _channels, /*_format*/ 1);
}
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (int32_t(ret) >= 0) {
m_instanceList.push_back(_instance);
return int32_t(ret);
}
return -1;
}
public:
// Close the JAVA-side stream identified by _id.
enum audio::orchestra::error closeStream(int32_t _id) {
ATA_DEBUG("C->java : audio close device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioCloseDevice, _id);
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
// Start the JAVA-side stream identified by _id.
enum audio::orchestra::error startStream(int32_t _id) {
ATA_DEBUG("C->java : audio start device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioStart, _id);
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
// Stop the JAVA-side stream identified by _id.
enum audio::orchestra::error stopStream(int32_t _id) {
ATA_DEBUG("C->java : audio close device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioStop, _id);
// manage exception :
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
// Not implemented on the JAVA side: always reports failure.
enum audio::orchestra::error abortStream(int32_t _id) {
return audio::orchestra::error_fail;
}
// Route an output-data request from JAVA to the stream with matching UID.
// Dead (expired) stream entries are purged on the way.
void playback(int32_t _id, int16_t* _dst, int32_t _nbChunk) {
auto it = m_instanceList.begin();
while (it != m_instanceList.end()) {
auto elem = it->lock();
if (elem == nullptr) {
it = m_instanceList.erase(it);
continue;
}
if (elem->getUId() == _id) {
elem->playback(_dst, _nbChunk);
}
++it;
}
}
// Route captured input data from JAVA to the stream with matching UID.
// Dead (expired) stream entries are purged on the way.
void record(int32_t _id, int16_t* _dst, int32_t _nbChunk) {
auto it = m_instanceList.begin();
while (it != m_instanceList.end()) {
auto elem = it->lock();
if (elem == nullptr) {
it = m_instanceList.erase(it);
continue;
}
if (elem->getUId() == _id) {
elem->record(_dst, _nbChunk);
}
++it;
}
}
};
// Process-wide bridge context shared by every orchestra interface created from JAVA.
static ememory::SharedPtr<AndroidOrchestraContext> s_localContext;
// Number of JAVA-side managers currently referencing s_localContext.
static int32_t s_nbContextRequested(0);
// C++-side entry: forward to the shared JAVA bridge context when it exists.
uint32_t audio::orchestra::api::android::getDeviceCount() {
	if (s_localContext != nullptr) {
		return s_localContext->getDeviceCount();
	}
	ATA_ERROR("Have no Orchertra API instanciate in JAVA ...");
	return 0;
}
// Forward the device-info request to the shared JAVA bridge context.
// Returns an empty DeviceInfo when no context has been created from JAVA.
audio::orchestra::DeviceInfo audio::orchestra::api::android::getDeviceInfo(uint32_t _device) {
	if (s_localContext != nullptr) {
		return s_localContext->getDeviceInfo(_device);
	}
	return audio::orchestra::DeviceInfo();
}
// Forward the open request to the shared JAVA bridge context.
// @return the JAVA stream UID, or -1 when no context exists or the open failed.
int32_t audio::orchestra::api::android::open(uint32_t _device,
                                             audio::orchestra::mode _mode,
                                             uint32_t _channels,
                                             uint32_t _firstChannel,
                                             uint32_t _sampleRate,
                                             audio::format _format,
                                             uint32_t *_bufferSize,
                                             const audio::orchestra::StreamOptions& _options,
                                             ememory::SharedPtr<audio::orchestra::api::Android> _instance) {
	if (s_localContext != nullptr) {
		return s_localContext->open(_device, _mode, _channels, _firstChannel, _sampleRate, _format, _bufferSize, _options, _instance);
	}
	return -1;
}
// Close the JAVA stream _id through the shared bridge context.
enum audio::orchestra::error audio::orchestra::api::android::closeStream(int32_t _id) {
	if (s_localContext != nullptr) {
		return s_localContext->closeStream(_id);
	}
	return audio::orchestra::error_fail;
}
// Start the JAVA stream _id through the shared bridge context.
enum audio::orchestra::error audio::orchestra::api::android::startStream(int32_t _id) {
	if (s_localContext != nullptr) {
		return s_localContext->startStream(_id);
	}
	return audio::orchestra::error_fail;
}
// Stop the JAVA stream _id through the shared bridge context.
enum audio::orchestra::error audio::orchestra::api::android::stopStream(int32_t _id) {
	if (s_localContext != nullptr) {
		return s_localContext->stopStream(_id);
	}
	return audio::orchestra::error_fail;
}
// Abort the JAVA stream _id through the shared bridge context.
enum audio::orchestra::error audio::orchestra::api::android::abortStream(int32_t _id) {
	if (s_localContext != nullptr) {
		return s_localContext->abortStream(_id);
	}
	return audio::orchestra::error_fail;
}
// JNI entry points invoked from org.musicdsp.orchestra.OrchestraNative.
// All of them serialize on the JVM mutex before touching s_localContext.
extern "C" {
// Called by JAVA to (re)install the manager callback object.
// NOTE(review): when a context already exists it is replaced by a brand new
// one (losing the previously registered streams) while the request counter is
// incremented twice — confirm this refcount/replace behavior is intended.
void Java_org_musicdsp_orchestra_OrchestraNative_NNsetJavaManager(JNIEnv* _env,
jclass _classBase,
jobject _objCallback) {
std::unique_lock<std::mutex> lock(jvm_basics::getMutexJavaVM());
ATA_INFO("*******************************************");
ATA_INFO("** Creating Orchestra context **");
ATA_INFO("*******************************************");
if (s_localContext != nullptr) {
s_nbContextRequested++;
}
s_localContext = ememory::makeShared<AndroidOrchestraContext>(_env, _classBase, _objCallback);
if (s_localContext == nullptr) {
ATA_ERROR("Can not allocate the orchestra main context instance");
return;
}
s_nbContextRequested++;
}
// Called by JAVA to drop one reference on the manager; the context is
// destroyed when the last reference disappears.
void Java_org_musicdsp_orchestra_OrchestraNative_NNsetJavaManagerRemove(JNIEnv* _env, jclass _cls) {
std::unique_lock<std::mutex> lock(jvm_basics::getMutexJavaVM());
ATA_INFO("*******************************************");
ATA_INFO("** remove Orchestra Pointer **");
ATA_INFO("*******************************************");
if (s_nbContextRequested == 0) {
ATA_ERROR("Request remove orchestra interface from Android, but no more interface availlable");
return;
}
s_nbContextRequested--;
if (s_nbContextRequested == 0) {
s_localContext.reset();
}
}
// Called by the JAVA audio thread: fill _location with _nbChunk frames of
// output data for the stream identified by _id.
void Java_org_musicdsp_orchestra_OrchestraNative_NNPlayback(JNIEnv* _env,
void* _reserved,
jint _id,
jshortArray _location,
jint _nbChunk) {
std::unique_lock<std::mutex> lock(jvm_basics::getMutexJavaVM());
if (s_localContext == nullptr) {
ATA_ERROR("Call audio with no more Low level interface");
return;
}
// get the short* pointer from the Java array
jboolean isCopy;
jshort* dst = _env->GetShortArrayElements(_location, &isCopy);
if (dst != nullptr) {
//ATA_INFO("Need audioData " << int32_t(_nbChunk));
s_localContext->playback(int32_t(_id), static_cast<short*>(dst), int32_t(_nbChunk));
}
// TODO : Understand why it did not work corectly ...
//if (isCopy == JNI_TRUE) {
// release the short* pointer (mode 0: copy back to the JAVA array and free)
// NOTE(review): Release is also reached when dst == nullptr — confirm safe.
_env->ReleaseShortArrayElements(_location, dst, 0);
//}
}
// Called by the JAVA audio thread: hand _nbChunk captured frames from
// _location to the stream identified by _id.
void Java_org_musicdsp_orchestra_OrchestraNative_NNRecord(JNIEnv* _env,
void* _reserved,
jint _id,
jshortArray _location,
jint _nbChunk) {
std::unique_lock<std::mutex> lock(jvm_basics::getMutexJavaVM());
if (s_localContext == nullptr) {
ATA_ERROR("Call audio with no more Low level interface");
return;
}
// get the short* pointer from the Java array
jboolean isCopy;
jshort* dst = _env->GetShortArrayElements(_location, &isCopy);
if (dst != nullptr) {
//ATA_INFO("Need audioData " << int32_t(_nbChunk));
s_localContext->record(int32_t(_id), static_cast<short*>(dst), int32_t(_nbChunk));
}
// TODO : Understand why it did not work corectly ...
//if (isCopy == JNI_TRUE) {
// release the short* pointer (mode 0: copy back to the JAVA array and free)
// NOTE(review): Release is also reached when dst == nullptr — confirm safe.
_env->ReleaseShortArrayElements(_location, dst, 0);
//}
}
}

View File

@@ -0,0 +1,42 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#pragma once
#ifdef ORCHESTRA_BUILD_JAVA
#include <audio/orchestra/DeviceInfo.h>
#include <audio/orchestra/mode.h>
#include <audio/orchestra/error.h>
#include <audio/orchestra/StreamOptions.h>
#include <audio/format.h>
#include <ememory/memory.h>
namespace audio {
namespace orchestra {
namespace api {
class Android;
// Free-function bridge to the JAVA OrchestraManager (implemented over JNI).
// These wrap a process-wide context created when JAVA installs its manager.
namespace android {
// @return number of audio devices reported by JAVA (0 when no manager is set).
uint32_t getDeviceCount();
// @return description of device _device (empty info when no manager is set).
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
// Open a stream on device _device and register _instance for its callbacks.
// @return the JAVA stream UID, or -1 on failure.
int32_t open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options,
ememory::SharedPtr<audio::orchestra::api::Android> _instance);
// Stream control on the JAVA side, keyed by the UID returned from open().
enum audio::orchestra::error closeStream(int32_t _id);
enum audio::orchestra::error startStream(int32_t _id);
enum audio::orchestra::error stopStream(int32_t _id);
enum audio::orchestra::error abortStream(int32_t _id);
}
}
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,53 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_ASIO
namespace audio {
namespace orchestra {
namespace api {
// Opaque holder of the ASIO-specific state (PIMPL idiom).
// Bug fix: this forward declaration was written "class AsioPrivate:" — a colon
// instead of a semicolon, which is a syntax error (it would start a base-clause
// for the next class). Corrected to a proper forward declaration.
class AsioPrivate;
// ASIO (Windows) backend implementation of the generic audio::orchestra::Api.
class Asio: public audio::orchestra::Api {
public:
// Factory used by the backend registry to instantiate this API.
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Asio();
virtual ~Asio();
// @return the constant identifier of this backend (typeAsio).
const std::string& getCurrentApi() {
return audio::orchestra::typeAsio;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
bool callbackEvent(long _bufferIndex);
private:
ememory::SharedPtr<AsioPrivate> m_private;
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
// true when COM has been initialized by this instance.
bool m_coInitialized;
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

1300
audio/orchestra/api/Core.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,68 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_MACOSX_CORE
#include <CoreAudio/AudioHardware.h>
namespace audio {
namespace orchestra {
namespace api {
// Opaque holder of the CoreAudio-specific state (PIMPL idiom).
class CorePrivate;
// macOS CoreAudio backend implementation of the generic audio::orchestra::Api.
class Core: public audio::orchestra::Api {
public:
// Factory used by the backend registry to instantiate this API.
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Core();
virtual ~Core();
// @return the constant identifier of this backend (typeCoreOSX).
const std::string& getCurrentApi() {
return audio::orchestra::typeCoreOSX;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
// System default devices as reported by CoreAudio.
uint32_t getDefaultOutputDevice();
uint32_t getDefaultInputDevice();
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
long getStreamLatency();
// Per-cycle processing invoked from the CoreAudio IOProc.
bool callbackEvent(AudioDeviceID _deviceId,
const AudioBufferList *_inBufferList,
const audio::Time& _inTime,
const AudioBufferList *_outBufferList,
const audio::Time& _outTime);
// Static trampoline registered with CoreAudio; _infoPointer is the Core instance.
static OSStatus callbackEvent(AudioDeviceID _inDevice,
const AudioTimeStamp* _inNow,
const AudioBufferList* _inInputData,
const AudioTimeStamp* _inInputTime,
AudioBufferList* _outOutputData,
const AudioTimeStamp* _inOutputTime,
void* _infoPointer);
static void coreStopStream(void *_userData);
private:
ememory::SharedPtr<CorePrivate> m_private;
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
// Translate an OSStatus error code to a printable string.
static const char* getErrorCode(OSStatus _code);
// Property listener used to detect xrun (overload) notifications.
static OSStatus xrunListener(AudioObjectID _inDevice,
uint32_t _nAddresses,
const AudioObjectPropertyAddress _properties[],
void* _userData);
};
}
}
}
#endif

View File

@@ -0,0 +1,56 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_IOS_CORE
namespace audio {
namespace orchestra {
namespace api {
// Opaque holder of the iOS AudioUnit state (PIMPL idiom).
class CoreIosPrivate;
// iOS CoreAudio (AudioUnit) backend of the generic audio::orchestra::Api.
class CoreIos: public audio::orchestra::Api {
public:
// Factory used by the backend registry to instantiate this API.
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
CoreIos();
virtual ~CoreIos();
// @return the constant identifier of this backend (typeCoreIOS).
const std::string& getCurrentApi() {
return audio::orchestra::typeCoreIOS;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
public:
// Per-cycle processing invoked from the AudioUnit render callback.
void callBackEvent(void* _data,
int32_t _nbChunk,
const audio::Time& _time);
public:
ememory::SharedPtr<CoreIosPrivate> m_private;
};
}
}
}
#endif

View File

@@ -0,0 +1,296 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_IOS_CORE
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#include <audio/orchestra/api/CoreIos.h>
// Factory: allocate a new CoreIos backend and hand it back through the
// project shared-pointer type expected by the generic API layer.
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::CoreIos::create() {
	ATA_INFO("Create CoreIos device ... ");
	audio::orchestra::api::CoreIos* backend = new audio::orchestra::api::CoreIos();
	return ememory::SharedPtr<audio::orchestra::api::CoreIos>(backend);
}
#define kOutputBus 0
#define kInputBus 1
namespace audio {
	namespace orchestra {
		namespace api {
			// Private implementation data: keeps the Core-Audio types out of the
			// public header (pimpl idiom).
			class CoreIosPrivate {
				public:
					// RemoteIO audio unit instance, created in CoreIos::open().
					AudioComponentInstance audioUnit;
			};
		}
	}
}
// Build the static device list: iOS exposes exactly two fixed end-points,
// one output (speaker) and one input (microphone).
audio::orchestra::api::CoreIos::CoreIos(void) :
  m_private(new audio::orchestra::api::CoreIosPrivate()) {
	ATA_INFO("new CoreIos");
	// Informational only (was logged at ERROR level with an unused local).
	ATA_INFO("Get count devices : " << 2);
	// Add default output format :
	audio::orchestra::DeviceInfo speaker;
	speaker.name = "speaker";
	speaker.sampleRates.push_back(48000);
	speaker.channels.push_back(audio::channel_frontRight);
	speaker.channels.push_back(audio::channel_frontLeft);
	speaker.isDefault = true;
	speaker.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(speaker);
	// add default input format. Use a FRESH descriptor: the previous code
	// re-used the same temporary, so the microphone entry accumulated
	// duplicated sample rates, channels and formats from the speaker entry.
	audio::orchestra::DeviceInfo microphone;
	microphone.name = "microphone";
	microphone.sampleRates.push_back(48000);
	microphone.channels.push_back(audio::channel_frontRight);
	microphone.channels.push_back(audio::channel_frontLeft);
	microphone.isDefault = true;
	microphone.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(microphone);
	ATA_INFO("Create CoreIOs interface (end)");
}
// Release the RemoteIO unit allocated in open().
// NOTE(review): the unit is not stopped before being uninitialized —
// presumably the stream is already stopped when we get here; confirm callers.
audio::orchestra::api::CoreIos::~CoreIos(void) {
	ATA_INFO("Destroy CoreIOs interface");
	AudioUnitUninitialize(m_private->audioUnit);
}
uint32_t audio::orchestra::api::CoreIos::getDeviceCount(void) {
	// The device list is static (speaker + microphone), built by the constructor.
	return static_cast<uint32_t>(m_devices.size());
}
// Return the descriptor of one of the static devices.
// @param _device Device id in [0, getDeviceCount()).
// @return The device descriptor, or a default-constructed one on a bad id.
audio::orchestra::DeviceInfo audio::orchestra::api::CoreIos::getDeviceInfo(uint32_t _device) {
	// Guard the index: the previous code indexed the vector unchecked,
	// which is undefined behavior for an out-of-range id.
	if (_device >= m_devices.size()) {
		ATA_ERROR("Request device info out of range: " << _device << " >= " << m_devices.size());
		return audio::orchestra::DeviceInfo();
	}
	return m_devices[_device];
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream(void) {
	ATA_INFO("Close Stream");
	// Nothing to release here: the RemoteIO unit lives until the destructor.
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
// Pull _nbChunk output frames from the user callback into the device buffer.
// Called from the RemoteIO render callback (playbackCallback) for each
// hardware buffer; _time is the presentation time derived from the host clock.
void audio::orchestra::api::CoreIos::callBackEvent(void* _data,
                                                   int32_t _nbChunk,
                                                   const audio::Time& _time) {
	int32_t doStopStream = 0;
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
		// Conversion needed: let the callback fill the intermediate user
		// buffer, then convert it into the device buffer _data.
		doStopStream = m_callback(nullptr,
		                          audio::Time(),
		                          &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
		                          _time,
		                          _nbChunk,
		                          status);
		convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
	} else {
		// Device format matches the user format: write straight into _data.
		doStopStream = m_callback(_data,
		                          _time,
		                          nullptr,
		                          audio::Time(),
		                          _nbChunk,
		                          status);
	}
	// A return value of 2 from the user callback requests an immediate abort.
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	audio::orchestra::Api::tickStreamTime();
}
// Render callback registered on the RemoteIO unit: forwards every requested
// hardware buffer to the CoreIos instance carried in the user-data pointer.
static OSStatus playbackCallback(void *_userData,
                                 AudioUnitRenderActionFlags* _ioActionFlags,
                                 const AudioTimeStamp* _inTime,
                                 uint32_t _inBusNumber,
                                 uint32_t _inNumberFrames,
                                 AudioBufferList* _ioData) {
	if (_userData == nullptr) {
		ATA_ERROR("callback event ... nullptr pointer");
		return -1;
	}
	audio::orchestra::api::CoreIos* coreIos = static_cast<audio::orchestra::api::CoreIos*>(_userData);
	// Split the host tick counter into a (seconds, remainder) pair.
	audio::Time frameTime;
	if (_inTime != nullptr) {
		frameTime = audio::Time(_inTime->mHostTime/1000000000LL, _inTime->mHostTime%1000000000LL);
	}
	// Feed every buffer of the list with user data:
	for (uint32_t bufferId=0; bufferId<_ioData->mNumberBuffers; ++bufferId) {
		AudioBuffer& buffer = _ioData->mBuffers[bufferId];
		int32_t nbFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
		ATA_VERBOSE("request data size: " << nbFrame << " busNumber=" << _inBusNumber);
		coreIos->callBackEvent(buffer.mData, nbFrame, frameTime);
	}
	return noErr;
}
bool audio::orchestra::api::CoreIos::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != audio::orchestra::mode_output) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
bool ret = true;
// configure Airtaudio internal configuration:
m_userFormat = _format;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
m_bufferSize = 8192;
m_sampleRate = _sampleRate;
m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to be update later ...
m_deviceFormat[modeToIdTable(_mode)] = audio::format_int16;
m_nDeviceChannels[modeToIdTable(_mode)] = 2;
m_deviceInterleaved[modeToIdTable(_mode)] = true;
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
// Configure IOs interface:
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(nullptr, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not create an audio intance...");
}
uint32_t flag = 1;
// Enable IO for playback
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (status != 0) {
ATA_ERROR("can not request audio autorisation...");
}
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 48000.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1; //
audioFormat.mChannelsPerFrame = 2; // stereo
audioFormat.mBitsPerChannel = sizeof(short) * 8;
audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mReserved = 0;
// Apply format
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (status != 0) {
ATA_ERROR("can not set stream properties...");
}
// Set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = &playbackCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
if (status != 0) {
ATA_ERROR("can not set Callback...");
}
// Initialise
status = AudioUnitInitialize(m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not initialize...");
}
return ret;
}
#endif

1399
audio/orchestra/api/Ds.cpp Normal file

File diff suppressed because it is too large Load Diff

54
audio/orchestra/api/Ds.h Normal file
View File

@@ -0,0 +1,54 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_DS
namespace audio {
	namespace orchestra {
		namespace api {
			class DsPrivate;
			/**
			 * @brief DirectSound (Windows) backend of the orchestra API.
			 */
			class Ds: public audio::orchestra::Api {
				public:
					/**
					 * @brief Factory used by the generic API layer to instantiate this backend.
					 * @return Shared pointer on the newly allocated backend.
					 */
					static ememory::SharedPtr<audio::orchestra::Api> create();
				public:
					Ds();
					virtual ~Ds();
					/**
					 * @brief Get the identifier string of this backend.
					 * @return Reference on the backend type name.
					 */
					const std::string& getCurrentApi() {
						return audio::orchestra::typeDs;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesirable results!
					void callbackEvent();
				private:
					// Trampoline used by the internal callback thread.
					static void dsCallbackEvent(void *_userData);
					// Opaque holder of the DirectSound handles (pimpl idiom).
					ememory::SharedPtr<DsPrivate> m_private;
					// True once COM has been initialized for this instance.
					bool m_coInitialized;
					// True while the DirectSound buffers are actively cycling.
					bool m_buffersRolling;
					// Number of bytes pre-rolled before starting a duplex stream.
					long m_duplexPrerollBytes;
					bool open(uint32_t _device,
					          enum audio::orchestra::mode _mode,
					          uint32_t _channels,
					          uint32_t _firstChannel,
					          uint32_t _sampleRate,
					          enum audio::format _format,
					          uint32_t *_bufferSize,
					          const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,60 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_DUMMY)
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/debug.h>
// Factory: allocate a new no-op backend instance.
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Dummy::create() {
	audio::orchestra::api::Dummy* backend = new audio::orchestra::api::Dummy();
	return ememory::SharedPtr<audio::orchestra::api::Dummy>(backend);
}
// The dummy backend is a placeholder: constructing it only emits a warning.
audio::orchestra::api::Dummy::Dummy() {
	ATA_WARNING("This class provides no functionality.");
}
// The dummy backend never exposes any device.
uint32_t audio::orchestra::api::Dummy::getDeviceCount() {
	return 0;
}
// No devices exist: always answer with an empty descriptor.
audio::orchestra::DeviceInfo audio::orchestra::api::Dummy::getDeviceInfo(uint32_t _device) {
	(void)_device; // id is irrelevant for the dummy backend
	audio::orchestra::DeviceInfo emptyInfo;
	return emptyInfo;
}
// No stream can exist: closing always succeeds trivially.
enum audio::orchestra::error audio::orchestra::api::Dummy::closeStream() {
	return audio::orchestra::error_none;
}
// Only updates the base-class bookkeeping; there is no real stream to start.
enum audio::orchestra::error audio::orchestra::api::Dummy::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	return audio::orchestra::error_none;
}
// No stream can exist: stopping always succeeds trivially.
enum audio::orchestra::error audio::orchestra::api::Dummy::stopStream() {
	return audio::orchestra::error_none;
}
// No stream can exist: aborting always succeeds trivially.
enum audio::orchestra::error audio::orchestra::api::Dummy::abortStream() {
	return audio::orchestra::error_none;
}
bool audio::orchestra::api::Dummy::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
return false;
}
#endif

View File

@@ -0,0 +1,44 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_DUMMY
#include <audio/orchestra/Interface.h>
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief Placeholder backend with no functionality: reports zero
			 * devices and refuses to open any stream.
			 */
			class Dummy: public audio::orchestra::Api {
				public:
					/**
					 * @brief Factory used by the generic API layer to instantiate this backend.
					 * @return Shared pointer on the newly allocated backend.
					 */
					static ememory::SharedPtr<audio::orchestra::Api> create();
				public:
					Dummy();
					/**
					 * @brief Get the identifier string of this backend.
					 * @return Reference on the backend type name.
					 */
					const std::string& getCurrentApi() {
						return audio::orchestra::typeDummy;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
				private:
					// Always fails: the dummy backend can not open a stream.
					bool open(uint32_t _device,
					          audio::orchestra::mode _mode,
					          uint32_t _channels,
					          uint32_t _firstChannel,
					          uint32_t _sampleRate,
					          audio::format _format,
					          uint32_t *_bufferSize,
					          const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,732 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
// must run before :
#if defined(ORCHESTRA_BUILD_JACK)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <string.h>
#include <ethread/tools.h>
#include <audio/orchestra/api/Jack.h>
// Factory: allocate a new JACK backend instance.
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Jack::create() {
	audio::orchestra::api::Jack* backend = new audio::orchestra::api::Jack();
	return ememory::SharedPtr<audio::orchestra::api::Jack>(backend);
}
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X. It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server. The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl. Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started. In
// particular,
//
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
// jackd -r -d alsa -r 48000
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4. Once the server is running, it
// is not possible to override these values. If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// audio::orchestra::Jack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started. When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
namespace audio {
	namespace orchestra {
		namespace api {
			// Private per-stream state of the JACK backend (pimpl idiom).
			class JackPrivate {
				public:
					jack_client_t *client; // connection handle to the jack server
					jack_port_t **ports[2]; // malloc'ed port arrays, one per direction (output/input)
					std::string deviceName[2]; // jack client prefix per direction
					bool xrun[2]; // over/under-run flag per direction
					std::condition_variable condition; // signaled when draining completes
					int32_t drainCounter; // Tracks callback counts when draining
					bool internalDrain; // Indicates if stop is initiated from callback or not.
					// Use nullptr for pointer members (the previous code used the
					// integer literal 0 as a null pointer).
					JackPrivate() :
					  client(nullptr),
					  drainCounter(0),
					  internalDrain(false) {
						ports[0] = nullptr;
						ports[1] = nullptr;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
// Only allocates the private state; the jack connection is made in open().
audio::orchestra::api::Jack::Jack() :
  m_private(new audio::orchestra::api::JackPrivate()) {
	// Nothing to do here.
}
// Make sure any open stream (and the jack client) is released before destruction.
audio::orchestra::api::Jack::~Jack() {
	if (m_state != audio::orchestra::state::closed) {
		closeStream();
	}
}
// Count the jack "devices" visible on the server. Each distinct client name
// (the text before the first ':' in a port name) counts as one device, and
// every device is published twice (once as input, once as output).
uint32_t audio::orchestra::api::Jack::getDeviceCount() {
	// Try to connect without auto-starting the server; no server => no device.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackCount", options, status);
	if (client == nullptr) {
		return 0;
	}
	uint32_t deviceCount = 0;
	const char** portNames = jack_get_ports(client, nullptr, nullptr, 0);
	if (portNames != nullptr) {
		// Parse each port name up to (and including) the first colon; a new
		// prefix means a new client, hence a new device.
		std::string previousPrefix;
		for (uint32_t portId=0; portNames[portId] != nullptr; ++portId) {
			std::string prefix = (char *) portNames[portId];
			size_t colonPos = prefix.find(":");
			if (colonPos == std::string::npos) {
				continue;
			}
			prefix = prefix.substr(0, colonPos + 1);
			if (prefix != previousPrefix) {
				deviceCount++;
				previousPrefix = prefix;
			}
		}
		free(portNames);
	}
	jack_client_close(client);
	return deviceCount*2;
}
// Build the descriptor of one jack "device". Device ids map 2-to-1 on jack
// clients: id/2 selects the client, id%2 selects the direction.
audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackInfo", options, status);
	if (client == nullptr) {
		ATA_ERROR("Jack server not found or connection error!");
		// TODO : audio::orchestra::error_warning;
		info.clear();
		return info;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	int32_t deviceID = _device/2;
	info.input = _device%2==0?true:false; // note that jack sens are inverted
	if (ports) {
		// Parse the port names up to the first colon (:).
		// The deviceID'th distinct prefix gives the device name.
		// NOTE(review): nDevices (uint32_t) is compared against deviceID
		// (int32_t) — mixed signedness; harmless for realistic ids.
		size_t iColon = 0;
		do {
			port = (char *) ports[nPorts];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == deviceID) {
						info.name = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (deviceID >= nDevices) {
		jack_client_close(client);
		ATA_ERROR("device ID is invalid!");
		// TODO : audio::orchestra::error_invalidUse;
		return info;
	}
	// Get the current jack server sample rate.
	info.sampleRates.clear();
	info.sampleRates.push_back(jack_get_sample_rate(client));
	// Enumerate the device's ports to count its channels. Jack directions are
	// inverted w.r.t. ours: our "input" devices expose jack OUTPUT ports.
	if (info.input == true) {
		ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsOutput);
		if (ports) {
			int32_t iii=0;
			while (ports[iii]) {
				ATA_ERROR("          ploppp='" << ports[iii] << "'");
				info.channels.push_back(audio::channel_unknow);
				iii++;
			}
			free(ports);
		}
	} else {
		ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsInput);
		if (ports) {
			int32_t iii=0;
			while (ports[iii]) {
				ATA_ERROR("          ploppp='" << ports[iii] << "'");
				info.channels.push_back(audio::channel_unknow);
				iii++;
			}
			free(ports);
		}
	}
	if (info.channels.size() == 0) {
		jack_client_close(client);
		ATA_ERROR("error determining Jack input/output channels!");
		// TODO : audio::orchestra::error_warning;
		info.clear();
		return info;
	}
	// Jack always uses 32-bit floats.
	info.nativeFormats.push_back(audio::format_float);
	// Jack doesn't provide default devices so we'll use the first available one.
	if (deviceID == 0) {
		info.isDefault = true;
	}
	jack_client_close(client);
	info.isCorrect = true;
	return info;
}
// Trampoline registered with jack_set_process_callback(): dispatch the
// process cycle to the Jack instance carried in the user-data pointer.
// Returning non-zero tells jack to stop calling us.
int32_t audio::orchestra::api::Jack::jackCallbackHandler(jack_nframes_t _nframes, void* _userData) {
	ATA_VERBOSE("Jack callback: [BEGIN] " << uint64_t(_userData));
	audio::orchestra::api::Jack* self = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	bool continueStream = self->callbackEvent((uint64_t)_nframes);
	if (continueStream == false) {
		ATA_VERBOSE("Jack callback: [END] 1");
		return 1;
	}
	ATA_VERBOSE("Jack callback: [END] 0");
	return 0;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
void audio::orchestra::api::Jack::jackCloseStream(void* _userData) {
ethread::setName("Jack_closeStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->closeStream();
}
void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
// Check current stream state. If stopped, then we'll assume this
// was called as a result of a call to audio::orchestra::api::Jack::stopStream (the
// deactivation of a client handle causes this function to be called).
// If not, we'll assume the Jack server is shutting down or some
// other problem occurred and we should close the stream.
if (myClass->isStreamRunning() == false) {
return;
}
new std::thread(&audio::orchestra::api::Jack::jackCloseStream, _userData);
ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!");
}
int32_t audio::orchestra::api::Jack::jackXrun(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
if (myClass->m_private->ports[0]) {
myClass->m_private->xrun[0] = true;
}
if (myClass->m_private->ports[1]) {
myClass->m_private->xrun[1] = true;
}
return 0;
}
// Open (or extend to duplex) a stream on a jack device. Called once per
// direction; a second call with the opposite mode upgrades the stream to duplex.
// On failure all partially allocated resources are released via the error label.
bool audio::orchestra::api::Jack::open(uint32_t _device,
                                       audio::orchestra::mode _mode,
                                       uint32_t _channels,
                                       uint32_t _firstChannel,
                                       uint32_t _sampleRate,
                                       audio::format _format,
                                       uint32_t* _bufferSize,
                                       const audio::orchestra::StreamOptions& _options) {
	// Look for jack server and try to become a client (only do once per stream).
	jack_client_t *client = 0;
	if (    _mode == audio::orchestra::mode_output
	     || (    _mode == audio::orchestra::mode_input
	          && m_mode != audio::orchestra::mode_output)) {
		jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
		jack_status_t *status = nullptr;
		if (!_options.streamName.empty()) {
			client = jack_client_open(_options.streamName.c_str(), jackoptions, status);
		} else {
			client = jack_client_open("orchestraJack", jackoptions, status);
		}
		if (client == 0) {
			ATA_ERROR("Jack server not found or connection error!");
			return false;
		}
	} else {
		// The handle must have been created on an earlier pass.
		client = m_private->client;
	}
	const char **ports;
	std::string port, previousPort, deviceName;
	uint32_t nPorts = 0, nDevices = 0;
	// Device ids map 2-to-1 on jack clients (see getDeviceInfo()).
	int32_t deviceID = _device/2;
	// NOTE(review): isInput is computed but never used below — confirm intent.
	bool isInput = _device%2==0?true:false;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:) to resolve deviceName.
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == deviceID) {
						deviceName = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	// NOTE(review): this compares the raw _device (deviceID*2 + direction)
	// against the number of distinct clients, whereas getDeviceInfo() compares
	// deviceID >= nDevices — presumably deviceID was intended here; confirm.
	if (_device >= nDevices) {
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	uint64_t flag = JackPortIsInput;
	if (_mode == audio::orchestra::mode_input) {
		flag = JackPortIsOutput;
	}
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
	}
	// Compare the jack ports for specified client to the requested number of channels.
	if (nChannels < (_channels + _firstChannel)) {
		ATA_ERROR("requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
		return false;
	}
	// Check the jack server sample rate: it is fixed by the server, so the
	// requested rate must match exactly.
	uint32_t jackRate = jack_get_sample_rate(client);
	if (_sampleRate != jackRate) {
		jack_client_close(client);
		ATA_ERROR("the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
		return false;
	}
	m_sampleRate = jackRate;
	// Get the latency of the JACK port.
	// NOTE(review): `ports` is dereferenced and freed without a nullptr check
	// here, unlike the earlier lookups — confirm jack_get_ports can not return
	// nullptr at this point.
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports[ _firstChannel ]) {
		// Added by Ge Wang
		jack_latency_callback_mode_t cbmode = (_mode == audio::orchestra::mode_input ? JackCaptureLatency : JackPlaybackLatency);
		// the range (usually the min and max are equal)
		jack_latency_range_t latrange; latrange.min = latrange.max = 0;
		// get the latency range
		jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
		// be optimistic, use the min!
		m_latency[modeToIdTable(_mode)] = latrange.min;
		//m_latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
	}
	free(ports);
	// The jack server always uses 32-bit floating-point data.
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	m_userFormat = _format;
	// Jack always uses non-interleaved buffers.
	m_deviceInterleaved[modeToIdTable(_mode)] = false;
	// Jack always provides host byte-ordered data.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// Get the buffer size. The buffer size and number of buffers
	// (periods) is set when the jack server is started.
	m_bufferSize = (int) jack_get_buffer_size(client);
	*_bufferSize = m_bufferSize;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
		ATA_CRITICAL("Can not update format ==> use RIVER lib for this ...");
	}
	if (    m_deviceInterleaved[modeToIdTable(_mode)] == false
	     && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		ATA_ERROR("Reorder channel for the interleaving properties ...");
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	// Allocate our JackHandle structure for the stream.
	m_private->client = client;
	m_private->deviceName[modeToIdTable(_mode)] = deviceName;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
	ATA_VERBOSE("allocate : nbChannel=" << m_nUserChannels[modeToIdTable(_mode)] << " bufferSize=" << *_bufferSize << " format=" << m_deviceFormat[modeToIdTable(_mode)] << "=" << audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]));
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		// Size the shared device buffer: in duplex mode it must be large
		// enough for whichever direction needs more bytes.
		bool makeBuffer = true;
		if (_mode == audio::orchestra::mode_output) {
			bufferBytes = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
		} else { // _mode == audio::orchestra::mode_input
			bufferBytes = m_nDeviceChannels[1] * audio::getFormatBytes(m_deviceFormat[1]);
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes < bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) free(m_deviceBuffer);
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	// Allocate memory for the Jack ports (channels) identifiers.
	m_private->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
	if (m_private->ports[modeToIdTable(_mode)] == nullptr) {
		ATA_ERROR("error allocating port memory.");
		goto error;
	}
	m_device[modeToIdTable(_mode)] = _device;
	m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
	m_state = audio::orchestra::state::stopped;
	if (    m_mode == audio::orchestra::mode_output
	     && _mode == audio::orchestra::mode_input) {
		// We had already set up the stream for output.
		m_mode = audio::orchestra::mode_duplex;
	} else {
		m_mode = _mode;
		// First pass: install the process/xrun/shutdown callbacks once.
		jack_set_process_callback(m_private->client, &audio::orchestra::api::Jack::jackCallbackHandler, this);
		jack_set_xrun_callback(m_private->client, &audio::orchestra::api::Jack::jackXrun, this);
		jack_on_shutdown(m_private->client, &audio::orchestra::api::Jack::jackShutdown, this);
	}
	// Register our ports.
	char label[64];
	if (_mode == audio::orchestra::mode_output) {
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			snprintf(label, 64, "outport %d", i);
			m_private->ports[0][i] = jack_port_register(m_private->client,
			                                            (const char *)label,
			                                            JACK_DEFAULT_AUDIO_TYPE,
			                                            JackPortIsOutput,
			                                            0);
		}
	} else {
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			snprintf(label, 64, "inport %d", i);
			m_private->ports[1][i] = jack_port_register(m_private->client,
			                                            (const char *)label,
			                                            JACK_DEFAULT_AUDIO_TYPE,
			                                            JackPortIsInput,
			                                            0);
		}
	}
	// Setup the buffer conversion information structure. We don't use
	// buffers to do channel offsets, so we override that parameter
	// here.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, 0);
	}
	return true;
error:
	// Failure path: release everything acquired so far.
	jack_client_close(m_private->client);
	if (m_private->ports[0] != nullptr) {
		free(m_private->ports[0]);
		m_private->ports[0] = nullptr;
	}
	if (m_private->ports[1] != nullptr) {
		free(m_private->ports[1]);
		m_private->ports[1] = nullptr;
	}
	for (int32_t iii=0; iii<2; ++iii) {
		m_userBuffer[iii].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	return false;
}
// Tear down the stream: deactivate/close the jack client, then release all
// per-direction buffers and port arrays allocated by open().
enum audio::orchestra::error audio::orchestra::api::Jack::closeStream() {
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	if (m_private != nullptr) {
		if (m_state == audio::orchestra::state::running) {
			jack_deactivate(m_private->client);
		}
		jack_client_close(m_private->client);
	}
	for (size_t direction=0; direction<2; ++direction) {
		// Port arrays were malloc'ed in open().
		if (m_private->ports[direction] != nullptr) {
			free(m_private->ports[direction]);
			m_private->ports[direction] = nullptr;
		}
		m_userBuffer[direction].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state::closed;
	return audio::orchestra::error_none;
}
// Activate the jack client and wire our registered ports to the device's
// ports. `result` doubles as the jack error code and the success flag checked
// at the unlock label (0 == success).
enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state::running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	int32_t result = jack_activate(m_private->client);
	if (result) {
		ATA_ERROR("unable to activate JACK client!");
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		// Our output channels connect to the device's jack INPUT ports.
		ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), nullptr, JackPortIsInput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK input ports!");
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_channelOffset[0] + i ])
				result = jack_connect(m_private->client, jack_port_name(m_private->ports[0][i]), ports[ m_channelOffset[0] + i ]);
			if (result) {
				free(ports);
				ATA_ERROR("error connecting output ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		// Our input channels connect to the device's jack OUTPUT ports.
		ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), nullptr, JackPortIsOutput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK output ports!");
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_channelOffset[1] + i ]) {
				result = jack_connect(m_private->client, ports[ m_channelOffset[1] + i ], jack_port_name(m_private->ports[1][i]));
			}
			if (result) {
				free(ports);
				ATA_ERROR("error connecting input ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain state for the new run.
	m_private->drainCounter = 0;
	m_private->internalDrain = false;
	m_state = audio::orchestra::state::running;
unlock:
	if (result == 0) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/**
 * @brief Stop the JACK stream, letting queued output drain first in playback modes.
 * @return error_none on success, error_warning/error_fail on invalid state.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state::stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	const bool hasPlayback =    m_mode == audio::orchestra::mode_output
	                         || m_mode == audio::orchestra::mode_duplex;
	if (    hasPlayback
	     && m_private->drainCounter == 0) {
		// Ask the process callback to flush remaining data, then wait for
		// it to signal completion before deactivating the client.
		m_private->drainCounter = 2;
		std::unique_lock<std::mutex> lck(m_mutex);
		m_private->condition.wait(lck);
	}
	jack_deactivate(m_private->client);
	m_state = audio::orchestra::state::stopped;
	return audio::orchestra::error_none;
}
/**
 * @brief Abort the stream immediately: skip draining and stop right away.
 * @return error_none on success, error_warning/error_fail on invalid state.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state != audio::orchestra::state::stopped) {
		// Pre-arm the drain counter so stopStream() does not wait on the callback.
		m_private->drainCounter = 2;
		return stopStream();
	}
	ATA_ERROR("the stream is already stopped!");
	return audio::orchestra::error_warning;
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void* _userData) {
ethread::setName("Jack_stopStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->stopStream();
}
/**
 * @brief JACK process callback body: run the user callback, then shuttle audio
 *        between the user buffers and the JACK port buffers.
 * @param _nframes Number of frames requested by JACK for this cycle.
 * @return true to keep processing, false on a fatal inconsistency.
 */
bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
	if (    m_state == audio::orchestra::state::stopped
	     || m_state == audio::orchestra::state::stopping) {
		return true;
	}
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return false;
	}
	if (m_bufferSize != _nframes) {
		ATA_ERROR("the JACK buffer size has changed ... cannot process!");
		return false;
	}
	// Check if we were draining the stream and signal is finished.
	if (m_private->drainCounter > 3) {
		m_state = audio::orchestra::state::stopping;
		if (m_private->internalDrain == true) {
			// Fix: the previous 'new std::thread(...)' leaked the thread object
			// (never joined or deleted).  A detached thread has the same
			// semantics without the leak.
			std::thread(jackStopStream, this).detach();
		} else {
			m_private->condition.notify_one();
		}
		return true;
	}
	// Invoke user callback first, to get fresh output data.
	if (m_private->drainCounter == 0) {
		audio::Time streamTime = getStreamTime();
		std::vector<enum audio::orchestra::status> status;
		if (m_mode != audio::orchestra::mode_input && m_private->xrun[0] == true) {
			status.push_back(audio::orchestra::status::underflow);
			m_private->xrun[0] = false;
		}
		if (m_mode != audio::orchestra::mode_output && m_private->xrun[1] == true) {
			status.push_back(audio::orchestra::status::overflow);
			m_private->xrun[1] = false;
		}
		int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
		                                   streamTime,
		                                   &m_userBuffer[0][0],
		                                   streamTime,
		                                   m_bufferSize,
		                                   status);
		if (cbReturnValue == 2) {
			// User requested an abort: stop from a helper thread (see jackStopStream).
			m_state = audio::orchestra::state::stopping;
			m_private->drainCounter = 2;
			// Fix: detach instead of leaking a heap-allocated std::thread.
			std::thread(jackStopStream, this).detach();
			return true;
		}
		else if (cbReturnValue == 1) {
			// User requested a stop after draining the remaining output.
			m_private->drainCounter = 1;
			m_private->internalDrain = true;
		}
	}
	jack_default_audio_sample_t *jackbuffer;
	// Bytes per channel for one period.
	uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		} else if (m_doConvertBuffer[0]) {
			// Convert the user buffer into the device layout, then scatter per channel.
			convertBuffer(m_deviceBuffer, &m_userBuffer[0][0], m_convertInfo[0]);
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_deviceBuffer[i*bufferBytes], bufferBytes);
			}
		} else { // no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (m_private->drainCounter) {
			m_private->drainCounter++;
			goto unlock;
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[1]) {
			// Gather per-channel data into the device buffer, then convert for the user.
			for (uint32_t i=0; i<m_nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(&m_userBuffer[1][0], m_deviceBuffer, m_convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	audio::orchestra::Api::tickStreamTime();
	return true;
}
#endif

View File

@@ -0,0 +1,57 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_JACK
#include <jack/jack.h>
namespace audio {
	namespace orchestra {
		namespace api {
			class JackPrivate;
			/**
			 * @brief JACK backend of the orchestra audio API (fork of RtAudio).
			 */
			class Jack: public audio::orchestra::Api {
				public:
					// Factory used by the API registry to instantiate this backend.
					static ememory::SharedPtr<audio::orchestra::Api> create();
				public:
					Jack();
					virtual ~Jack();
					// Identifier string of this backend.
					const std::string& getCurrentApi() {
						return audio::orchestra::typeJack;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only.  It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio.  External use of this function
					// will most likely produce highly undesireable results!
					bool callbackEvent(uint64_t _nframes);
				private:
					// Static trampolines registered with the JACK C API.
					static int32_t jackXrun(void* _userData);
					static void jackCloseStream(void* _userData);
					static void jackShutdown(void* _userData);
					static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData);
				private:
					// Backend-private state (JACK client, ports, drain bookkeeping).
					ememory::SharedPtr<JackPrivate> m_private;
					bool open(uint32_t _device,
					          audio::orchestra::mode _mode,
					          uint32_t _channels,
					          uint32_t _firstChannel,
					          uint32_t _sampleRate,
					          audio::format _format,
					          uint32_t *_bufferSize,
					          const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,415 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_PULSE)
#include <unistd.h>
#include <limits.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
#include <ethread/tools.h>
#include <audio/orchestra/api/PulseDeviceList.h>
#include <audio/orchestra/api/Pulse.h>
// Factory: hand ownership of a freshly constructed Pulse backend to the caller.
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Pulse::create() {
	audio::orchestra::api::Pulse* instance = new audio::orchestra::api::Pulse();
	return ememory::SharedPtr<audio::orchestra::api::Pulse>(instance);
}
// Sample rates this backend accepts in open(); the list is 0-terminated.
static const uint32_t SUPPORTED_SAMPLERATES[] = {
	8000,
	16000,
	22050,
	32000,
	44100,
	48000,
	96000,
	0
};
// Association between an orchestra sample format and its PulseAudio equivalent.
struct rtaudio_pa_format_mapping_t {
	enum audio::format airtaudio_format;
	pa_sample_format_t pa_format;
};
// Formats negotiable with the pulse-simple API; terminated by the
// {format_unknow, PA_SAMPLE_INVALID} sentinel pair.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{audio::format_int16, PA_SAMPLE_S16LE},
	{audio::format_int32, PA_SAMPLE_S32LE},
	{audio::format_float, PA_SAMPLE_FLOAT32LE},
	{audio::format_unknow, PA_SAMPLE_INVALID}};
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief Backend-private state for the PulseAudio (pulse-simple) implementation.
			 */
			class PulsePrivate {
				public:
					pa_simple* handle; //!< pulse-simple connection, nullptr until open().
					ememory::SharedPtr<std::thread> thread; //!< IO thread running callbackEvent().
					bool threadRunning; //!< cleared by closeStream() to end the IO loop.
					std::condition_variable runnable_cv; //!< wakes the parked IO thread.
					bool runnable; //!< predicate protecting runnable_cv waits.
					PulsePrivate() :
					  // Fix: initialize the pointer with nullptr (was the literal 0),
					  // matching the convention used everywhere else in this file.
					  handle(nullptr),
					  threadRunning(false),
					  runnable(false) {
					}
			};
		}
	}
}
// Construct the PulseAudio backend; the server connection is established later in open().
audio::orchestra::api::Pulse::Pulse() :
  m_private(new audio::orchestra::api::PulsePrivate()) {
}
// Tear down: close the stream (stopping the IO thread) if it is still open.
audio::orchestra::api::Pulse::~Pulse() {
	if (m_state != audio::orchestra::state::closed) {
		closeStream();
	}
}
/**
 * @brief Number of PulseAudio devices currently visible on the server.
 * @return Count of sinks plus sources reported by the enumerator.
 */
uint32_t audio::orchestra::api::Pulse::getDeviceCount() {
	// Cleanup: the old '#if 1 ... #else return 1; #endif' dead branch is removed;
	// the enumerator path was already the one compiled in.
	std::vector<audio::orchestra::DeviceInfo> list = audio::orchestra::api::pulse::getDeviceList();
	return list.size();
}
/**
 * @brief Describe one PulseAudio device by index.
 * @param _device Index into the enumerated device list.
 * @return The device description, or a default-constructed (invalid) one when out of range.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Pulse::getDeviceInfo(uint32_t _device) {
	std::vector<audio::orchestra::DeviceInfo> devices = audio::orchestra::api::pulse::getDeviceList();
	if (_device < devices.size()) {
		return devices[_device];
	}
	ATA_ERROR("Request device out of IDs:" << _device << " >= " << devices.size());
	return audio::orchestra::DeviceInfo();
}
static void pulseaudio_callback(void* _userData) {
audio::orchestra::api::Pulse* myClass = reinterpret_cast<audio::orchestra::api::Pulse*>(_userData);
myClass->callbackEvent();
}
// IO-thread run loop: pump one audio cycle at a time until closeStream()
// clears the threadRunning flag.
void audio::orchestra::api::Pulse::callbackEvent() {
	ethread::setName("Pulse IO-" + m_name);
	for (;;) {
		if (m_private->threadRunning != true) {
			break;
		}
		callbackEventOneCycle();
	}
}
/**
 * @brief Stop the IO thread, release the pulse-simple connection and all buffers.
 * @return error_none (always succeeds).
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::closeStream() {
	m_private->threadRunning = false;
	m_mutex.lock();
	if (m_state == audio::orchestra::state::stopped) {
		// Wake the IO thread parked in callbackEventOneCycle() so it can exit.
		m_private->runnable = true;
		m_private->runnable_cv.notify_one(); // fix: stray ';;' removed
	}
	m_mutex.unlock();
	// Fix: guard against a stream that was never (fully) opened — joining a
	// null thread or freeing a null pa_simple handle is undefined behavior.
	if (m_private->thread != nullptr) {
		m_private->thread->join();
	}
	if (m_private->handle != nullptr) {
		if (m_mode == audio::orchestra::mode_output) {
			// Drop any queued samples; we are closing, not draining.
			pa_simple_flush(m_private->handle, nullptr);
		}
		pa_simple_free(m_private->handle);
		m_private->handle = nullptr;
	}
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	m_state = audio::orchestra::state::closed;
	m_mode = audio::orchestra::mode_unknow;
	return audio::orchestra::error_none;
}
/**
 * @brief One cycle of the Pulse IO loop: run the user callback, then read
 *        and/or write one period through the pulse-simple API.
 */
void audio::orchestra::api::Pulse::callbackEventOneCycle() {
	if (m_state == audio::orchestra::state::stopped) {
		std::unique_lock<std::mutex> lck(m_mutex);
		// Park until startStream()/closeStream() wakes us.
		while (!m_private->runnable) {
			m_private->runnable_cv.wait(lck);
		}
		if (m_state != audio::orchestra::state::running) {
			// Fix: 'lck' releases m_mutex when it goes out of scope — the
			// previous explicit m_mutex.unlock() here caused a double unlock
			// (undefined behavior) when the guard unlocked again.
			return;
		}
	}
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return;
	}
	audio::Time streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	int32_t doStopStream = m_callback(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
	                                  streamTime,
	                                  &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
	                                  streamTime,
	                                  m_bufferSize,
	                                  status);
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	m_mutex.lock();
	// Use the conversion buffer when the device layout differs from the user's.
	void *pulse_in = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0];
	void *pulse_out = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0];
	if (m_state != audio::orchestra::state::running) {
		goto unlock;
	}
	int32_t pa_error;
	size_t bytes;
	if (m_mode == audio::orchestra::mode_output) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]) {
			convertBuffer(m_deviceBuffer,
			              &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_write(m_private->handle, pulse_out, bytes, &pa_error) < 0) {
			ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
			// Fix: release the mutex before bailing out (previously returned
			// with m_mutex still locked, deadlocking every later cycle).
			m_mutex.unlock();
			return;
		}
	}
	if (m_mode == audio::orchestra::mode_input) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_read(m_private->handle, pulse_in, bytes, &pa_error) < 0) {
			ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
			// Fix: same mutex release as the write path above.
			m_mutex.unlock();
			return;
		}
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			convertBuffer(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
			              m_deviceBuffer,
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		}
	}
unlock:
	m_mutex.unlock();
	audio::orchestra::Api::tickStreamTime();
	if (doStopStream == 1) {
		stopStream();
		return;
	}
	return;
}
/**
 * @brief Mark the stream running and wake the parked IO thread.
 * @return error_none on success, error_invalidUse/error_warning on bad state.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("the stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	if (m_state == audio::orchestra::state::running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	{
		std::lock_guard<std::mutex> guard(m_mutex);
		m_state = audio::orchestra::state::running;
		m_private->runnable = true;
		m_private->runnable_cv.notify_one();
	}
	return audio::orchestra::error_none;
}
/**
 * @brief Stop the stream, draining queued output samples to the server first.
 * @return error_none on success, error_systemError if the drain fails.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::stopStream() {
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("the stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	if (m_state == audio::orchestra::state::stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	// Flag the stop before taking the lock so the IO thread parks on its
	// condition variable at the start of the next cycle.
	m_state = audio::orchestra::state::stopped;
	m_mutex.lock();
	if (    m_private != nullptr
	     && m_private->handle != nullptr
	     && m_mode == audio::orchestra::mode_output) {
		int32_t pa_error;
		// Drain blocks until every queued sample has been played.
		if (pa_simple_drain(m_private->handle, &pa_error) < 0) {
			ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
			m_mutex.unlock();
			return audio::orchestra::error_systemError;
		}
	}
	// NOTE(review): m_state is already 'stopped' here; this second assignment
	// is redundant but harmless.
	m_state = audio::orchestra::state::stopped;
	m_mutex.unlock();
	return audio::orchestra::error_none;
}
/**
 * @brief Stop the stream immediately, discarding queued output samples.
 * @return error_none on success, error_systemError if the flush fails.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::abortStream() {
	if (m_state == audio::orchestra::state::closed) {
		ATA_ERROR("the stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	if (m_state == audio::orchestra::state::stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	// Flag the stop before taking the lock so the IO thread parks on its
	// condition variable at the start of the next cycle.
	m_state = audio::orchestra::state::stopped;
	m_mutex.lock();
	if (    m_private != nullptr
	     && m_private->handle != nullptr
	     && m_mode == audio::orchestra::mode_output) {
		int32_t pa_error;
		// Unlike stopStream(), flush throws away pending data instead of draining it.
		if (pa_simple_flush(m_private->handle, &pa_error) < 0) {
			ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
			m_mutex.unlock();
			return audio::orchestra::error_systemError;
		}
	}
	// NOTE(review): m_state is already 'stopped' here; this second assignment
	// is redundant but harmless.
	m_state = audio::orchestra::state::stopped;
	m_mutex.unlock();
	return audio::orchestra::error_none;
}
/**
 * @brief Open a pulse-simple stream on the default device.
 * @param _device Must be 0 (only the default PulseAudio device is supported).
 * @param _mode mode_input or mode_output (duplex is not supported here).
 * @param _channels 1 or 2.
 * @param _firstChannel Must be 0.
 * @param _sampleRate One of SUPPORTED_SAMPLERATES.
 * @param _format One of the formats in supported_sampleformats.
 * @param _bufferSize In/out period size in frames.
 * @param _options Unused by this backend.
 * @return true on success, false on any rejected parameter or server error.
 */
bool audio::orchestra::api::Pulse::open(uint32_t _device,
                                        audio::orchestra::mode _mode,
                                        uint32_t _channels,
                                        uint32_t _firstChannel,
                                        uint32_t _sampleRate,
                                        audio::format _format,
                                        uint32_t *_bufferSize,
                                        const audio::orchestra::StreamOptions& _options) {
	uint64_t bufferBytes = 0;
	pa_sample_spec ss;
	if (_device != 0) {
		return false;
	}
	if (_mode != audio::orchestra::mode_input && _mode != audio::orchestra::mode_output) {
		return false;
	}
	if (_channels != 1 && _channels != 2) {
		ATA_ERROR("unsupported number of channels.");
		return false;
	}
	ss.channels = _channels;
	if (_firstChannel != 0) {
		return false;
	}
	// Validate the sample rate against the 0-terminated support list.
	bool sr_found = false;
	for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
		if (_sampleRate == *sr) {
			sr_found = true;
			m_sampleRate = _sampleRate;
			ss.rate = _sampleRate;
			break;
		}
	}
	if (!sr_found) {
		ATA_ERROR("unsupported sample rate.");
		return false;
	}
	// Validate the sample format (fix: was 'bool sf_found = 0;').
	bool sf_found = false;
	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
	     sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
	     ++sf) {
		if (_format == sf->airtaudio_format) {
			sf_found = true;
			m_userFormat = sf->airtaudio_format;
			ss.format = sf->pa_format;
			break;
		}
	}
	if (!sf_found) {
		ATA_ERROR("unsupported sample format.");
		return false;
	}
	// pulse-simple delivers interleaved data in the user's own format,
	// so no byte swap or format conversion is needed.
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	m_nBuffers = 1;
	m_doByteSwap[modeToIdTable(_mode)] = false;
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	m_deviceFormat[modeToIdTable(_mode)] = m_userFormat;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
	m_channelOffset[modeToIdTable(_mode)] = 0;
	// Allocate necessary internal buffers.
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	m_bufferSize = *_bufferSize;
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			// In duplex, reuse the output-side device buffer when it is large enough.
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes <= bytesOut) makeBuffer = false;
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) free(m_deviceBuffer);
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_device[modeToIdTable(_mode)] = _device;
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, _firstChannel);
	}
	// Fix: local renamed from 'error' to 'paError' — it shadowed the meaning
	// of the 'error:' cleanup label below and read confusingly.
	int32_t paError;
	switch (_mode) {
		case audio::orchestra::mode_input:
			m_private->handle = pa_simple_new(nullptr, "orchestra", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &paError);
			if (m_private->handle == nullptr) {
				ATA_ERROR("error connecting input to PulseAudio server.");
				goto error;
			}
			break;
		case audio::orchestra::mode_output:
			m_private->handle = pa_simple_new(nullptr, "orchestra", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &paError);
			if (m_private->handle == nullptr) {
				ATA_ERROR("error connecting output to PulseAudio server.");
				goto error;
			}
			break;
		default:
			goto error;
	}
	if (m_mode == audio::orchestra::mode_unknow) {
		m_mode = _mode;
	} else {
		// A stream is already open in another mode: not supported by this backend.
		goto error;
	}
	if (m_private->threadRunning == false) {
		m_private->threadRunning = true;
		m_private->thread = ememory::makeShared<std::thread>(&pulseaudio_callback, this);
		if (m_private->thread == nullptr) {
			ATA_ERROR("error creating thread.");
			goto error;
		}
	}
	m_state = audio::orchestra::state::stopped;
	return true;
error:
	// Release everything allocated above before reporting failure.
	for (int32_t iii=0; iii<2; ++iii) {
		m_userBuffer[iii].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		// Fix: use nullptr (was the literal 0), matching the rest of the file.
		m_deviceBuffer = nullptr;
	}
	return false;
}
#endif

View File

@@ -0,0 +1,52 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_PULSE
namespace audio {
	namespace orchestra {
		namespace api {
			class PulsePrivate;
			/**
			 * @brief PulseAudio (pulse-simple) backend of the orchestra audio API.
			 */
			class Pulse: public audio::orchestra::Api {
				public:
					// Factory used by the API registry to instantiate this backend.
					static ememory::SharedPtr<audio::orchestra::Api> create();
				public:
					Pulse();
					virtual ~Pulse();
					// Identifier string of this backend.
					const std::string& getCurrentApi() {
						return audio::orchestra::typePulse;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only.  It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio.  External use of this function
					// will most likely produce highly undesireable results!
					void callbackEventOneCycle();
					void callbackEvent();
				private:
					// Backend-private state (pulse-simple handle, IO thread, wakeup cv).
					ememory::SharedPtr<PulsePrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool open(uint32_t _device,
					          audio::orchestra::mode _mode,
					          uint32_t _channels,
					          uint32_t _firstChannel,
					          uint32_t _sampleRate,
					          audio::format _format,
					          uint32_t *_bufferSize,
					          const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,362 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_PULSE)
#include <stdio.h>
#include <string.h>
#include <pulse/pulseaudio.h>
#include <audio/orchestra/api/PulseDeviceList.h>
#include <audio/orchestra/debug.h>
#include <audio/Time.h>
#include <audio/Duration.h>
#include <audio/format.h>
#include <etk/stdTools.h>
// This callback gets called when our context changes state. We really only
// care about when it's ready or if it has failed
// Context-state callback.  Only READY / FAILED / TERMINATED matter: the
// int behind _userdata becomes 1 when the connection is usable and 2 when
// it is lost, which drives the wait loop in getDeviceList().
static void callbackStateMachine(pa_context* _contex, void *_userdata) {
	int* readyFlag = static_cast<int*>(_userdata);
	const pa_context_state_t currentState = pa_context_get_state(_contex);
	switch (currentState) {
		// Transitional states: nothing to record, just trace them.
		case PA_CONTEXT_UNCONNECTED:
			ATA_VERBOSE("pulse state: PA_CONTEXT_UNCONNECTED");
			break;
		case PA_CONTEXT_CONNECTING:
			ATA_VERBOSE("pulse state: PA_CONTEXT_CONNECTING");
			break;
		case PA_CONTEXT_AUTHORIZING:
			ATA_VERBOSE("pulse state: PA_CONTEXT_AUTHORIZING");
			break;
		case PA_CONTEXT_SETTING_NAME:
			ATA_VERBOSE("pulse state: PA_CONTEXT_SETTING_NAME");
			break;
		case PA_CONTEXT_READY:
			*readyFlag = 1;
			ATA_VERBOSE("pulse state: PA_CONTEXT_READY");
			break;
		case PA_CONTEXT_FAILED:
			*readyFlag = 2;
			ATA_VERBOSE("pulse state: PA_CONTEXT_FAILED");
			break;
		case PA_CONTEXT_TERMINATED:
			*readyFlag = 2;
			ATA_VERBOSE("pulse state: PA_CONTEXT_TERMINATED");
			break;
		default:
			ATA_VERBOSE("pulse state: default");
			break;
	}
}
// Map a PulseAudio sample format onto the orchestra audio::format enum.
// Endianness variants collapse onto the same logical format; companded and
// invalid formats are reported and mapped to format_unknow.
static audio::format getFormatFromPulseFormat(enum pa_sample_format _format) {
	switch (_format) {
		case PA_SAMPLE_U8:
			return audio::format_int8;
		case PA_SAMPLE_ALAW:
			ATA_ERROR("Not supported: uint8_t a-law");
			return audio::format_unknow;
		case PA_SAMPLE_ULAW:
			ATA_ERROR("Not supported: uint8_t mu-law");
			return audio::format_unknow;
		case PA_SAMPLE_S16LE:
		case PA_SAMPLE_S16BE:
			return audio::format_int16;
		case PA_SAMPLE_FLOAT32LE:
		case PA_SAMPLE_FLOAT32BE:
			return audio::format_float;
		case PA_SAMPLE_S32LE:
		case PA_SAMPLE_S32BE:
			return audio::format_int32;
		case PA_SAMPLE_S24LE:
		case PA_SAMPLE_S24BE:
			return audio::format_int24;
		case PA_SAMPLE_S24_32LE:
		case PA_SAMPLE_S24_32BE:
			return audio::format_int24_on_int32;
		case PA_SAMPLE_INVALID:
		case PA_SAMPLE_MAX:
			ATA_ERROR("Not supported: invalid");
			return audio::format_unknow;
	}
	ATA_ERROR("Not supported: UNKNOW flag...");
	return audio::format_unknow;
}
// Translate a PulseAudio channel map into the ordered list of orchestra
// channel identifiers (one entry per mapped channel, in map order).
static std::vector<audio::channel> getChannelOrderFromPulseChannel(const struct pa_channel_map& _map) {
	std::vector<audio::channel> out;
	for (int32_t iii=0; iii<_map.channels; ++iii) {
		switch(_map.map[iii]) {
			// Unknown / unmappable positions are kept as placeholders so the
			// output stays index-aligned with the Pulse map.
			default:
			case PA_CHANNEL_POSITION_MAX:
			case PA_CHANNEL_POSITION_INVALID:
				out.push_back(audio::channel_unknow);
				break;
			case PA_CHANNEL_POSITION_MONO:
			case PA_CHANNEL_POSITION_FRONT_CENTER:
				out.push_back(audio::channel_frontCenter);
				break;
			case PA_CHANNEL_POSITION_FRONT_LEFT:
				out.push_back(audio::channel_frontLeft);
				break;
			case PA_CHANNEL_POSITION_FRONT_RIGHT:
				out.push_back(audio::channel_frontRight);
				break;
			case PA_CHANNEL_POSITION_REAR_CENTER:
				out.push_back(audio::channel_rearCenter);
				break;
			case PA_CHANNEL_POSITION_REAR_LEFT:
				out.push_back(audio::channel_rearLeft);
				break;
			case PA_CHANNEL_POSITION_REAR_RIGHT:
				out.push_back(audio::channel_rearRight);
				break;
			case PA_CHANNEL_POSITION_LFE:
				out.push_back(audio::channel_lfe);
				break;
			case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER:
				out.push_back(audio::channel_centerLeft);
				break;
			case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER:
				out.push_back(audio::channel_centerRight);
				break;
			// NOTE(review): SIDE_LEFT/RIGHT map to topCenterLeft/Right here —
			// looks intentional for this project's channel model, but confirm.
			case PA_CHANNEL_POSITION_SIDE_LEFT:
				out.push_back(audio::channel_topCenterLeft);
				break;
			case PA_CHANNEL_POSITION_SIDE_RIGHT:
				out.push_back(audio::channel_topCenterRight);
				break;
			case PA_CHANNEL_POSITION_TOP_CENTER:
			case PA_CHANNEL_POSITION_TOP_FRONT_CENTER:
				out.push_back(audio::channel_topFrontCenter);
				break;
			case PA_CHANNEL_POSITION_TOP_FRONT_LEFT:
				out.push_back(audio::channel_topFrontLeft);
				break;
			case PA_CHANNEL_POSITION_TOP_FRONT_RIGHT:
				out.push_back(audio::channel_topFrontRight);
				break;
			case PA_CHANNEL_POSITION_TOP_REAR_LEFT:
				out.push_back(audio::channel_topRearLeft);
				break;
			case PA_CHANNEL_POSITION_TOP_REAR_RIGHT:
				out.push_back(audio::channel_topRearRight);
				break;
			case PA_CHANNEL_POSITION_TOP_REAR_CENTER:
				out.push_back(audio::channel_topRearCenter);
				break;
			// Auxiliary channels map one-to-one.
			case PA_CHANNEL_POSITION_AUX0: out.push_back(audio::channel_aux0); break;
			case PA_CHANNEL_POSITION_AUX1: out.push_back(audio::channel_aux1); break;
			case PA_CHANNEL_POSITION_AUX2: out.push_back(audio::channel_aux2); break;
			case PA_CHANNEL_POSITION_AUX3: out.push_back(audio::channel_aux3); break;
			case PA_CHANNEL_POSITION_AUX4: out.push_back(audio::channel_aux4); break;
			case PA_CHANNEL_POSITION_AUX5: out.push_back(audio::channel_aux5); break;
			case PA_CHANNEL_POSITION_AUX6: out.push_back(audio::channel_aux6); break;
			case PA_CHANNEL_POSITION_AUX7: out.push_back(audio::channel_aux7); break;
			case PA_CHANNEL_POSITION_AUX8: out.push_back(audio::channel_aux8); break;
			case PA_CHANNEL_POSITION_AUX9: out.push_back(audio::channel_aux9); break;
			case PA_CHANNEL_POSITION_AUX10: out.push_back(audio::channel_aux10); break;
			case PA_CHANNEL_POSITION_AUX11: out.push_back(audio::channel_aux11); break;
			case PA_CHANNEL_POSITION_AUX12: out.push_back(audio::channel_aux12); break;
			case PA_CHANNEL_POSITION_AUX13: out.push_back(audio::channel_aux13); break;
			case PA_CHANNEL_POSITION_AUX14: out.push_back(audio::channel_aux14); break;
			case PA_CHANNEL_POSITION_AUX15: out.push_back(audio::channel_aux15); break;
			case PA_CHANNEL_POSITION_AUX16: out.push_back(audio::channel_aux16); break;
			case PA_CHANNEL_POSITION_AUX17: out.push_back(audio::channel_aux17); break;
			case PA_CHANNEL_POSITION_AUX18: out.push_back(audio::channel_aux18); break;
			case PA_CHANNEL_POSITION_AUX19: out.push_back(audio::channel_aux19); break;
			case PA_CHANNEL_POSITION_AUX20: out.push_back(audio::channel_aux20); break;
			case PA_CHANNEL_POSITION_AUX21: out.push_back(audio::channel_aux21); break;
			case PA_CHANNEL_POSITION_AUX22: out.push_back(audio::channel_aux22); break;
			case PA_CHANNEL_POSITION_AUX23: out.push_back(audio::channel_aux23); break;
			case PA_CHANNEL_POSITION_AUX24: out.push_back(audio::channel_aux24); break;
			case PA_CHANNEL_POSITION_AUX25: out.push_back(audio::channel_aux25); break;
			case PA_CHANNEL_POSITION_AUX26: out.push_back(audio::channel_aux26); break;
			case PA_CHANNEL_POSITION_AUX27: out.push_back(audio::channel_aux27); break;
			case PA_CHANNEL_POSITION_AUX28: out.push_back(audio::channel_aux28); break;
			case PA_CHANNEL_POSITION_AUX29: out.push_back(audio::channel_aux29); break;
			case PA_CHANNEL_POSITION_AUX30: out.push_back(audio::channel_aux30); break;
			case PA_CHANNEL_POSITION_AUX31: out.push_back(audio::channel_aux31); break;
		}
	}
	return out;
}
// Sink-enumeration callback: PulseAudio invokes this once per sink, then one
// final time with _eol > 0 to mark the end of the list.
static void callbackGetSinkList(pa_context* _contex, const pa_sink_info* _info, int _eol, void* _userdata) {
	if (_eol > 0) {
		// End-of-list marker: nothing to record.
		return;
	}
	std::vector<audio::orchestra::DeviceInfo>* deviceList = static_cast<std::vector<audio::orchestra::DeviceInfo>*>(_userdata);
	audio::orchestra::DeviceInfo device;
	device.isCorrect = true;
	device.input = false;
	device.name = _info->name;
	device.desc = _info->description;
	device.sampleRates.push_back(_info->sample_spec.rate);
	device.nativeFormats.push_back(getFormatFromPulseFormat(_info->sample_spec.format));
	device.channels = getChannelOrderFromPulseChannel(_info->channel_map);
	ATA_VERBOSE("plop=" << _info->index << " " << _info->name);
	//ATA_DEBUG(" ports=" << _info->n_ports);
	deviceList->push_back(device);
}
// Source-enumeration callback: same protocol as callbackGetSinkList, but the
// resulting device is flagged as an input.
static void callbackGetSourceList(pa_context* _contex, const pa_source_info* _info, int _eol, void* _userdata) {
	if (_eol > 0) {
		// End-of-list marker: nothing to record.
		return;
	}
	std::vector<audio::orchestra::DeviceInfo>* deviceList = static_cast<std::vector<audio::orchestra::DeviceInfo>*>(_userdata);
	audio::orchestra::DeviceInfo device;
	device.isCorrect = true;
	device.input = true;
	device.name = _info->name;
	device.desc = _info->description;
	device.sampleRates.push_back(_info->sample_spec.rate);
	device.nativeFormats.push_back(getFormatFromPulseFormat(_info->sample_spec.format));
	device.channels = getChannelOrderFromPulseChannel(_info->channel_map);
	ATA_VERBOSE("plop=" << _info->index << " " << _info->name);
	deviceList->push_back(device);
}
// Cached device list: enumerating the PulseAudio server requires a full
// mainloop round-trip, so getDeviceList() keeps the previous result.
static std::vector<audio::orchestra::DeviceInfo> pulseAudioListOfDevice;
// Timestamp used to age the cache (compared against a 30 s window).
static audio::Time pulseAudioListOfDeviceTime;
/**
 * @brief Enumerate PulseAudio sinks and sources, with a 30-second cache.
 * @return The cached or freshly enumerated device list (empty on connection failure).
 */
std::vector<audio::orchestra::DeviceInfo> audio::orchestra::api::pulse::getDeviceList() {
	audio::Duration delta = audio::Time::now() - pulseAudioListOfDeviceTime;
	if (delta < audio::Duration(30,0)) {
		return pulseAudioListOfDevice;
	}
	// Define our pulse audio loop and connection variables
	pa_mainloop* pulseAudioMainLoop;
	pa_mainloop_api* pulseAudioMainLoopAPI;
	pa_operation* pulseAudioOperation;
	pa_context* pulseAudioContex;
	pa_context_flags_t pulseAudioFlags = PA_CONTEXT_NOAUTOSPAWN;
	std::vector<audio::orchestra::DeviceInfo>& out = pulseAudioListOfDevice;
	out.clear();
	// We'll need these state variables to keep track of our requests
	int state = 0;
	int pulseAudioReady = 0;
	// Create a mainloop API and connection to the default server
	pulseAudioMainLoop = pa_mainloop_new();
	pulseAudioMainLoopAPI = pa_mainloop_get_api(pulseAudioMainLoop);
	pulseAudioContex = pa_context_new(pulseAudioMainLoopAPI, "orchestraPulseCount");
	// If there's an error, the callback will set pulseAudioReady
	pa_context_set_state_callback(pulseAudioContex, callbackStateMachine, &pulseAudioReady);
	// This function connects to the pulse server
	pa_context_connect(pulseAudioContex, NULL, pulseAudioFlags, NULL);
	bool playLoop = true;
	while (playLoop == true) {
		// We can't do anything until PA is ready, so just iterate the mainloop
		// and continue
		if (pulseAudioReady == 0) {
			pa_mainloop_iterate(pulseAudioMainLoop, 1, nullptr);
			continue;
		}
		// We couldn't get a connection to the server, so exit out
		if (pulseAudioReady == 2) {
			pa_context_disconnect(pulseAudioContex);
			pa_context_unref(pulseAudioContex);
			pa_mainloop_free(pulseAudioMainLoop);
			ATA_ERROR("Pulse interface error: Can not connect to the pulseaudio iterface...");
			return out;
		}
		// At this point, we're connected to the server and ready to make
		// requests
		switch (state) {
			// State 0: we haven't done anything yet
			case 0:
				ATA_DEBUG("Request sink list");
				pulseAudioOperation = pa_context_get_sink_info_list(pulseAudioContex,
				                                                    callbackGetSinkList,
				                                                    &out);
				state++;
				break;
			case 1:
				// Now we wait for our operation to complete.  When it's
				// complete our pa_output_devicelist is filled out, and we move
				// along to the next state
				if (pa_operation_get_state(pulseAudioOperation) == PA_OPERATION_DONE) {
					pa_operation_unref(pulseAudioOperation);
					ATA_DEBUG("Request sources list");
					pulseAudioOperation = pa_context_get_source_info_list(pulseAudioContex,
					                                                      callbackGetSourceList,
					                                                      &out);
					state++;
				}
				break;
			case 2:
				if (pa_operation_get_state(pulseAudioOperation) == PA_OPERATION_DONE) {
					ATA_DEBUG("All is done");
					// Now we're done, clean up and disconnect and return
					pa_operation_unref(pulseAudioOperation);
					pa_context_disconnect(pulseAudioContex);
					pa_context_unref(pulseAudioContex);
					pa_mainloop_free(pulseAudioMainLoop);
					playLoop = false;
					break;
				}
				break;
			default:
				// We should never see this state
				ATA_ERROR("Error in getting the devices list ...");
				return out;
		}
		// Iterate the main loop ..
		if (playLoop == true) {
			pa_mainloop_iterate(pulseAudioMainLoop, 1, nullptr);
		}
	}
	// Fix: record the enumeration time — previously this timestamp was never
	// updated, so the 30-second cache test above always failed and every call
	// re-enumerated the whole server.
	pulseAudioListOfDeviceTime = audio::Time::now();
	// TODO: need to do it better ...
	// set default device:
	int32_t idInput = -1;
	int32_t idOutput = -1;
	for (int32_t iii=0; iii<out.size(); ++iii) {
		if (out[iii].input == true) {
			// First non-monitor source becomes the default input.
			if (idInput != -1) {
				continue;
			}
			if (etk::end_with(out[iii].name, ".monitor", false) == false) {
				idInput = iii;
				out[iii].isDefault = true;
			}
		} else {
			// First non-monitor sink becomes the default output.
			if (idOutput != -1) {
				continue;
			}
			if (etk::end_with(out[iii].name, ".monitor", false) == false) {
				idOutput = iii;
				out[iii].isDefault = true;
			}
		}
	}
	return out;
}
#endif

View File

@@ -0,0 +1,23 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
// Only compiled when the PulseAudio backend is enabled by the build system.
#ifdef ORCHESTRA_BUILD_PULSE
#include <etk/types.h>
#include <audio/orchestra/DeviceInfo.h>
namespace audio {
	namespace orchestra {
		namespace api {
			namespace pulse {
				/**
				 * @brief Get the list of the audio devices known by the
				 * PulseAudio server (implemented in PulseDeviceList.cpp).
				 * @return The filled device list.
				 */
				std::vector<audio::orchestra::DeviceInfo> getDeviceList();
			}
		}
	}
}
#endif

6
audio/orchestra/base.cpp Normal file
View File

@@ -0,0 +1,6 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/

25
audio/orchestra/base.h Normal file
View File

@@ -0,0 +1,25 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <thread>
#include <condition_variable>
#include <mutex>
#include <chrono>
#include <functional>
#include <ememory/memory.h>
#include <audio/channel.h>
#include <audio/format.h>
#include <audio/orchestra/error.h>
#include <audio/orchestra/status.h>
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/DeviceInfo.h>
#include <audio/orchestra/StreamOptions.h>
#include <audio/orchestra/StreamParameters.h>

13
audio/orchestra/debug.cpp Normal file
View File

@@ -0,0 +1,13 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/debug.h>
/**
 * @brief Get the elog instance identifier of the audio-orchestra library.
 * @return The (lazily registered) log instance id.
 */
int32_t audio::orchestra::getLogId() {
	// The registration is done only once, on the first call.
	static int32_t s_logInstanceId = elog::registerInstance("audio-orchestra");
	return s_logInstanceId;
}

View File

@@ -1,30 +1,21 @@
/**
/** @file
* @author Edouard DUPIN
*
* @copyright 2011, Edouard DUPIN, all right reserved
*
* @license BSD 3 clauses (see license file)
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __EAUDIOFX_DEBUG_H__
#define __EAUDIOFX_DEBUG_H__
#include <elog/log.h>
#include <etk/log.h>
namespace airtaudio {
int32_t getLogId();
};
// TODO : Review this problem of multiple intanciation of "std::stringbuf sb"
#define ATA_BASE(info,data) \
do { \
if (info <= etk::log::getLevel(airtaudio::getLogId())) { \
std::stringbuf sb; \
std::ostream tmpStream(&sb); \
tmpStream << data; \
etk::log::logStream(airtaudio::getLogId(), info, __LINE__, __class__, __func__, tmpStream); \
} \
} while(0)
namespace audio {
namespace orchestra {
int32_t getLogId();
}
}
#define ATA_BASE(info,data) ELOG_BASE(audio::orchestra::getLogId(),info,data)
#define ATA_PRINT(data) ATA_BASE(-1, data)
#define ATA_CRITICAL(data) ATA_BASE(1, data)
#define ATA_ERROR(data) ATA_BASE(2, data)
#define ATA_WARNING(data) ATA_BASE(3, data)
@@ -48,5 +39,3 @@ namespace airtaudio {
} \
} while (0)
#endif

View File

@@ -0,0 +1,9 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/error.h>
#include <audio/orchestra/debug.h>

22
audio/orchestra/error.h Normal file
View File

@@ -0,0 +1,22 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		//! Return status of the orchestra API calls.
		enum error {
			error_none, //!< No error
			error_fail, //!< An error occurred in the operation
			error_warning, //!< A non-critical error.
			error_inputNull, //!< null input or internal error
			error_invalidUse, //!< The function was called incorrectly.
			error_systemError //!< A system error occurred.
		};
	}
}

39
audio/orchestra/mode.cpp Normal file
View File

@@ -0,0 +1,39 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/mode.h>
#include <audio/orchestra/debug.h>
/**
 * @brief Convert a stream mode in an internal table index.
 * @param[in] _mode Mode to convert.
 * @return 1 for the input mode, 0 for every other mode.
 */
int32_t audio::orchestra::modeToIdTable(enum mode _mode) {
	// Only the input mode has its own table slot; unknow/duplex/output share slot 0.
	if (_mode == mode_input) {
		return 1;
	}
	return 0;
}
/**
 * @brief Stream the textual name of a stream mode (debug display).
 * @param[in,out] _os Output stream.
 * @param[in] _obj Mode to display.
 * @return The stream, to allow chaining.
 */
std::ostream& audio::operator <<(std::ostream& _os, enum audio::orchestra::mode _obj) {
	// Resolve the enum to its textual name first, then stream it once.
	const char* name = nullptr;
	switch (_obj) {
		case audio::orchestra::mode_unknow: name = "unknow"; break;
		case audio::orchestra::mode_duplex: name = "duplex"; break;
		case audio::orchestra::mode_output: name = "output"; break;
		case audio::orchestra::mode_input:  name = "input";  break;
	}
	if (name != nullptr) {
		_os << name;
	}
	return _os;
}

23
audio/orchestra/mode.h Normal file
View File

@@ -0,0 +1,23 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		//! Direction of an audio stream.
		enum mode {
			mode_unknow, //!< Mode not set.
			mode_output, //!< Output mode.
			mode_input, //!< Input mode.
			mode_duplex //!< Input and output at the same time.
		};
		//! Convert a mode in an internal table index (input ==> 1, everything else ==> 0).
		int32_t modeToIdTable(enum mode _mode);
	}
	//! Stream the textual name of the mode (debug display).
	std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::mode _obj);
}

View File

@@ -0,0 +1,6 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/

21
audio/orchestra/state.h Normal file
View File

@@ -0,0 +1,21 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		//! Life-cycle state of an audio stream.
		enum class state {
			closed,
			stopped,
			stopping,
			running
		};
	}
}

View File

@@ -0,0 +1,32 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/status.h>
#include <audio/orchestra/debug.h>
// Textual names of audio::orchestra::status, indexed by the enum underlying
// value: keep this table in sync with the 'status' enum declaration order.
static const char* listValue[] = {
	"ok",
	"overflow",
	"underflow"
};
/**
 * @brief Stream the textual name of a status (debug display).
 * @param[in,out] _os Output stream.
 * @param[in] _obj Status to display.
 * @return The stream, to allow chaining.
 */
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::status _obj) {
	// listValue is indexed by the enum's underlying value.
	return _os << listValue[static_cast<int32_t>(_obj)];
}
/**
 * @brief Stream a list of status as "{a;b;c}" (debug display).
 * @param[in,out] _os Output stream.
 * @param[in] _obj List of status to display.
 * @return The stream, to allow chaining.
 */
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj) {
	// Stream the literals directly: the previous std::string("...") wrappers
	// forced a needless temporary string allocation for every separator.
	_os << "{";
	for (size_t iii=0; iii<_obj.size(); ++iii) {
		if (iii != 0) {
			_os << ";";
		}
		// Delegates to the scalar operator<< above.
		_os << _obj[iii];
	}
	_os << "}";
	return _os;
}

22
audio/orchestra/status.h Normal file
View File

@@ -0,0 +1,22 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		//! Status of an audio buffer exchange.
		enum class status {
			ok, //!< nothing...
			overflow, //!< Internal buffer has more data than they can accept
			underflow //!< The internal buffer is empty
		};
		//! Stream the textual name of a status (implemented in status.cpp).
		std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::status _obj);
		//! Stream a list of status as "{a;b;c}" (implemented in status.cpp).
		std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj);
	}
}

25
audio/orchestra/type.cpp Normal file
View File

@@ -0,0 +1,25 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/type.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
// NOTE(review): <cstdlib>/<cstring>/<climits> look unused in this file —
// confirm before removing.
// String identifiers of the supported backends (declared extern in type.h).
const std::string audio::orchestra::typeUndefined = "undefined";
const std::string audio::orchestra::typeAlsa = "alsa";
const std::string audio::orchestra::typePulse = "pulse";
const std::string audio::orchestra::typeOss = "oss";
const std::string audio::orchestra::typeJack = "jack";
const std::string audio::orchestra::typeCoreOSX = "coreOSX";
const std::string audio::orchestra::typeCoreIOS = "coreIOS";
const std::string audio::orchestra::typeAsio = "asio";
const std::string audio::orchestra::typeDs = "ds";
const std::string audio::orchestra::typeJava = "java";
const std::string audio::orchestra::typeDummy = "dummy";

30
audio/orchestra/type.h Normal file
View File

@@ -0,0 +1,30 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.h>
#include <etk/stdTools.h>
namespace audio {
	namespace orchestra {
		/**
		 * @brief Audio API specifier arguments: string identifiers of the
		 * available backends (values are defined in type.cpp).
		 */
		extern const std::string typeUndefined; //!< Error API.
		extern const std::string typeAlsa; //!< LINUX The Advanced Linux Sound Architecture.
		extern const std::string typePulse; //!< LINUX The Linux PulseAudio.
		extern const std::string typeOss; //!< LINUX The Linux Open Sound System.
		extern const std::string typeJack; //!< UNIX The Jack Low-Latency Audio Server.
		extern const std::string typeCoreOSX; //!< Macintosh OSX Core Audio.
		extern const std::string typeCoreIOS; //!< Macintosh iOS Core Audio.
		extern const std::string typeAsio; //!< WINDOWS The Steinberg Audio Stream I/O.
		extern const std::string typeDs; //!< WINDOWS The Microsoft Direct Sound.
		extern const std::string typeJava; //!< ANDROID Interface.
		extern const std::string typeDummy; //!< Empty wrapper (non-functional).
	}
}

1
authors.txt Normal file
View File

@@ -0,0 +1 @@
MR Edouard DUPIN <yui.heero@gmail.com>

91
catkin/CMakeLists.txt Normal file
View File

@@ -0,0 +1,91 @@
cmake_minimum_required(VERSION 2.8.3)
project(audio_orchestra)
set(CMAKE_VERBOSE_MAKEFILE ON)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
etk
audio
)
find_package(ALSA REQUIRED)
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
INCLUDE_DIRS ../
LIBRARIES ${PROJECT_NAME}
CATKIN_DEPENDS etk audio
DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
..
${catkin_INCLUDE_DIRS}
)
## Declare a cpp library
add_library(${PROJECT_NAME}
../audio/orchestra/debug.cpp
../audio/orchestra/status.cpp
../audio/orchestra/type.cpp
../audio/orchestra/mode.cpp
../audio/orchestra/state.cpp
../audio/orchestra/error.cpp
../audio/orchestra/base.cpp
../audio/orchestra/Interface.cpp
../audio/orchestra/Flags.cpp
../audio/orchestra/Api.cpp
../audio/orchestra/DeviceInfo.cpp
../audio/orchestra/StreamOptions.cpp
../audio/orchestra/api/Dummy.cpp
../audio/orchestra/api/Alsa.cpp
../audio/orchestra/api/Jack.cpp
../audio/orchestra/api/Pulse.cpp
../audio/orchestra/api/Oss.cpp
)
add_definitions(-D__LINUX_ALSA__)
add_definitions(-D__DUMMY__)
## Add cmake target dependencies of the executable/library
## as an example, message headers may need to be generated before nodes
#add_dependencies(${PROJECT_NAME} test_perfo_core_generate_messages_cpp)
## Specify libraries to link a library or executable target against
target_link_libraries(${PROJECT_NAME}
${ALSA_LIBRARIES}
${catkin_LIBRARIES}
)
#############
## Install ##
#############
## Mark executables and/or libraries for installation
install(TARGETS ${PROJECT_NAME}
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
)
## Mark cpp header files for installation
install(DIRECTORY ../audio/orchestra/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
FILES_MATCHING PATTERN "*.h"
)

13
catkin/package.xml Normal file
View File

@@ -0,0 +1,13 @@
<?xml version="1.0"?>
<package>
<name>audio_orchestra</name>
<version>0.3.1</version>
<description>Ewol RTAudio fork</description>
<maintainer email="yui.heero@gmail.com">Edouard DUPIN</maintainer>
<license>Apache-2.0</license>
<build_depend>etk</build_depend>
<build_depend>audio</build_depend>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>etk</run_depend>
<run_depend>audio</run_depend>
</package>

View File

@@ -1,90 +0,0 @@
#!/usr/bin/python
import lutinModule as module
import lutinTools as tools
import lutinDebug as debug
def get_desc():
return "airtaudio : Generic wrapper on all audio interface"
def create(target):
myModule = module.Module(__file__, 'airtaudio', 'LIBRARY')
myModule.add_src_file([
'airtaudio/debug.cpp',
'airtaudio/base.cpp',
'airtaudio/Interface.cpp',
'airtaudio/Api.cpp',
'airtaudio/api/Dummy.cpp',
])
myModule.add_export_flag_CC(['-D__AIRTAUDIO_API_DUMMY_H__'])
if target.name=="Windows":
myModule.add_src_file([
'airtaudio/api/Asio.cpp',
'airtaudio/api/Ds.cpp',
])
# ASIO API on Windows
myModule.add_export_flag_CC(['__WINDOWS_ASIO__'])
# Windows DirectSound API
#myModule.add_export_flag_CC(['__WINDOWS_DS__'])
myModule.add_module_depend(['etk'])
elif target.name=="Linux":
myModule.add_src_file([
'airtaudio/api/Alsa.cpp',
'airtaudio/api/Jack.cpp',
'airtaudio/api/Pulse.cpp',
'airtaudio/api/Oss.cpp'
])
# Linux Alsa API
#myModule.add_export_flag_CC(['-D__LINUX_ALSA__'])
#myModule.add_export_flag_LD("-lasound")
# Linux Jack API
#myModule.add_export_flag_CC(['-D__UNIX_JACK__'])
#myModule.add_export_flag_LD("-ljack")
# Linux PulseAudio API
myModule.add_export_flag_CC(['-D__LINUX_PULSE__'])
myModule.add_export_flag_LD("-lpulse-simple")
myModule.add_export_flag_LD("-lpulse")
#myModule.add_export_flag_CC(['-D__LINUX_OSS__'])
# ...
myModule.add_module_depend(['etk'])
elif target.name=="MacOs":
myModule.add_src_file([
'airtaudio/api/Core.cpp',
'airtaudio/api/Oss.cpp'
])
# MacOsX core
myModule.add_export_flag_CC(['-D__MACOSX_CORE__'])
myModule.add_export_flag_LD("-framework CoreAudio")
myModule.add_module_depend(['etk'])
elif target.name=="IOs":
myModule.add_src_file('airtaudio/api/CoreIos.mm')
# IOsX core
myModule.add_export_flag_CC(['-D__IOS_CORE__'])
myModule.add_export_flag_LD("-framework CoreAudio")
myModule.add_export_flag_LD("-framework AudioToolbox")
myModule.add_module_depend(['etk'])
elif target.name=="Android":
myModule.add_src_file('airtaudio/api/Android.cpp')
# MacOsX core
myModule.add_export_flag_CC(['-D__ANDROID_JAVA__'])
myModule.add_module_depend(['ewol'])
else:
debug.warning("unknow target for AIRTAudio : " + target.name);
myModule.add_export_path(tools.get_current_path(__file__))
# add the currrent module at the
return myModule

181
lutin_audio-orchestra.py Normal file
View File

@@ -0,0 +1,181 @@
#!/usr/bin/python
import lutin.module as module
import lutin.tools as tools
import lutin.debug as debug
def get_type():
	"""Lutin hook: kind of artifact produced by this module."""
	return "LIBRARY"
def get_desc():
	"""Lutin hook: short human-readable description."""
	return "Generic wrapper on all audio interface"
def get_licence():
	"""Lutin hook: license identifier."""
	return "APACHE-2"
def get_compagny_type():
	"""Lutin hook: company type identifier."""
	return "com"
def get_compagny_name():
	"""Lutin hook: company name."""
	return "atria-soft"
def get_maintainer():
	"""Lutin hook: file listing the maintainers."""
	return "authors.txt"
def get_version():
	"""Lutin hook: file containing the module version."""
	return "version.txt"
def create(target, module_name):
	"""Lutin hook: build the audio-orchestra library module.

	The common (cross-platform) sources and headers are always added;
	backend-specific sources and compile flags are appended depending on
	the lutin target type (Windows/Linux/MacOs/IOs/Android).
	"""
	my_module = module.Module(__file__, module_name, get_type())
	# Common sources, built for every target:
	my_module.add_src_file([
		'audio/orchestra/debug.cpp',
		'audio/orchestra/status.cpp',
		'audio/orchestra/type.cpp',
		'audio/orchestra/mode.cpp',
		'audio/orchestra/state.cpp',
		'audio/orchestra/error.cpp',
		'audio/orchestra/base.cpp',
		'audio/orchestra/Interface.cpp',
		'audio/orchestra/Flags.cpp',
		'audio/orchestra/Api.cpp',
		'audio/orchestra/DeviceInfo.cpp',
		'audio/orchestra/StreamOptions.cpp',
		'audio/orchestra/api/Dummy.cpp'
		])
	# Public headers installed with the module:
	my_module.add_header_file([
		'audio/orchestra/debug.h',
		'audio/orchestra/status.h',
		'audio/orchestra/type.h',
		'audio/orchestra/mode.h',
		'audio/orchestra/state.h',
		'audio/orchestra/error.h',
		'audio/orchestra/base.h',
		'audio/orchestra/Interface.h',
		'audio/orchestra/Flags.h',
		'audio/orchestra/Api.h',
		'audio/orchestra/DeviceInfo.h',
		'audio/orchestra/StreamOptions.h',
		'audio/orchestra/CallbackInfo.h',
		'audio/orchestra/StreamParameters.h'
		])
	my_module.add_depend(['audio', 'etk'])
	# add all the time the dummy interface
	my_module.add_flag('c++', ['-DORCHESTRA_BUILD_DUMMY'], export=True)
	# TODO : Add a FILE interface:
	if "Windows" in target.get_type():
		my_module.add_src_file([
			'audio/orchestra/api/Asio.cpp',
			'audio/orchestra/api/Ds.cpp',
			])
		# load optionnal API:
		my_module.add_optionnal_depend('asio', ["c++", "-DORCHESTRA_BUILD_ASIO"])
		my_module.add_optionnal_depend('ds', ["c++", "-DORCHESTRA_BUILD_DS"])
		my_module.add_optionnal_depend('wasapi', ["c++", "-DORCHESTRA_BUILD_WASAPI"])
	elif "Linux" in target.get_type():
		my_module.add_src_file([
			'audio/orchestra/api/Alsa.cpp',
			'audio/orchestra/api/Jack.cpp',
			'audio/orchestra/api/Pulse.cpp',
			'audio/orchestra/api/PulseDeviceList.cpp'
			])
		my_module.add_optionnal_depend('alsa', ["c++", "-DORCHESTRA_BUILD_ALSA"])
		my_module.add_optionnal_depend('jack', ["c++", "-DORCHESTRA_BUILD_JACK"])
		my_module.add_optionnal_depend('pulse', ["c++", "-DORCHESTRA_BUILD_PULSE"])
	elif "MacOs" in target.get_type():
		my_module.add_src_file([
			'audio/orchestra/api/Core.cpp'
			])
		# MacOsX core
		my_module.add_optionnal_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_MACOSX_CORE"])
	elif "IOs" in target.get_type():
		my_module.add_src_file('audio/orchestra/api/CoreIos.mm')
		# IOsX core
		my_module.add_optionnal_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_IOS_CORE"])
	elif "Android" in target.get_type():
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraConstants.java')
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraManagerCallback.java')
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraNative.java')
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraInterfaceInput.java')
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraInterfaceOutput.java')
		my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraManager.java')
		# create inter language interface
		my_module.add_src_file('org.musicdsp.orchestra.OrchestraConstants.javah')
		my_module.add_path(tools.get_current_path(__file__) + '/android/', type='java')
		my_module.add_depend(['SDK', 'jvm-basics', 'ejson'])
		my_module.add_export_flag('c++', ['-DORCHESTRA_BUILD_JAVA'])
		my_module.add_src_file('audio/orchestra/api/Android.cpp')
		my_module.add_src_file('audio/orchestra/api/AndroidNativeInterface.cpp')
		# add tre creator of the basic java class ...
		target.add_action("BINARY", 11, "audio-orchestra-out-wrapper", tool_generate_add_java_section_in_class)
	else:
		# NOTE(review): 'target.name' is used here while the branches above use
		# target.get_type() — confirm the attribute still exists in lutin 2.0.
		debug.warning("unknow target for audio_orchestra : " + target.name);
	my_module.add_path(tools.get_current_path(__file__))
	return my_module
##################################################################
##
## Android specific section
##
##################################################################
def tool_generate_add_java_section_in_class(target, module, package_name):
	"""Lutin action: inject the Orchestra audio-manager glue into the
	generated Android Java Activity.

	Adds the import, the member declaration, the constructor code and the
	forwarding of every Activity life-cycle callback (onCreate ... onDestroy)
	to the OrchestraManager instance. The injected Java source lines below
	must be kept byte-exact.
	"""
	module.pkg_add("GENERATE_SECTION__IMPORT", [
		"import org.musicdsp.orchestra.OrchestraManager;"
		])
	module.pkg_add("GENERATE_SECTION__DECLARE", [
		"private OrchestraManager m_audioManagerHandle;"
		])
	module.pkg_add("GENERATE_SECTION__CONSTRUCTOR", [
		"// load audio maneger if it does not work, it is not critical ...",
		"try {",
		"	m_audioManagerHandle = new OrchestraManager();",
		"} catch (RuntimeException e) {",
		"	Log.e(\"" + package_name + "\", \"Can not load Audio interface (maybe not really needed) :\" + e);",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_CREATE", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onCreate();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_START", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onStart();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_RESTART", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onRestart();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_RESUME", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onResume();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_PAUSE", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onPause();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_STOP", [
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onStop();",
		"}"
		])
	module.pkg_add("GENERATE_SECTION__ON_DESTROY", [
		"// Destroy the AdView.",
		"if (m_audioManagerHandle != null) {",
		"	m_audioManagerHandle.onDestroy();",
		"}"
		])
View File

@@ -0,0 +1,37 @@
#!/usr/bin/python
import lutin.module as module
import lutin.tools as tools
import lutin.debug as debug
def get_type():
	"""Lutin hook: artifact type."""
	return "BINARY"
def get_sub_type():
	"""Lutin hook: artifact sub-type."""
	return "TOOLS"
def get_desc():
	"""Lutin hook: short description."""
	return "'in' tool for orchestra"
def get_licence():
	"""Lutin hook: license identifier."""
	return "APACHE-2"
def get_compagny_type():
	"""Lutin hook: company type identifier."""
	return "com"
def get_compagny_name():
	"""Lutin hook: company name."""
	return "atria-soft"
def get_maintainer():
	"""Lutin hook: list of maintainers."""
	return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def create(target, module_name):
	"""Lutin hook: build the tool binary from its single source file."""
	my_module = module.Module(__file__, module_name, get_type())
	my_module.add_src_file([
		'orchestra-in.cpp'
		])
	my_module.add_depend(['audio-orchestra', 'test-debug'])
	return my_module

View File

@@ -0,0 +1,37 @@
#!/usr/bin/python
import lutin.module as module
import lutin.tools as tools
import lutin.debug as debug
def get_type():
	"""Lutin hook: artifact type."""
	return "BINARY"
def get_sub_type():
	"""Lutin hook: artifact sub-type."""
	return "TOOLS"
def get_desc():
	"""Lutin hook: short description."""
	return "'list' i/o tool for orchestra"
def get_licence():
	"""Lutin hook: license identifier."""
	return "APACHE-2"
def get_compagny_type():
	"""Lutin hook: company type identifier."""
	return "com"
def get_compagny_name():
	"""Lutin hook: company name."""
	return "atria-soft"
def get_maintainer():
	"""Lutin hook: list of maintainers."""
	return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def create(target, module_name):
	"""Lutin hook: build the tool binary from its single source file."""
	my_module = module.Module(__file__, module_name, get_type())
	my_module.add_src_file([
		'orchestra-list.cpp'
		])
	my_module.add_depend(['audio-orchestra', 'test-debug'])
	return my_module

View File

@@ -0,0 +1,37 @@
#!/usr/bin/python
import lutin.module as module
import lutin.tools as tools
import lutin.debug as debug
def get_type():
	"""Lutin hook: artifact type."""
	return "BINARY"
def get_sub_type():
	"""Lutin hook: artifact sub-type."""
	return "TOOLS"
def get_desc():
	"""Lutin hook: short description."""
	return "'out' tool for orchestra"
def get_licence():
	"""Lutin hook: license identifier."""
	return "APACHE-2"
def get_compagny_type():
	"""Lutin hook: company type identifier."""
	return "com"
def get_compagny_name():
	"""Lutin hook: company name."""
	return "atria-soft"
def get_maintainer():
	"""Lutin hook: list of maintainers."""
	return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def create(target, module_name):
	"""Lutin hook: build the tool binary from its single source file."""
	my_module = module.Module(__file__, module_name, get_type())
	my_module.add_src_file([
		'orchestra-out.cpp'
		])
	my_module.add_depend(['audio-orchestra', 'test-debug'])
	return my_module

28
tools/orchestra-in.cpp Normal file
View File

@@ -0,0 +1,28 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.h>
#include <test-debug/debug.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
/**
 * @brief Entry point of the 'in' tool. Currently a stub (see TODO below).
 */
int main(int _argc, const char **_argv) {
	// the only one init for etk:
	etk::init(_argc, _argv);
	// Minimal command-line handling: only the help flag is recognized.
	for (int32_t argId=0; argId<_argc ; ++argId) {
		std::string arg = _argv[argId];
		if (    arg == "-h"
		     || arg == "--help") {
			std::cout << "Help : " << std::endl;
			std::cout << "	./xxx ---" << std::endl;
			exit(0);
		}
	}
	audio::orchestra::Interface interface;
	TEST_PRINT("TODO : Need to write it");
	return 0;
}

39
tools/orchestra-list.cpp Normal file
View File

@@ -0,0 +1,39 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.h>
#include <test-debug/debug.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
/**
 * @brief Entry point of the 'list' tool: display every device of every
 * available audio backend.
 */
int main(int _argc, const char **_argv) {
	// the only one init for etk:
	etk::init(_argc, _argv);
	// Minimal command-line handling: only the help flag is recognized.
	for (int32_t argId=0; argId<_argc ; ++argId) {
		std::string arg = _argv[argId];
		if (    arg == "-h"
		     || arg == "--help") {
			std::cout << "Help : " << std::endl;
			std::cout << "	./xxx ---" << std::endl;
			exit(0);
		}
	}
	audio::orchestra::Interface interface;
	// Iterate over every backend and dump its device list:
	std::vector<std::string> apiList = interface.getListApi();
	TEST_PRINT("Find : " << apiList.size() << " apis.");
	for (auto &apiName : apiList) {
		interface.instanciate(apiName);
		TEST_PRINT("Device list for : '" << apiName << "'");
		for (int32_t devId=0; devId<interface.getDeviceCount(); ++devId) {
			audio::orchestra::DeviceInfo info = interface.getDeviceInfo(devId);
			TEST_PRINT("	" << devId << " name :" << info.name);
			info.display(2);
		}
		interface.clear();
	}
	return 0;
}

27
tools/orchestra-out.cpp Normal file
View File

@@ -0,0 +1,27 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.h>
#include <test-debug/debug.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
/**
 * @brief Entry point of the 'out' tool. Currently a stub (see TODO below).
 */
int main(int _argc, const char **_argv) {
	// the only one init for etk:
	etk::init(_argc, _argv);
	// Minimal command-line handling: only the help flag is recognized.
	for (int32_t argId=0; argId<_argc ; ++argId) {
		std::string arg = _argv[argId];
		if (    arg == "-h"
		     || arg == "--help") {
			std::cout << "Help : " << std::endl;
			std::cout << "	./xxx ---" << std::endl;
			exit(0);
		}
	}
	audio::orchestra::Interface interface;
	TEST_PRINT("TODO : Need to write it");
	return 0;
}

1
version.txt Normal file
View File

@@ -0,0 +1 @@
0.3.1