Compare commits

...

61 Commits

Author SHA1 Message Date
5a9c89e4fe [RELEASE] Release v1.0.0 2021-02-16 21:47:42 +01:00
69f60f45fe [DEBUG] update new API of lutin log 2019-05-03 10:18:23 +02:00
28237ad66f [DEV] update etk null 2018-06-19 22:13:48 +02:00
0b8c0e3fd4 [DEV] update to the new ETK allocator wrapper 2017-10-21 19:05:21 +02:00
309b63254f [DEV] update new ETK 2017-09-26 15:57:44 +02:00
4bc3ff2f1a [DEV] continue removing STL 2017-09-14 00:59:21 +02:00
dfc867db87 [DEV] remove STL 2017-09-07 23:38:26 +02:00
a4ba319ffc [DEV] continue removing stl 2017-08-28 00:08:41 +02:00
bcff6508ab [DEBUG] corect puseaudio include 2017-04-12 23:35:49 +02:00
2a9f28b962 [DEV] update dev tag version 2016-10-24 21:55:23 +02:00
c3a13b4a39 [RELEASE] new version 0.4.0 2016-10-24 21:55:23 +02:00
7ab2025721 [CI] update integration 2016-10-24 21:55:23 +02:00
34eb6d5cf0 [DEV] remove dependency of unistd.h 2016-10-13 21:29:18 +02:00
429a1cd062 [DEBUG] correct builder interface 2016-10-06 22:19:47 +02:00
6e534cbf22 [DEV] Update new lutin 2.2.0 (no legacy support) 2016-10-04 23:41:29 +02:00
c7d5c42ea8 [DEBUG] correct .hpp port + clean libC include 2016-10-02 23:42:15 +02:00
918d573f18 [DEV/API] change .h in .hpp 2016-10-02 22:06:09 +02:00
fe23d699d1 [DEBUG] correct windows port 2016-09-27 23:18:27 +02:00
e0a01e4280 [DEBUG] try to correct IOS interface 2016-09-23 23:17:39 +02:00
fd1651924a [DOC] wrong website 2016-09-16 21:05:30 +02:00
ecb2b4d99e [DOC] basic documantation 2016-09-14 22:00:12 +02:00
276e4ce356 [DEV] remove catkin build methode 2016-09-14 21:43:24 +02:00
943323947d [DEV] update dev tag version 2016-09-12 21:07:12 +02:00
62c2f2fd25 [RELEASE] create release 0.3.1 2016-09-12 21:06:37 +02:00
9a96211853 [DEV] update to future lutin 2.0 2016-09-08 21:35:02 +02:00
7e104a1f72 [DEV] update to future lutin 2.0 and update to new ejson 2016-09-07 22:05:42 +02:00
94e2bbabe3 [DEV] update dev tag version 2016-08-30 22:54:57 +02:00
b1eebd75e9 [RELEASE] create release 0.3.0 2016-08-30 22:54:08 +02:00
8174d45416 [DEV] add support of multiple input stream type 2016-08-22 21:52:31 +02:00
db89c092be [DEV] update sharedPtr 2016-07-19 21:43:58 +02:00
151a3ddcf4 [DEV] rm __class__ 2016-05-02 22:01:55 +02:00
bc78a1858a [DEV] update the change on 'enum' to 'enum class' 2016-04-29 23:16:07 +02:00
f4f1ee888b [DEBUG] correct build on windows 2016-03-17 22:13:08 +01:00
e7511a0b92 [DEV] update of external of elog and ethread 2016-03-08 21:29:34 +01:00
b5bdb4e2db [DEV] replace 'include guard' with 'pragma once' 2016-02-02 21:18:54 +01:00
e90271754f [DEBUG] corect oss build (deprecated) 2015-10-20 21:25:11 +02:00
c60d6a8240 [DEV] update new lutin 0.8.0 2015-10-14 21:21:03 +02:00
4d2758cc63 [DEV] remove alsa log 2015-09-29 21:12:20 +02:00
2b665e9383 [DEV] update next lutin version and debug android play audio 2015-09-24 21:44:04 +02:00
71fc8af983 [DEV] update Build interface 2015-09-14 21:11:04 +02:00
145d930567 [CI] update travis with new interface (no sudo) 2015-08-24 23:55:27 +02:00
94c16ad846 [DEV] simplify APIs and remove OSS (not so used) 2015-07-10 23:42:42 +02:00
a8c1a92c7a [DEV] continue rework of list of device search 2015-07-07 22:39:09 +02:00
22dd01978a [DEBUG] correct the Mac audio interface 2015-07-07 21:37:03 +02:00
09e32a815a [DEV] update java interfaec of Input and output 2015-07-01 22:06:29 +02:00
3a0ab73a3a [DEV] rename android interface for java 2015-06-30 23:25:34 +02:00
36b0231a11 [DEV correct audio output 2015-06-26 22:07:50 +02:00
7aad6c26c4 [DEV] correct some interface of android 2015-06-23 21:09:57 +02:00
fbd6eceee6 [DEV] continue integration of audio interface 2015-06-22 23:11:04 +02:00
7d0a38e087 [DEV] continue dev of android audio interface 2015-06-22 21:39:29 +02:00
07684a0e54 [DEV] real integration for java 2015-06-21 21:58:15 +02:00
54ce284b1b [DEV] pulseaudio missing compilation flag 2015-06-16 21:34:51 +02:00
7b0316a8aa [DEV] rework continue (better integration of pulseaudio and low level devices 2015-06-16 21:08:23 +02:00
4b5bbd9626 [DEV] try to add a list of device for pulse 2015-06-16 21:08:23 +02:00
57c9cc1132 [CI] reme run of the library" 2015-06-15 22:23:55 +02:00
cc12384aea [DOC] create readme 2015-06-15 22:22:35 +02:00
06290dd92d [DEV] update new worktree 2015-06-15 19:27:55 +02:00
fa618958b8 [DEV] add basic tools and change some API 2015-06-11 21:39:56 +02:00
bb16adc099 [DEV] alsa poll mode availlable ... 2015-06-11 21:33:32 +02:00
dbd3c18ac3 [DEV] rework for stream read/write in mmap 2015-06-07 22:32:54 +02:00
9dec54d4c7 [DEV] start rework Alsa API to support poll event and MMAP system 2015-06-05 22:00:17 +02:00
78 changed files with 4783 additions and 3616 deletions

.travis.yml (new file, 94 additions)

@@ -0,0 +1,94 @@
language: cpp
sudo: required
dist: trusty

branches:
  only:
    - master
    - dev

addons:
  apt:
    sources:
      - ubuntu-toolchain-r-test
    packages:
      - g++-4.9
      - expect
      - binutils-mingw-w64-x86-64 # 64bit MinGW
      - gcc-mingw-w64-x86-64
      - g++-mingw-w64-x86-64

matrix:
  include:
    - os: linux
      env: CONF=release BUILDER=gcc TARGET=Linux TAG=Linux COMPILATOR_OPTION="--compilator-version=4.9" GCOV=--gcov
      compiler: gcc
    - os: linux
      env: CONF=debug BUILDER=clang TARGET=Linux
      compiler: clang
    - os: linux
      env: CONF=release BUILDER=gcc TARGET=Windows TAG=Mingw
      compiler: x86_64-w64-mingw32-gcc
    - os: linux
      env: CONF=release BUILDER=gcc TARGET=Android TAG=Android DISABLE_PACKAGE=-p
      compiler: gcc
    - os: osx
      env: CONF=release BUILDER=clang TARGET=MacOs TAG=MacOs
      compiler: clang
    - os: osx
      env: CONF=release BUILDER=clang TARGET=IOs TAG=IOs
      compiler: clang

install:
  - cd ..
  - pip install --user lutin
  - if [ "$TAG" == "Android" ]; then
      git clone --depth 1 --branch master https://github.com/HeeroYui/android-download-tool;
      ./android-download-tool/dl-android.sh;
    fi
  - git clone --depth 1 --branch master https://github.com/atria-soft/ci.git
  - cd -

before_script:
  - cd ..
  - git clone https://github.com/atria-soft/etk.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/elog.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/ememory.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/echrono.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/ethread.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/ejson.git -b $TRAVIS_BRANCH
  - git clone https://github.com/atria-soft/jvm-basics.git -b $TRAVIS_BRANCH
  - git clone https://github.com/musicdsp/audio.git -b $TRAVIS_BRANCH
  - git clone https://github.com/generic-library/gtest-lutin.git --recursive
  - git clone https://github.com/generic-library/z-lutin.git --recursive
  - pwd
  - ls -l
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then
      export PATH=$PATH:/Users/travis/Library/Python/2.7/bin/;
    fi
  - ./ci/build_send.py --tag=$TAG --status=START;

script:
  - lutin -w -j4 -C -P -t$TARGET -c $BUILDER $COMPILATOR_OPTION $BUS -m $CONF $GCOV $DISABLE_PACKAGE audio-orchestra; STATUS=$?
  - ./ci/build_send.py --tag=$TAG --status="$STATUS";

after_script:
  - if [ "$GCOV" != "" ]; then
      ./ci/warning_send.py --find-path ./out/Linux_x86_64/$CONF/build/$BUILDER/audio-orchestra/;
    fi
  #- lutin -w -j4 -C -P -t$TARGET -c $BUILDER $COMPILATOR_OPTION $BUS -m $CONF $GCOV $DISABLE_PACKAGE audio-orchestra-test?run:--elog-level=3 | tee out_test.txt
  #- if [ "$GCOV" != "" ]; then
  #    ./ci/test_send.py --file=out_test.txt;
  #    lutin -C -P -t $TARGET -c $BUILDER $COMPILATOR_OPTION $BUS -m $CONF -p audio-orchestra?gcov;
  #    ./ci/coverage_send.py --json=out/Linux_x86_64/$CONF/build/$BUILDER/audio-orchestra/audio-orchestra_coverage.json;
  #  fi

notifications:
  email:
    - yui.heero@gmail.com

README.md (new file, 4 additions)

@@ -0,0 +1,4 @@
# audio-orchestra
(MIT) audio: backend to access audio hardware (fork of the original RTAudio lib)
[![Build Status](https://travis-ci.org/musicdsp/audio-orchestra.svg?branch=master)](https://travis-ci.org/musicdsp/audio-orchestra)
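For orientation, here is a minimal usage sketch of the API touched by this compare. It is assembled only from declarations visible in the diffs further down (audio::orchestra::Interface, openStream(), StreamParameters, the AirTAudioCallback prototype). The format constant audio::format_int16, the default-constructibility of StreamParameters, and passing a plain lambda where an etk::Function is expected are assumptions, not something this compare confirms.

#include <audio/orchestra/Interface.hpp>
#include <cstring>
#include <cstdint>

int main() {
    audio::orchestra::Interface iface;
    // Let the library pick the first API that reports at least one device.
    if (iface.instanciate() != audio::orchestra::error_none) {
        return -1;
    }
    audio::orchestra::StreamParameters params; // default ctor assumed
    params.deviceId = iface.getDefaultOutputDevice();
    params.nChannels = 2;      // stereo output
    params.firstChannel = 0;
    uint32_t bufferFrames = 512;
    // Callback signature taken from AirTAudioCallback in Api.hpp (see below).
    auto callback = [](const void* _inputBuffer,
                       const audio::Time& _timeInput,
                       void* _outputBuffer,
                       const audio::Time& _timeOutput,
                       uint32_t _nbChunk,
                       const etk::Vector<audio::orchestra::status>& _status) -> int32_t {
        // Fill the output with silence: _nbChunk frames * 2 channels * int16 (assumed format).
        memset(_outputBuffer, 0, _nbChunk * 2 * sizeof(int16_t));
        return 0;
    };
    iface.openStream(&params, nullptr /* output only */,
                     audio::format_int16, // assumed enum value name
                     48000, &bufferFrames, callback);
    // startStream()/stopStream() are declared on Api below; Interface presumably forwards them.
    iface.closeStream();
    return 0;
}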


@@ -0,0 +1,13 @@
/**
* @author Edouard DUPIN
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
public interface OrchestraConstants {
public static final int BUFFER_SIZE = 512;
}


@@ -0,0 +1,119 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.media.AudioRecord;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;
public class OrchestraInterfaceInput implements Runnable, OrchestraConstants {
private Thread m_thread = null;
private int m_uid = -1;
private OrchestraNative m_orchestraNativeHandle;
private boolean m_stop = false;
private boolean m_suspend = false;
private AudioRecord m_audio = null;
private int m_sampleRate = 48000;
private int m_nbChannel = 2;
private int m_format = 1;
private int m_bufferSize = BUFFER_SIZE;
public OrchestraInterfaceInput(int _id, OrchestraNative _instance, int _idDevice, int _sampleRate, int _nbChannel, int _format) {
Log.d("InterfaceInput", "new: Input");
m_uid = _id;
m_orchestraNativeHandle = _instance;
m_stop = false;
m_suspend = false;
m_sampleRate = _sampleRate;
m_nbChannel = _nbChannel;
m_format = _format;
m_bufferSize = BUFFER_SIZE * m_nbChannel;
}
public int getUId() {
return m_uid;
}
public void run() {
Log.e("InterfaceInput", "RUN (start)");
int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
// we keep the minimum buffer size, otherwise the delay is too big ...
// TODO : int bufferSize = AudioRecord.getMinBufferSize(m_sampleRate, channelConfig, audioFormat);
int config = 0;
if (m_nbChannel == 1) {
config = AudioFormat.CHANNEL_IN_MONO;
} else {
config = AudioFormat.CHANNEL_IN_STEREO;
}
// Create a streaming AudioTrack for music playback
short[] streamBuffer = new short[m_bufferSize];
m_audio = new AudioRecord(MediaRecorder.AudioSource.MIC,
m_sampleRate,
config,
audioFormat,
m_bufferSize);
m_audio.startRecording();
while ( m_stop == false
&& m_suspend == false) {
// Stream PCM data into the local buffer
m_audio.read(streamBuffer, 0, m_bufferSize);
// Send it to C++
m_orchestraNativeHandle.record(m_uid, streamBuffer, m_bufferSize/m_nbChannel);
}
m_audio.stop();
m_audio = null;
streamBuffer = null;
Log.e("InterfaceInput", "RUN (stop)");
}
public void autoStart() {
m_stop=false;
if (m_suspend == false) {
Log.e("InterfaceInput", "Create thread");
m_thread = new Thread(this);
Log.e("InterfaceInput", "start thread");
m_thread.start();
Log.e("InterfaceInput", "start thread (done)");
}
}
public void autoStop() {
if(m_audio == null) {
return;
}
m_stop=true;
m_thread = null;
/*
try {
super.join();
} catch(InterruptedException e) { }
*/
}
public void activityResume() {
m_suspend = false;
if (m_stop == false) {
Log.i("InterfaceInput", "Resume audio stream : " + m_uid);
m_thread = new Thread(this);
m_thread.start();
}
}
public void activityPause() {
if(m_audio == null) {
return;
}
m_suspend = true;
Log.i("InterfaceInput", "Pause audio stream : " + m_uid);
m_thread = null;
}
}


@@ -0,0 +1,108 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.media.AudioTrack;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.util.Log;
public class OrchestraInterfaceOutput extends Thread implements OrchestraConstants {
private int m_uid = -1;
private OrchestraNative m_orchestraNativeHandle;
private boolean m_stop = false;
private boolean m_suspend = false;
private AudioTrack m_audio = null;
private int m_sampleRate = 48000;
private int m_nbChannel = 2;
private int m_format = 1;
private int m_bufferSize = BUFFER_SIZE;
public OrchestraInterfaceOutput(int _id, OrchestraNative _instance, int _idDevice, int _sampleRate, int _nbChannel, int _format) {
Log.d("InterfaceOutput", "new: output");
m_uid = _id;
m_orchestraNativeHandle = _instance;
m_stop = true;
m_sampleRate = _sampleRate;
m_nbChannel = _nbChannel;
m_format = _format;
m_bufferSize = BUFFER_SIZE * m_nbChannel;
}
public int getUId() {
return m_uid;
}
public void run() {
Log.e("InterfaceOutput", "RUN (start)");
int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
// we keep the minimum buffer size, otherwise the delay is too big ...
//int bufferSize = AudioTrack.getMinBufferSize(m_sampleRate, channelConfig, audioFormat);
int config = 0;
if (m_nbChannel == 1) {
config = AudioFormat.CHANNEL_OUT_MONO;
} else if (m_nbChannel == 4) {
config = AudioFormat.CHANNEL_OUT_QUAD;
} else {
config = AudioFormat.CHANNEL_OUT_STEREO;
}
// Create a streaming AudioTrack for music playback
short[] streamBuffer = new short[m_bufferSize];
m_audio = new AudioTrack(AudioManager.STREAM_MUSIC,
m_sampleRate,
config,
audioFormat,
m_bufferSize,
AudioTrack.MODE_STREAM);
m_audio.play();
//m_audio.setPositionNotificationPeriod(2048);
while (m_stop == false) {
// Fill buffer with PCM data from C++
m_orchestraNativeHandle.playback(m_uid, streamBuffer, m_bufferSize/m_nbChannel);
// Stream PCM data into the music AudioTrack
m_audio.write(streamBuffer, 0, m_bufferSize);
}
m_audio.flush();
m_audio.stop();
m_audio = null;
streamBuffer = null;
Log.e("InterfaceOutput", "RUN (stop)");
}
public void autoStart() {
m_stop=false;
this.start();
}
public void autoStop() {
if(m_audio == null) {
return;
}
m_stop=true;
try {
super.join();
} catch(InterruptedException e) { }
}
public void activityResume() {
if (m_audio != null) {
Log.i("InterfaceOutput", "Resume audio stream : " + m_uid);
m_audio.play();
}
}
public void activityPause() {
if(m_audio == null) {
return;
}
if (m_audio != null) {
Log.i("InterfaceOutput", "Pause audio stream : " + m_uid);
m_audio.pause();
}
}
}


@@ -0,0 +1,262 @@
/**
* @author Edouard DUPIN
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import android.util.Log;
import java.util.Vector;
//import org.musicdsp.orchestra.Constants;
//import org.musicdsp.orchestra.ManagerCallback;
//import org.musicdsp.orchestra.Orchestra;
//import org.musicdsp.orchestra.InterfaceOutput;
//import org.musicdsp.orchestra.InterfaceInput;
/**
* @brief Class :
*
*/
public class OrchestraManager implements OrchestraManagerCallback, OrchestraConstants {
private OrchestraNative m_orchestraHandle;
private int m_uid = 0;
private Vector<OrchestraInterfaceOutput> m_outputList;
private Vector<OrchestraInterfaceInput> m_inputList;
public OrchestraManager() {
// set the java environment in the C sources:
m_orchestraHandle = new OrchestraNative(this);
m_outputList = new Vector<OrchestraInterfaceOutput>();
m_inputList = new Vector<OrchestraInterfaceInput>();
}
public int getDeviceCount() {
Log.e("Manager", "Get device List");
return 2;
}
public String getDeviceProperty(int _idDevice) {
if (_idDevice == 0) {
return "{\n"
+ " name:'speaker',\n"
+ " type:'output',\n"
+ " sample-rate:[8000,16000,24000,32000,48000,96000],\n"
+ " channels:['front-left','front-right'],\n"
+ " format:['int16'],\n"
+ " default:true\n"
+ "}";
} else if (_idDevice == 1) {
return "{\n"
+ " name:'microphone',\n"
+ " type:'input',\n"
+ " sample-rate:[8000,16000,24000,32000,48000,96000],\n"
+ " channels:['front-left','front-right'],\n"
+ " format:['int16'],\n"
+ " default:true\n"
+ "}";
} else {
return "{}";
}
}
public int openDeviceOutput(int _idDevice, int _freq, int _nbChannel, int _format) {
OrchestraInterfaceOutput iface = new OrchestraInterfaceOutput(m_uid, m_orchestraHandle, _idDevice, _freq, _nbChannel, _format);
m_uid++;
Log.e("Manager", "Open device Output: " + _idDevice + " with m_uid=" + (m_uid-1));
if (iface != null) {
m_outputList.add(iface);
Log.e("Manager", "Added element count=" + m_outputList.size());
return m_uid-1;
}
return -1;
}
public int openDeviceInput(int _idDevice, int _freq, int _nbChannel, int _format) {
OrchestraInterfaceInput iface = new OrchestraInterfaceInput(m_uid, m_orchestraHandle, _idDevice, _freq, _nbChannel, _format);
m_uid++;
Log.e("Manager", "Open device Input: " + _idDevice + " with m_uid=" + (m_uid-1));
if (iface != null) {
m_inputList.add(iface);
return m_uid-1;
}
return -1;
}
public boolean closeDevice(int _uniqueID) {
Log.e("Manager", "Close device : " + _uniqueID);
if (_uniqueID<0) {
Log.e("Manager", "Can not Close device with m_uid: " + _uniqueID);
return false;
}
// find the Element with his ID:
if (m_inputList != null) {
for (int iii=0; iii<m_inputList.size(); iii++) {
if (m_inputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_inputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_inputList.remove(iii);
return true;
}
}
}
if (m_outputList != null) {
for (int iii=0; iii<m_outputList.size(); iii++) {
if (m_outputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_outputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_outputList.remove(iii);
return true;
}
}
}
Log.e("Manager", "Can not start device with m_uid: " + _uniqueID + " Element does not exist ...");
return false;
}
public boolean start(int _uniqueID) {
Log.e("Manager", "start device : " + _uniqueID);
if (_uniqueID<0) {
Log.e("Manager", "Can not start device with m_uid: " + _uniqueID);
return false;
}
// find the Element with his ID:
if (m_inputList != null) {
for (int iii=0; iii<m_inputList.size(); iii++) {
if (m_inputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_inputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_inputList.get(iii).autoStart();
return true;
}
}
}
if (m_outputList != null) {
for (int iii=0; iii<m_outputList.size(); iii++) {
if (m_outputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_outputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_outputList.get(iii).autoStart();
return true;
}
}
}
Log.e("Manager", "Can not start device with UID: " + _uniqueID + " Element does not exist ...");
return false;
}
public boolean stop(int _uniqueID) {
Log.e("Manager", "stop device : " + _uniqueID);
if (_uniqueID<0) {
Log.e("Manager", "Can not stop device with UID: " + _uniqueID);
return false;
}
// find the Element with his ID:
if (m_inputList != null) {
for (int iii=0; iii<m_inputList.size(); iii++) {
if (m_inputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_inputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_inputList.get(iii).autoStop();
return true;
}
}
}
if (m_outputList != null) {
for (int iii=0; iii<m_outputList.size(); iii++) {
if (m_outputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
if (m_outputList.get(iii).getUId() == _uniqueID) {
// find it ...
m_outputList.get(iii).autoStop();
return true;
}
}
}
Log.e("Manager", "Can not stop device with UID: " + _uniqueID + " Element does not exist ...");
return false;
}
public void onCreate() {
Log.w("Manager", "onCreate ...");
// nothing to do ...
}
public void onStart() {
Log.w("Manager", "onStart ...");
// nothing to do ...
}
public void onRestart() {
Log.w("Manager", "onRestart ...");
// nothing to do ...
}
public void onResume() {
Log.w("Manager", "onResume ...");
// find the Element with his ID:
if (m_inputList != null) {
for (int iii=0; iii<m_inputList.size(); iii++) {
if (m_inputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
m_inputList.get(iii).activityResume();
}
}
if (m_outputList != null) {
for (int iii=0; iii<m_outputList.size(); iii++) {
if (m_outputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
m_outputList.get(iii).activityResume();
}
}
}
public void onPause() {
Log.w("Manager", "onPause ...");
// find the Element with his ID:
if (m_inputList != null) {
for (int iii=0; iii<m_inputList.size(); iii++) {
if (m_inputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
m_inputList.get(iii).activityPause();
}
}
if (m_outputList != null) {
for (int iii=0; iii<m_outputList.size(); iii++) {
if (m_outputList.get(iii) == null) {
Log.e("Manager", "Null input element: " + iii);
continue;
}
m_outputList.get(iii).activityPause();
}
}
}
public void onStop() {
Log.w("Manager", "onStop ...");
}
public void onDestroy() {
Log.w("Manager", "onDestroy ...");
}
}


@@ -0,0 +1,19 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
public interface OrchestraManagerCallback {
public int getDeviceCount();
public String getDeviceProperty(int _idDevice);
public int openDeviceInput(int _idDevice, int _sampleRate, int _nbChannel, int _format);
public int openDeviceOutput(int _idDevice, int _sampleRate, int _nbChannel, int _format);
public boolean closeDevice(int _uniqueID);
public boolean start(int _uniqueID);
public boolean stop(int _uniqueID);
}


@@ -0,0 +1,43 @@
/**
* @author Edouard DUPIN, Kevin BILLONNEAU
*
* @copyright 2015, Edouard DUPIN, all right reserved
*
* @license APACHE v2.0 (see license file)
*/
package org.musicdsp.orchestra;
import java.lang.UnsatisfiedLinkError;
import java.lang.RuntimeException;
import android.util.Log;
public class OrchestraNative {
public <T extends OrchestraManagerCallback> OrchestraNative(T _managerInstance) {
try {
NNsetJavaManager(_managerInstance);
} catch (java.lang.UnsatisfiedLinkError e) {
Log.e("Orchestra", "JNI binding not present ...");
throw new RuntimeException("Orchestra binding not present ...");
}
Log.d("Orchestra", "new ...");
}
public void setManagerRemove() {
NNsetJavaManagerRemove();
}
public void playback(int _flowId, short[] _bufferData, int _nbChunk) {
NNPlayback(_flowId, _bufferData, _nbChunk);
}
public void record(int _flowId, short[] _bufferData, int _nbChunk) {
NNRecord(_flowId, _bufferData, _nbChunk);
}
private native <T extends OrchestraManagerCallback> void NNsetJavaManager(T _managerInstance);
private native void NNsetJavaManagerRemove();
private native void NNPlayback(int _flowId, short[] _bufferData, int _nbChunk);
private native void NNRecord(int _flowId, short[] _bufferData, int _nbChunk);
}


@@ -5,48 +5,43 @@
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#undef __class__
#define __class__ "api"
//#include <etk/types.hpp>
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <etk/types.hpp>
// Static variable definitions.
const std::vector<uint32_t>& audio::orchestra::genericSampleRate() {
static std::vector<uint32_t> list;
const etk::Vector<uint32_t>& audio::orchestra::genericSampleRate() {
static etk::Vector<uint32_t> list;
if (list.size() == 0) {
list.push_back(4000);
list.push_back(5512);
list.push_back(8000);
list.push_back(9600);
list.push_back(11025);
list.push_back(16000);
list.push_back(22050);
list.push_back(32000);
list.push_back(44100);
list.push_back(48000);
list.push_back(64000);
list.push_back(88200);
list.push_back(96000);
list.push_back(128000);
list.push_back(176400);
list.push_back(192000);
list.pushBack(4000);
list.pushBack(5512);
list.pushBack(8000);
list.pushBack(9600);
list.pushBack(11025);
list.pushBack(16000);
list.pushBack(22050);
list.pushBack(32000);
list.pushBack(44100);
list.pushBack(48000);
list.pushBack(64000);
list.pushBack(88200);
list.pushBack(96000);
list.pushBack(128000);
list.pushBack(176400);
list.pushBack(192000);
list.pushBack(256000);
}
return list;
};
audio::orchestra::Api::Api() :
m_callback(nullptr),
m_deviceBuffer(nullptr) {
m_callback(null),
m_deviceBuffer(null) {
m_device[0] = 11111;
m_device[1] = 11111;
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
m_mode = audio::orchestra::mode_unknow;
}
@@ -57,34 +52,34 @@ audio::orchestra::Api::~Api() {
enum audio::orchestra::error audio::orchestra::Api::startStream() {
ATA_VERBOSE("Start Stream");
m_startTime = audio::Time::now();
m_duration = std11::chrono::microseconds(0);
m_duration = echrono::microseconds(0);
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra::StreamParameters* _oParams,
audio::orchestra::StreamParameters* _iParams,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options) {
if (m_state != audio::orchestra::state_closed) {
audio::orchestra::StreamParameters* _iParams,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options) {
if (m_state != audio::orchestra::state::closed) {
ATA_ERROR("a stream is already open!");
return audio::orchestra::error_invalidUse;
}
if ( _oParams != nullptr
if ( _oParams != null
&& _oParams->nChannels < 1) {
ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one.");
ATA_ERROR("a non-null output StreamParameters structure cannot have an nChannels value less than one.");
return audio::orchestra::error_invalidUse;
}
if ( _iParams != nullptr
if ( _iParams != null
&& _iParams->nChannels < 1) {
ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one.");
ATA_ERROR("a non-null input StreamParameters structure cannot have an nChannels value less than one.");
return audio::orchestra::error_invalidUse;
}
if ( _oParams == nullptr
&& _iParams == nullptr) {
ATA_ERROR("input and output StreamParameters structures are both nullptr!");
if ( _oParams == null
&& _iParams == null) {
ATA_ERROR("input and output StreamParameters structures are both null!");
return audio::orchestra::error_invalidUse;
}
if (audio::getFormatBytes(_format) == 0) {
@@ -93,7 +88,7 @@ enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra:
}
uint32_t nDevices = getDeviceCount();
uint32_t oChannels = 0;
if (_oParams != nullptr) {
if (_oParams != null) {
oChannels = _oParams->nChannels;
if ( _oParams->deviceId >= nDevices
&& _oParams->deviceName == "") {
@@ -102,7 +97,7 @@ enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra:
}
}
uint32_t iChannels = 0;
if (_iParams != nullptr) {
if (_iParams != null) {
iChannels = _iParams->nChannels;
if ( _iParams->deviceId >= nDevices
&& _iParams->deviceName == "") {
@@ -114,23 +109,23 @@ enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra:
bool result;
if (oChannels > 0) {
if (_oParams->deviceId == -1) {
result = probeDeviceOpenName(_oParams->deviceName,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
result = openName(_oParams->deviceName,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
} else {
result = probeDeviceOpen(_oParams->deviceId,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
result = open(_oParams->deviceId,
audio::orchestra::mode_output,
oChannels,
_oParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
}
if (result == false) {
ATA_ERROR("system ERROR");
@@ -139,23 +134,23 @@ enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra:
}
if (iChannels > 0) {
if (_iParams->deviceId == -1) {
result = probeDeviceOpenName(_iParams->deviceName,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
result = openName(_iParams->deviceName,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
} else {
result = probeDeviceOpen(_iParams->deviceId,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
result = open(_iParams->deviceId,
audio::orchestra::mode_input,
iChannels,
_iParams->firstChannel,
_sampleRate,
_format,
_bufferFrames,
_options);
}
if (result == false) {
if (oChannels > 0) {
@@ -167,7 +162,7 @@ enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra:
}
m_callback = _callback;
//_options.numberOfBuffers = m_nBuffers;
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
return audio::orchestra::error_none;
}
@@ -187,14 +182,14 @@ enum audio::orchestra::error audio::orchestra::Api::closeStream() {
return audio::orchestra::error_none;
}
bool audio::orchestra::Api::probeDeviceOpen(uint32_t /*device*/,
audio::orchestra::mode /*mode*/,
uint32_t /*channels*/,
uint32_t /*firstChannel*/,
uint32_t /*sampleRate*/,
audio::format /*format*/,
uint32_t * /*bufferSize*/,
const audio::orchestra::StreamOptions& /*options*/) {
bool audio::orchestra::Api::open(uint32_t /*device*/,
audio::orchestra::mode /*mode*/,
uint32_t /*channels*/,
uint32_t /*firstChannel*/,
uint32_t /*sampleRate*/,
audio::format /*format*/,
uint32_t * /*bufferSize*/,
const audio::orchestra::StreamOptions& /*options*/) {
// MUST be implemented in subclasses!
return false;
}
@@ -236,7 +231,7 @@ uint32_t audio::orchestra::Api::getStreamSampleRate() {
}
enum audio::orchestra::error audio::orchestra::Api::verifyStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("a stream is not open!");
return audio::orchestra::error_invalidUse;
}
@@ -245,15 +240,15 @@ enum audio::orchestra::error audio::orchestra::Api::verifyStream() {
void audio::orchestra::Api::clearStreamInfo() {
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
m_sampleRate = 0;
m_bufferSize = 0;
m_nBuffers = 0;
m_userFormat = audio::format_unknow;
m_startTime = audio::Time();
m_duration = audio::Duration(0);
m_deviceBuffer = nullptr;
m_callback = nullptr;
m_deviceBuffer = null;
m_callback = null;
for (int32_t iii=0; iii<2; ++iii) {
m_device[iii] = 11111;
m_doConvertBuffer[iii] = false;
@@ -297,21 +292,21 @@ void audio::orchestra::Api::setConvertInfo(audio::orchestra::mode _mode, uint32_
if (m_deviceInterleaved[idTable] == false) {
if (_mode == audio::orchestra::mode_input) {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk * m_bufferSize);
m_convertInfo[idTable].outOffset.push_back(kkk);
m_convertInfo[idTable].inOffset.pushBack(kkk * m_bufferSize);
m_convertInfo[idTable].outOffset.pushBack(kkk);
m_convertInfo[idTable].inJump = 1;
}
} else {
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk);
m_convertInfo[idTable].outOffset.push_back(kkk * m_bufferSize);
m_convertInfo[idTable].inOffset.pushBack(kkk);
m_convertInfo[idTable].outOffset.pushBack(kkk * m_bufferSize);
m_convertInfo[idTable].outJump = 1;
}
}
} else { // no (de)interleaving
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
m_convertInfo[idTable].inOffset.push_back(kkk);
m_convertInfo[idTable].outOffset.push_back(kkk);
m_convertInfo[idTable].inOffset.pushBack(kkk);
m_convertInfo[idTable].outOffset.pushBack(kkk);
}
}


@@ -4,37 +4,40 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_API_H__
#define __AUDIO_ORCHESTRA_API_H__
#include <sstream>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/type.h>
#include <audio/orchestra/state.h>
#include <audio/orchestra/mode.h>
#include <audio/Time.h>
#include <audio/Duration.h>
#include <etk/Stream.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/orchestra/type.hpp>
#include <audio/orchestra/state.hpp>
#include <audio/orchestra/mode.hpp>
#include <audio/Time.hpp>
#include <audio/Duration.hpp>
#include <ememory/memory.hpp>
/**
* @brief Audio library namespace
*/
namespace audio {
/**
* @brief Audio-orchestra library namespace
*/
namespace orchestra {
const std::vector<uint32_t>& genericSampleRate();
const etk::Vector<uint32_t>& genericSampleRate();
/**
* @brief airtaudio callback function prototype.
* @param _inputBuffer For input (or duplex) streams, this buffer will hold _nbChunk of input audio chunk (nullptr if no data).
* @param _inputBuffer For input (or duplex) streams, this buffer will hold _nbChunk of input audio chunk (null if no data).
* @param _timeInput Timestamp of the first buffer sample (recording time).
* @param _outputBuffer For output (or duplex) streams, the client should write _nbChunk of audio chunk into this buffer (nullptr if no data).
* @param _outputBuffer For output (or duplex) streams, the client should write _nbChunk of audio chunk into this buffer (null if no data).
* @param _timeOutput Timestamp of the first buffer sample (playing time).
* @param _nbChunk The number of chunk of input or output chunk in the buffer (same size).
* @param _status List of errors that occurred in the lapse of time.
*/
typedef std11::function<int32_t (const void* _inputBuffer,
const audio::Time& _timeInput,
void* _outputBuffer,
const audio::Time& _timeOutput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status)> AirTAudioCallback;
typedef etk::Function<int32_t (const void* _inputBuffer,
const audio::Time& _timeInput,
void* _outputBuffer,
const audio::Time& _timeOutput,
uint32_t _nbChunk,
const etk::Vector<audio::orchestra::status>& _status)> AirTAudioCallback;
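As an illustration of the callback contract documented above (not part of this diff), a duplex handler matching the AirTAudioCallback prototype could simply copy the recorded chunk into the playback buffer. The stereo/int16 framing and the meaning of the return value are assumptions carried over from RtAudio conventions, not confirmed by this compare.

// Hedged sketch of a duplex callback matching the prototype above.
// Assumes the stream was opened as 2-channel int16; needs <cstring> and <cstdint>.
int32_t loopbackCallback(const void* _inputBuffer,
                         const audio::Time& _timeInput,
                         void* _outputBuffer,
                         const audio::Time& _timeOutput,
                         uint32_t _nbChunk,
                         const etk::Vector<audio::orchestra::status>& _status) {
    if (_inputBuffer != nullptr && _outputBuffer != nullptr) {
        // _nbChunk frames, 2 channels, 2 bytes per int16 sample.
        memcpy(_outputBuffer, _inputBuffer, _nbChunk * 2 * sizeof(int16_t));
    }
    return 0; // a non-zero return is assumed to request stream stop, as in RtAudio
}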
// A protected structure used for buffer conversion.
class ConvertInfo {
public:
@@ -43,35 +46,35 @@ namespace audio {
int32_t outJump;
enum audio::format inFormat;
enum audio::format outFormat;
std::vector<int> inOffset;
std::vector<int> outOffset;
etk::Vector<int> inOffset;
etk::Vector<int> outOffset;
};
class Api {
class Api : public ememory::EnableSharedFromThis<Api>{
protected:
std::string m_name;
etk::String m_name;
public:
Api();
virtual ~Api();
void setName(const std::string& _name) {
void setName(const etk::String& _name) {
m_name = _name;
}
virtual audio::orchestra::type getCurrentApi() = 0;
virtual const etk::String& getCurrentApi() = 0;
virtual uint32_t getDeviceCount() = 0;
virtual audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
// TODO : Check API ...
virtual bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
virtual bool getNamedDeviceInfo(const etk::String& _deviceName, audio::orchestra::DeviceInfo& _info) {
return false;
}
virtual uint32_t getDefaultInputDevice();
virtual uint32_t getDefaultOutputDevice();
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters* _outputParameters,
audio::orchestra::StreamParameters* _inputParameters,
audio::format _format,
uint32_t _sampleRate,
uint32_t* _nbChunk,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options);
audio::orchestra::StreamParameters* _inputParameters,
audio::format _format,
uint32_t _sampleRate,
uint32_t* _nbChunk,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options);
virtual enum audio::orchestra::error closeStream();
virtual enum audio::orchestra::error startStream();
virtual enum audio::orchestra::error stopStream() = 0;
@@ -80,19 +83,19 @@ namespace audio {
uint32_t getStreamSampleRate();
virtual audio::Time getStreamTime();
bool isStreamOpen() const {
return m_state != audio::orchestra::state_closed;
return m_state != audio::orchestra::state::closed;
}
bool isStreamRunning() const {
return m_state == audio::orchestra::state_running;
return m_state == audio::orchestra::state::running;
}
protected:
mutable std11::mutex m_mutex;
mutable ethread::Mutex m_mutex;
audio::orchestra::AirTAudioCallback m_callback;
uint32_t m_device[2]; // Playback and record, respectively.
enum audio::orchestra::mode m_mode; // audio::orchestra::mode_output, audio::orchestra::mode_input, or audio::orchestra::mode_duplex.
enum audio::orchestra::state m_state; // STOPPED, RUNNING, or CLOSED
std::vector<char> m_userBuffer[2]; // Playback and record, respectively.
etk::Vector<char> m_userBuffer[2]; // Playback and record, respectively.
char *m_deviceBuffer;
bool m_doConvertBuffer[2]; // Playback and record, respectively.
bool m_deviceInterleaved[2]; // Playback and record, respectively.
@@ -119,21 +122,21 @@ namespace audio {
* "warning" message is reported and false is returned. A
* successful probe is indicated by a return value of true.
*/
virtual bool probeDeviceOpen(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual bool probeDeviceOpenName(const std::string& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
virtual bool open(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual bool openName(const etk::String& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) { return false; }
/**
* @brief Increment the stream time.
@@ -168,15 +171,10 @@ namespace audio {
uint32_t _firstChannel);
public:
virtual bool isMasterOf(audio::orchestra::Api* _api) {
virtual bool isMasterOf(ememory::SharedPtr<audio::orchestra::Api> _api) {
return false;
};
};
}
}
/**
* @brief Debug operator To display the curent element in a Human redeable information
*/
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::type& _obj);
#endif


@@ -5,42 +5,56 @@
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/DeviceInfo.h>
#include <etk/stdTools.h>
#include <iostream>
#undef __class__
#define __class__ "DeviceInfo"
//#include <etk/types.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/orchestra/DeviceInfo.hpp>
#include <etk/stdTools.hpp>
void audio::orchestra::DeviceInfo::display(int32_t _tabNumber) const {
std::string space;
etk::String space;
for (int32_t iii=0; iii<_tabNumber; ++iii) {
space += " ";
}
ATA_INFO(space + "probe=" << probed);
ATA_INFO(space + "name=" << name);
ATA_INFO(space + "outputChannels=" << outputChannels);
ATA_INFO(space + "inputChannels=" << inputChannels);
ATA_INFO(space + "duplexChannels=" << duplexChannels);
ATA_INFO(space + "isDefaultOutput=" << (isDefaultOutput==true?"true":"false"));
ATA_INFO(space + "isDefaultInput=" << (isDefaultInput==true?"true":"false"));
ATA_INFO(space + "rates=" << sampleRates);
ATA_INFO(space + "native Format: " << nativeFormats);
if (isCorrect == false) {
ATA_PRINT(space + "NOT CORRECT INFORAMATIONS");
return;
}
ATA_PRINT(space + "mode=" << (input==true?"input":"output"));
ATA_PRINT(space + "name=" << name);
if (desc.size() != 0) {
ATA_PRINT(space + "desc=" << desc);
}
ATA_PRINT(space + "channel" << (channels.size()>1?"s":"") << "=" << channels.size() << " : " << channels);
ATA_PRINT(space + "rate" << (sampleRates.size()>1?"s":"") << "=" << sampleRates);
ATA_PRINT(space + "native Format" << (nativeFormats.size()>1?"s":"") << ": " << nativeFormats);
ATA_PRINT(space + "default=" << (isDefault==true?"true":"false"));
}
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj) {
void audio::orchestra::DeviceInfo::clear() {
isCorrect = false;
input = false;
name = "";
desc = "";
channels.clear();
sampleRates.clear();
nativeFormats.clear();
isDefault = false;
}
etk::Stream& audio::orchestra::operator <<(etk::Stream& _os, const audio::orchestra::DeviceInfo& _obj) {
_os << "{";
_os << "probe=" << _obj.probed << ", ";
_os << "name=" << _obj.name << ", ";
_os << "outputChannels=" << _obj.outputChannels << ", ";
_os << "inputChannels=" << _obj.inputChannels << ", ";
_os << "duplexChannels=" << _obj.duplexChannels << ", ";
_os << "isDefaultOutput=" << _obj.isDefaultOutput << ", ";
_os << "isDefaultInput=" << _obj.isDefaultInput << ", ";
_os << "rates=" << _obj.sampleRates << ", ";
_os << "native Format: " << _obj.nativeFormats;
if (_obj.isCorrect == false) {
_os << "NOT CORRECT INFORAMATIONS";
} else {
_os << "name=" << _obj.name << ", ";
if (_obj.desc.size() != 0) {
_os << "description=" << _obj.desc << ", ";
}
_os << "channels=" << _obj.channels << ", ";
_os << "default=" << _obj.isDefault << ", ";
_os << "rates=" << _obj.sampleRates << ", ";
_os << "native Format: " << _obj.nativeFormats;
}
_os << "}";
return _os;
}


@@ -1,46 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_DEVICE_INFO_H__
#define __AUDIO_ORCHESTRA_DEVICE_INFO_H__
#include <audio/format.h>
namespace audio {
namespace orchestra {
/**
* @brief The public device information structure for returning queried values.
*/
class DeviceInfo {
public:
bool probed; //!< true if the device capabilities were successfully probed.
std::string name; //!< Character string device identifier.
uint32_t outputChannels; //!< Maximum output channels supported by device.
uint32_t inputChannels; //!< Maximum input channels supported by device.
uint32_t duplexChannels; //!< Maximum simultaneous input/output channels supported by device.
bool isDefaultOutput; //!< true if this is the default output device.
bool isDefaultInput; //!< true if this is the default input device.
std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
std::vector<audio::format> nativeFormats; //!< Bit mask of supported data formats.
// Default constructor.
DeviceInfo() :
probed(false),
outputChannels(0),
inputChannels(0),
duplexChannels(0),
isDefaultOutput(false),
isDefaultInput(false),
nativeFormats() {}
void display(int32_t _tabNumber = 1) const;
};
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj);
}
}
#endif


@@ -0,0 +1,49 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <audio/format.hpp>
#include <audio/channel.hpp>
namespace audio {
namespace orchestra {
/**
* @brief The public device information structure for returning queried values.
*/
class DeviceInfo {
public:
bool isCorrect; //!< the information is correct (the system can return incorrect information).
bool input; //!< true if the device in an input; false: output.
etk::String name; //!< Character string device identifier.
etk::String desc; //!< description of the device
etk::Vector<audio::channel> channels; //!< Channels interfaces.
etk::Vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
etk::Vector<audio::format> nativeFormats; //!< Bit mask of supported data formats.
bool isDefault; //! is default input/output
// Default constructor.
DeviceInfo() :
isCorrect(false),
input(false),
name(),
desc(),
channels(),
sampleRates(),
nativeFormats(),
isDefault(false) {}
/**
* @brief Display the current information of the device (on console)
*/
void display(int32_t _tabNumber = 1) const;
/**
* @brief Clear all internal data
*/
void clear();
};
etk::Stream& operator <<(etk::Stream& _os, const audio::orchestra::DeviceInfo& _obj);
}
}


@@ -5,5 +5,5 @@
* @fork from RTAudio
*/
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/Flags.hpp>
#include <audio/orchestra/debug.hpp>


@@ -4,12 +4,9 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_FLAGS_H__
#define __AUDIO_ORCHESTRA_FLAGS_H__
#include <etk/types.h>
#include <etk/types.hpp>
namespace audio {
namespace orchestra {
@@ -23,5 +20,3 @@ namespace audio {
};
}
}
#endif


@@ -5,35 +5,39 @@
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <iostream>
//#include <etk/types.hpp>
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/orchestra/api/Alsa.hpp>
#include <audio/orchestra/api/Android.hpp>
#include <audio/orchestra/api/Asio.hpp>
#include <audio/orchestra/api/Core.hpp>
#include <audio/orchestra/api/CoreIos.hpp>
#include <audio/orchestra/api/Ds.hpp>
#include <audio/orchestra/api/Dummy.hpp>
#include <audio/orchestra/api/Jack.hpp>
#include <audio/orchestra/api/Pulse.hpp>
#undef __class__
#define __class__ "Interface"
std::vector<enum audio::orchestra::type> audio::orchestra::Interface::getCompiledApi() {
std::vector<enum audio::orchestra::type> apis;
etk::Vector<etk::String> audio::orchestra::Interface::getListApi() {
etk::Vector<etk::String> apis;
// The order here will control the order of RtAudio's API search in
// the constructor.
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
apis.push_back(m_apiAvaillable[iii].first);
apis.pushBack(m_apiAvaillable[iii].first);
}
return apis;
}
void audio::orchestra::Interface::openRtApi(enum audio::orchestra::type _api) {
delete m_rtapi;
m_rtapi = nullptr;
void audio::orchestra::Interface::openApi(const etk::String& _api) {
m_api.reset();
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
ATA_INFO("try open " << m_apiAvaillable[iii].first);
if (_api == m_apiAvaillable[iii].first) {
ATA_INFO(" ==> call it");
m_rtapi = m_apiAvaillable[iii].second();
if (m_rtapi != nullptr) {
m_api = m_apiAvaillable[iii].second();
if (m_api != null) {
return;
}
}
@@ -44,103 +48,101 @@ void audio::orchestra::Interface::openRtApi(enum audio::orchestra::type _api) {
audio::orchestra::Interface::Interface() :
m_rtapi(nullptr) {
m_api(null) {
ATA_DEBUG("Add interface:");
#if defined(ORCHESTRA_BUILD_JACK)
ATA_DEBUG(" JACK");
addInterface(audio::orchestra::type_jack, audio::orchestra::api::Jack::create);
addInterface(audio::orchestra::typeJack, audio::orchestra::api::Jack::create);
#endif
#if defined(ORCHESTRA_BUILD_ALSA)
ATA_DEBUG(" ALSA");
addInterface(audio::orchestra::type_alsa, audio::orchestra::api::Alsa::create);
addInterface(audio::orchestra::typeAlsa, audio::orchestra::api::Alsa::create);
#endif
#if defined(ORCHESTRA_BUILD_PULSE)
ATA_DEBUG(" PULSE");
addInterface(audio::orchestra::type_pulse, audio::orchestra::api::Pulse::create);
#endif
#if defined(ORCHESTRA_BUILD_OSS)
ATA_DEBUG(" OSS");
addInterface(audio::orchestra::type_oss, audio::orchestra::api::Oss::create);
addInterface(audio::orchestra::typePulse, audio::orchestra::api::Pulse::create);
#endif
#if defined(ORCHESTRA_BUILD_ASIO)
ATA_DEBUG(" ASIO");
addInterface(audio::orchestra::type_asio, audio::orchestra::api::Asio::create);
addInterface(audio::orchestra::typeAsio, audio::orchestra::api::Asio::create);
#endif
#if defined(ORCHESTRA_BUILD_DS)
ATA_DEBUG(" DS");
addInterface(audio::orchestra::type_ds, audio::orchestra::api::Ds::create);
addInterface(audio::orchestra::typeDs, audio::orchestra::api::Ds::create);
#endif
#if defined(ORCHESTRA_BUILD_MACOSX_CORE)
ATA_DEBUG(" CORE OSX");
addInterface(audio::orchestra::type_coreOSX, audio::orchestra::api::Core::create);
addInterface(audio::orchestra::typeCoreOSX, audio::orchestra::api::Core::create);
#endif
#if defined(ORCHESTRA_BUILD_IOS_CORE)
ATA_DEBUG(" CORE IOS");
addInterface(audio::orchestra::type_coreIOS, audio::orchestra::api::CoreIos::create);
addInterface(audio::orchestra::typeCoreIOS, audio::orchestra::api::CoreIos::create);
#endif
#if defined(ORCHESTRA_BUILD_JAVA)
ATA_DEBUG(" JAVA");
addInterface(audio::orchestra::type_java, audio::orchestra::api::Android::create);
addInterface(audio::orchestra::typeJava, audio::orchestra::api::Android::create);
#endif
#if defined(ORCHESTRA_BUILD_DUMMY)
ATA_DEBUG(" DUMMY");
addInterface(audio::orchestra::type_dummy, audio::orchestra::api::Dummy::create);
addInterface(audio::orchestra::typeDummy, audio::orchestra::api::Dummy::create);
#endif
}
void audio::orchestra::Interface::addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)()) {
m_apiAvaillable.push_back(std::pair<enum audio::orchestra::type, Api* (*)()>(_api, _callbackCreate));
void audio::orchestra::Interface::addInterface(const etk::String& _api, ememory::SharedPtr<Api> (*_callbackCreate)()) {
m_apiAvaillable.pushBack(etk::Pair<etk::String, ememory::SharedPtr<Api> (*)()>(_api, _callbackCreate));
}
enum audio::orchestra::error audio::orchestra::Interface::instanciate(enum audio::orchestra::type _api) {
ATA_INFO("Instanciate API ...");
if (m_rtapi != nullptr) {
ATA_WARNING("Interface already started ...!");
enum audio::orchestra::error audio::orchestra::Interface::clear() {
ATA_INFO("Clear API ...");
if (m_api == null) {
ATA_WARNING("Interface NOT started!");
return audio::orchestra::error_none;
}
if (_api != audio::orchestra::type_undefined) {
m_api.reset();
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::Interface::instanciate(const etk::String& _api) {
ATA_INFO("Instanciate API ...");
if (m_api != null) {
ATA_WARNING("Interface already started!");
return audio::orchestra::error_none;
}
if (_api != audio::orchestra::typeUndefined) {
ATA_INFO("API specified : " << _api);
// Attempt to open the specified API.
openRtApi(_api);
if (m_rtapi != nullptr) {
if (m_rtapi->getDeviceCount() != 0) {
openApi(_api);
if (m_api != null) {
if (m_api->getDeviceCount() != 0) {
ATA_INFO(" ==> api open");
}
return audio::orchestra::error_none;
}
// No compiled support for specified API value. Issue a debug
// warning and continue as if no API was specified.
ATA_ERROR("RtAudio: no compiled support for specified API argument!");
ATA_ERROR("API NOT Supported '" << _api << "' not in " << getListApi());
return audio::orchestra::error_fail;
}
ATA_INFO("Auto choice API :");
// Iterate through the compiled APIs and return as soon as we find
// one with at least one device or we reach the end of the list.
std::vector<enum audio::orchestra::type> apis = getCompiledApi();
etk::Vector<etk::String> apis = getListApi();
ATA_INFO(" find : " << apis.size() << " apis.");
for (size_t iii=0; iii<apis.size(); ++iii) {
ATA_INFO("try open ...");
openRtApi(apis[iii]);
if(m_rtapi == nullptr) {
openApi(apis[iii]);
if(m_api == null) {
ATA_ERROR(" ==> can not create ...");
continue;
}
if (m_rtapi->getDeviceCount() != 0) {
if (m_api->getDeviceCount() != 0) {
ATA_INFO(" ==> api open");
break;
} else {
ATA_INFO(" ==> Interface exist, but have no devices: " << m_api->getDeviceCount());
}
}
if (m_rtapi != nullptr) {
if (m_api != null) {
return audio::orchestra::error_none;
}
ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
ATA_ERROR("API NOT Supported '" << _api << "' not in " << getListApi());
return audio::orchestra::error_fail;
}
audio::orchestra::Interface::~Interface() {
ATA_INFO("Remove interface");
delete m_rtapi;
m_rtapi = nullptr;
m_api.reset();
}
enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orchestra::StreamParameters* _outputParameters,
@@ -150,10 +152,10 @@ enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orch
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options) {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::error_inputNull;
}
return m_rtapi->openStream(_outputParameters,
return m_api->openStream(_outputParameters,
_inputParameters,
_format,
_sampleRate,
@@ -163,22 +165,22 @@ enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orch
}
bool audio::orchestra::Interface::isMasterOf(audio::orchestra::Interface& _interface) {
if (m_rtapi == nullptr) {
ATA_ERROR("Current Master API is nullptr ...");
if (m_api == null) {
ATA_ERROR("Current Master API is null ...");
return false;
}
if (_interface.m_rtapi == nullptr) {
ATA_ERROR("Current Slave API is nullptr ...");
if (_interface.m_api == null) {
ATA_ERROR("Current Slave API is null ...");
return false;
}
if (m_rtapi->getCurrentApi() != _interface.m_rtapi->getCurrentApi()) {
if (m_api->getCurrentApi() != _interface.m_api->getCurrentApi()) {
ATA_ERROR("Can not link 2 Interface with not the same Low level type (?)");//" << _interface.m_adac->getCurrentApi() << " != " << m_adac->getCurrentApi() << ")");
return false;
}
if (m_rtapi->getCurrentApi() != audio::orchestra::type_alsa) {
ATA_ERROR("Link 2 device together work only if the interafec is ?");// << audio::orchestra::type_alsa << " not for " << m_rtapi->getCurrentApi());
if (m_api->getCurrentApi() != audio::orchestra::typeAlsa) {
ATA_ERROR("Link 2 device together work only if the interafec is ?");// << audio::orchestra::type::alsa << " not for " << m_api->getCurrentApi());
return false;
}
return m_rtapi->isMasterOf(_interface.m_rtapi);
return m_api->isMasterOf(_interface.m_api);
}


@@ -4,25 +4,14 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_INTERFACE_H__
#define __AUDIO_ORCHESTRA_INTERFACE_H__
#include <string>
#include <vector>
#include <audio/orchestra/base.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/Api.h>
#include <audio/orchestra/api/Alsa.h>
#include <audio/orchestra/api/Android.h>
#include <audio/orchestra/api/Asio.h>
#include <audio/orchestra/api/Core.h>
#include <audio/orchestra/api/CoreIos.h>
#include <audio/orchestra/api/Ds.h>
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/api/Jack.h>
#include <audio/orchestra/api/Oss.h>
#include <audio/orchestra/api/Pulse.h>
#include <etk/String.hpp>
#include <etk/Vector.hpp>
#include <etk/Pair.hpp>
#include <audio/orchestra/base.hpp>
#include <audio/orchestra/CallbackInfo.hpp>
#include <audio/orchestra/Api.hpp>
namespace audio {
namespace orchestra {
@@ -38,25 +27,27 @@ namespace audio {
*/
class Interface {
protected:
std::vector<std::pair<enum audio::orchestra::type, Api* (*)()> > m_apiAvaillable;
etk::Vector<etk::Pair<etk::String, ememory::SharedPtr<Api> (*)()> > m_apiAvaillable;
protected:
audio::orchestra::Api *m_rtapi;
ememory::SharedPtr<audio::orchestra::Api> m_api;
public:
void setName(const std::string& _name) {
if (m_rtapi == nullptr) {
void setName(const etk::String& _name) {
if (m_api == null) {
return;
}
m_rtapi->setName(_name);
m_api->setName(_name);
}
/**
* @brief A static function to determine the available compiled audio APIs.
*
* The values returned in the std::vector can be compared against
* the enumerated list values. Note that there can be more than one
* API compiled for certain operating systems.
* @brief Get the list of all available APIs in the system.
* @return the list of all APIs
*/
std::vector<enum audio::orchestra::type> getCompiledApi();
etk::Vector<etk::String> getListApi();
/**
* @brief Add an interface of the Possible List.
* @param[in] _api Type of the interface.
* @param[in] _callbackCreate API creation callback.
*/
void addInterface(const etk::String& _api, ememory::SharedPtr<Api> (*_callbackCreate)());
/**
* @brief The class constructor.
* @note the creating of the basic instance is done by Instanciate
@@ -70,23 +61,21 @@ namespace audio {
*/
virtual ~Interface();
/**
* @brief Add an interface of the Possible List.
* @param[in] _api Type of the interface.
* @param[in] _callbackCreate API creation callback.
* @brief Clear the current Interface
*/
void addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)());
enum audio::orchestra::error clear();
/**
* @brief Create an interface instance
*/
enum audio::orchestra::error instanciate(enum audio::orchestra::type _api = audio::orchestra::type_undefined);
enum audio::orchestra::error instanciate(const etk::String& _api = audio::orchestra::typeUndefined);
/**
* @return the audio API specifier for the current instance of airtaudio.
*/
enum audio::orchestra::type getCurrentApi() {
if (m_rtapi == nullptr) {
return audio::orchestra::type_undefined;
const etk::String& getCurrentApi() {
if (m_api == null) {
return audio::orchestra::typeUndefined;
}
return m_rtapi->getCurrentApi();
return m_api->getCurrentApi();
}
/**
* @brief A public function that queries for the number of audio devices available.
@@ -96,10 +85,10 @@ namespace audio {
* a system error occurs during processing, a warning will be issued.
*/
uint32_t getDeviceCount() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return 0;
}
return m_rtapi->getDeviceCount();
return m_api->getDeviceCount();
}
/**
* @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
@ -113,17 +102,17 @@ namespace audio {
* @return An audio::orchestra::DeviceInfo structure for a specified device number.
*/
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::DeviceInfo();
}
return m_rtapi->getDeviceInfo(_device);
return m_api->getDeviceInfo(_device);
}
audio::orchestra::DeviceInfo getDeviceInfo(const std::string& _deviceName) {
if (m_rtapi == nullptr) {
audio::orchestra::DeviceInfo getDeviceInfo(const etk::String& _deviceName) {
if (m_api == null) {
return audio::orchestra::DeviceInfo();
}
audio::orchestra::DeviceInfo info;
m_rtapi->getNamedDeviceInfo(_deviceName, info);
m_api->getNamedDeviceInfo(_deviceName, info);
return info;
}
/**
@ -136,10 +125,10 @@ namespace audio {
* before attempting to open a stream.
*/
uint32_t getDefaultOutputDevice() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return 0;
}
return m_rtapi->getDefaultOutputDevice();
return m_api->getDefaultOutputDevice();
}
/**
* @brief A function that returns the index of the default input device.
@ -151,10 +140,10 @@ namespace audio {
* before attempting to open a stream.
*/
uint32_t getDefaultInputDevice() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return 0;
}
return m_rtapi->getDefaultInputDevice();
return m_api->getDefaultInputDevice();
}
/**
* @brief A public function for opening a stream with the specified parameters.
@ -166,12 +155,12 @@ namespace audio {
* @param _outputParameters Specifies output stream parameters to use
* when opening a stream, including a device ID, number of channels,
* and starting channel number. For input-only streams, this
* argument should be nullptr. The device ID is an index value between
* argument should be null. The device ID is an index value between
* 0 and getDeviceCount() - 1.
* @param _inputParameters Specifies input stream parameters to use
* when opening a stream, including a device ID, number of channels,
* and starting channel number. For output-only streams, this
* argument should be nullptr. The device ID is an index value between
* argument should be null. The device ID is an index value between
* 0 and getDeviceCount() - 1.
* @param _format An audio::format specifying the desired sample data format.
* @param _sampleRate The desired sample rate (sample frames per second).
@ -195,12 +184,12 @@ namespace audio {
* when an error has occurred.
*/
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters *_outputParameters,
audio::orchestra::StreamParameters *_inputParameters,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options = audio::orchestra::StreamOptions());
audio::orchestra::StreamParameters *_inputParameters,
enum audio::format _format,
uint32_t _sampleRate,
uint32_t* _bufferFrames,
audio::orchestra::AirTAudioCallback _callback,
const audio::orchestra::StreamOptions& _options = audio::orchestra::StreamOptions());
/**
* @brief A function that closes a stream and frees any associated stream memory.
@ -209,10 +198,10 @@ namespace audio {
* returns (no exception is thrown).
*/
enum audio::orchestra::error closeStream() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::error_inputNull;
}
return m_rtapi->closeStream();
return m_api->closeStream();
}
/**
* @brief A function that starts a stream.
@ -223,10 +212,10 @@ namespace audio {
* running.
*/
enum audio::orchestra::error startStream() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::error_inputNull;
}
return m_rtapi->startStream();
return m_api->startStream();
}
/**
* @brief Stop a stream, allowing any samples remaining in the output queue to be played.
@ -237,10 +226,10 @@ namespace audio {
* stopped.
*/
enum audio::orchestra::error stopStream() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::error_inputNull;
}
return m_rtapi->stopStream();
return m_api->stopStream();
}
/**
* @brief Stop a stream, discarding any samples remaining in the input/output queue.
@ -250,38 +239,38 @@ namespace audio {
* stopped.
*/
enum audio::orchestra::error abortStream() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::orchestra::error_inputNull;
}
return m_rtapi->abortStream();
return m_api->abortStream();
}
/**
* @return true if a stream is open and false if not.
*/
bool isStreamOpen() const {
if (m_rtapi == nullptr) {
if (m_api == null) {
return false;
}
return m_rtapi->isStreamOpen();
return m_api->isStreamOpen();
}
/**
* @return true if the stream is running and false if it is stopped or not open.
*/
bool isStreamRunning() const {
if (m_rtapi == nullptr) {
if (m_api == null) {
return false;
}
return m_rtapi->isStreamRunning();
return m_api->isStreamRunning();
}
/**
* @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
* @return the number of elapsed seconds since the stream was started.
*/
audio::Time getStreamTime() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return audio::Time();
}
return m_rtapi->getStreamTime();
return m_api->getStreamTime();
}
/**
* @brief The stream latency refers to delay in audio input and/or output
@ -293,10 +282,10 @@ namespace audio {
* @return The internal stream latency in sample frames.
*/
long getStreamLatency() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return 0;
}
return m_rtapi->getStreamLatency();
return m_api->getStreamLatency();
}
/**
* @brief On some systems, the sample rate used may be slightly different
@ -305,16 +294,15 @@ namespace audio {
* @return Returns actual sample rate in use by the stream.
*/
uint32_t getStreamSampleRate() {
if (m_rtapi == nullptr) {
if (m_api == null) {
return 0;
}
return m_rtapi->getStreamSampleRate();
return m_api->getStreamSampleRate();
}
bool isMasterOf(audio::orchestra::Interface& _interface);
protected:
void openRtApi(enum audio::orchestra::type _api);
void openApi(const etk::String& _api);
};
}
}
#endif
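
For illustration, here is a minimal usage sketch of the reworked string-based interface. This is hypothetical calling code, not part of the diff; it only relies on the declarations visible in the header above and assumes the audio-orchestra and etk headers are on the include path.

#include <audio/orchestra/Interface.hpp>
#include <etk/String.hpp>
#include <etk/Vector.hpp>

// Sketch: enumerate the compiled back-ends, instanciate one and list its devices.
int main() {
	audio::orchestra::Interface iface;
	// getListApi() replaces the old enum-based getCompiledApi().
	etk::Vector<etk::String> apis = iface.getListApi();
	for (size_t iii = 0; iii < apis.size(); ++iii) {
		// apis[iii] is the name accepted by instanciate() and addInterface().
	}
	// Without an argument, instanciate() defaults to audio::orchestra::typeUndefined.
	if (iface.instanciate() != audio::orchestra::error_none) {
		return -1;
	}
	for (uint32_t iii = 0; iii < iface.getDeviceCount(); ++iii) {
		audio::orchestra::DeviceInfo info = iface.getDeviceInfo(iii);
		// inspect info (name, channels, sample rates, ...) as needed.
	}
	return 0;
}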

View File

@ -5,9 +5,9 @@
* @fork from RTAudio
*/
#include <audio/orchestra/StreamOptions.h>
#include <etk/stdTools.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/StreamOptions.hpp>
#include <etk/stdTools.hpp>
#include <audio/orchestra/debug.hpp>
static const char* listValue[] = {
"hardware",
@ -15,13 +15,13 @@ static const char* listValue[] = {
"soft"
};
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj) {
etk::Stream& audio::orchestra::operator <<(etk::Stream& _os, enum audio::orchestra::timestampMode _obj) {
_os << listValue[_obj];
return _os;
}
namespace etk {
template <> bool from_string<enum audio::orchestra::timestampMode>(enum audio::orchestra::timestampMode& _variableRet, const std::string& _value) {
template <> bool from_string<enum audio::orchestra::timestampMode>(enum audio::orchestra::timestampMode& _variableRet, const etk::String& _value) {
if (_value == "hardware") {
_variableRet = audio::orchestra::timestampMode_Hardware;
return true;
@ -37,7 +37,7 @@ namespace etk {
return false;
}
template <enum audio::orchestra::timestampMode> std::string to_string(const enum audio::orchestra::timestampMode& _variable) {
template <enum audio::orchestra::timestampMode> etk::String toString(const enum audio::orchestra::timestampMode& _variable) {
return listValue[_variable];
}
}
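
A short usage sketch of the specialisation above (a hypothetical helper, assuming the etk::from_string specialisation is visible at the call site; only the literals shown in this file, such as "hardware" and "soft", are assumed to be accepted):

#include <audio/orchestra/StreamOptions.hpp>
#include <etk/stdTools.hpp>

// Sketch: parse a timestampMode from its textual form, falling back to the simulated mode.
static enum audio::orchestra::timestampMode parseTimestampMode(const etk::String& _value) {
	enum audio::orchestra::timestampMode mode = audio::orchestra::timestampMode_soft;
	if (etk::from_string<enum audio::orchestra::timestampMode>(mode, _value) == false) {
		// unknown literal: keep the software (simulated) timestamp mode.
	}
	return mode;
}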

View File

@ -4,27 +4,25 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_STREAM_OPTION_H__
#define __AUDIO_ORCHESTRA_STREAM_OPTION_H__
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/Flags.hpp>
#include <etk/String.hpp>
namespace audio {
namespace orchestra {
enum timestampMode {
timestampMode_Hardware, //!< enable hardware timestamp
timestampMode_trigered, //!< get harware triger time stamp and ingrement with duration
timestampMode_trigered, //!< get hardware trigger time stamp and increment with duration
timestampMode_soft, //!< Simulate all timestamps.
};
std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj);
etk::Stream& operator <<(etk::Stream& _os, enum audio::orchestra::timestampMode _obj);
class StreamOptions {
public:
audio::orchestra::Flags flags; //!< A bit-mask of stream flags
uint32_t numberOfBuffers; //!< Number of stream buffers.
std::string streamName; //!< A stream name (currently used only in Jack).
etk::String streamName; //!< A stream name (currently used only in Jack).
enum timestampMode mode; //!< mode of timestamping data...
// Default constructor.
StreamOptions() :
@ -35,5 +33,3 @@ namespace audio {
}
}
#endif
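
To make the field set above concrete, a small construction sketch (a hypothetical helper; only the fields declared in this header are used and the values are arbitrary):

#include <audio/orchestra/StreamOptions.hpp>

// Sketch: request three stream buffers, a named stream and software timestamps.
static audio::orchestra::StreamOptions makeOptions() {
	audio::orchestra::StreamOptions options;
	options.numberOfBuffers = 3;                         // number of stream buffers
	options.streamName = "orchestra-demo";               // currently only used by the Jack back-end
	options.mode = audio::orchestra::timestampMode_soft; // simulate all timestamps
	return options;
}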

View File

@ -4,10 +4,7 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
#define __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
#pragma once
namespace audio {
namespace orchestra {
@ -17,7 +14,7 @@ namespace audio {
class StreamParameters {
public:
int32_t deviceId; //!< Device index (-1 to getDeviceCount() - 1).
std::string deviceName; //!< name of the device (if deviceId==-1 this must not be == "", and the oposite ...)
etk::String deviceName; //!< name of the device (if deviceId == -1 this must not be empty, and vice versa)
uint32_t nChannels; //!< Number of channels.
uint32_t firstChannel; //!< First channel index on device (default = 0).
// Default constructor.
@ -31,5 +28,3 @@ namespace audio {
}
}
#endif
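
Together with the StreamOptions above, this is how an output-only stream is typically described when calling Interface::openStream(). This is only a sketch: "onAudio" stands for a user-provided audio::orchestra::AirTAudioCallback, "null" is the null constant used throughout this diff, and the device index, format and rate are illustrative.

#include <audio/orchestra/Interface.hpp>

// Sketch: open and start a stereo 48 kHz output stream on the first device.
static bool openOutput(audio::orchestra::Interface& _iface,
                       audio::orchestra::AirTAudioCallback _onAudio) {
	audio::orchestra::StreamParameters params;
	params.deviceId = 0;         // index in [0 .. getDeviceCount()-1]; use -1 plus deviceName to select by name
	params.nChannels = 2;        // stereo
	params.firstChannel = 0;     // start on the first channel of the device
	uint32_t bufferFrames = 256; // requested frames per callback (the back-end may adjust it)
	enum audio::orchestra::error err = _iface.openStream(&params,
	                                                     null, // output-only: no input parameters
	                                                     audio::format_int16,
	                                                     48000,
	                                                     &bufferFrames,
	                                                     _onAudio);
	if (err != audio::orchestra::error_none) {
		return false;
	}
	return _iface.startStream() == audio::orchestra::error_none;
}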

File diff suppressed because it is too large

View File

@ -1,77 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ALSA_H__) && defined(ORCHESTRA_BUILD_ALSA)
#define __AUDIO_ORCHESTRA_API_ALSA_H__
namespace audio {
namespace orchestra {
namespace api {
class AlsaPrivate;
class Alsa: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
public:
Alsa();
virtual ~Alsa();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_alsa;
}
uint32_t getDeviceCount();
private:
bool getNamedDeviceInfoLocal(const std::string& _deviceName,
audio::orchestra::DeviceInfo& _info,
int32_t _cardId=-1, // Alsa card ID
int32_t _subdevice=-1, // alsa subdevice ID
int32_t _localDeviceId=-1); // local ID of device fined
public:
bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
return getNamedDeviceInfoLocal(_deviceName, _info);
}
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
void callbackEventOneCycle();
private:
static void alsaCallbackEvent(void* _userData);
private:
std11::shared_ptr<AlsaPrivate> m_private;
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual bool probeDeviceOpenName(const std::string& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual audio::Time getStreamTime();
public:
bool isMasterOf(audio::orchestra::Api* _api);
};
}
}
}
#endif

View File

@ -0,0 +1,79 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_ALSA
namespace audio {
namespace orchestra {
namespace api {
class AlsaPrivate;
class Alsa: public audio::orchestra::Api {
public:
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Alsa();
virtual ~Alsa();
const etk::String& getCurrentApi() {
return audio::orchestra::typeAlsa;
}
uint32_t getDeviceCount();
private:
bool getNamedDeviceInfoLocal(const etk::String& _deviceName,
audio::orchestra::DeviceInfo& _info,
int32_t _cardId=-1, // Alsa card ID
int32_t _subdevice=-1, // alsa subdevice ID
int32_t _localDeviceId=-1,// local ID of device found
bool _input=false);
public:
bool getNamedDeviceInfo(const etk::String& _deviceName, audio::orchestra::DeviceInfo& _info) {
return getNamedDeviceInfoLocal(_deviceName, _info);
}
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesirable results!
void callbackEvent();
void callbackEventOneCycleRead();
void callbackEventOneCycleWrite();
void callbackEventOneCycleMMAPRead();
void callbackEventOneCycleMMAPWrite();
private:
ememory::SharedPtr<AlsaPrivate> m_private;
etk::Vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool open(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
bool openName(const etk::String& _deviceName,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
virtual audio::Time getStreamTime();
public:
bool isMasterOf(ememory::SharedPtr<audio::orchestra::Api> _api);
};
}
}
}
#endif
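
For completeness, this is the expected way to plug such a back-end into the Interface shown earlier. A sketch only: the Interface constructor may already register the compiled back-ends internally, so the explicit addInterface() call is shown mainly to illustrate the factory signature.

#ifdef ORCHESTRA_BUILD_ALSA
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/api/Alsa.hpp>

// Sketch: register the ALSA factory under its API name and instanciate it explicitly.
static enum audio::orchestra::error selectAlsa(audio::orchestra::Interface& _iface) {
	_iface.addInterface(audio::orchestra::typeAlsa, &audio::orchestra::api::Alsa::create);
	return _iface.instanciate(audio::orchestra::typeAlsa);
}
#endif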

View File

@ -7,60 +7,25 @@
#ifdef ORCHESTRA_BUILD_JAVA
#include <ewol/context/Context.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
//#include <ewol/context/Context.h>
#undef __class__
#define __class__ "api::Android"
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/orchestra/api/AndroidNativeInterface.hpp>
#include <audio/orchestra/api/Android.hpp>
extern "C" {
#include <limits.h>
}
audio::orchestra::Api* audio::orchestra::api::Android::create() {
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Android::create() {
ATA_INFO("Create Android device ... ");
return new audio::orchestra::api::Android();
return ememory::SharedPtr<audio::orchestra::api::Android>(ETK_NEW(audio::orchestra::api::Android));
}
audio::orchestra::api::Android::Android() {
ATA_INFO("new Android");
// On android, we set a static device ...
ATA_INFO("get context");
ewol::Context& tmpContext = ewol::getContext();
ATA_INFO("done p=" << (int64_t)&tmpContext);
int32_t deviceCount = tmpContext.audioGetDeviceCount();
ATA_ERROR("Get count devices : " << deviceCount);
for (int32_t iii=0; iii<deviceCount; ++iii) {
std::string property = tmpContext.audioGetDeviceProperty(iii);
ATA_ERROR("Get devices property : " << property);
std::vector<std::string> listProperty = etk::split(property, ':');
audio::orchestra::DeviceInfo tmp;
tmp.name = listProperty[0];
std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
for(size_t fff=0; fff<listFreq.size(); ++fff) {
tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
}
tmp.outputChannels = 0;
tmp.inputChannels = 0;
tmp.duplexChannels = 0;
if (listProperty[1] == "out") {
tmp.isDefaultOutput = true;
tmp.isDefaultInput = false;
tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
} else if (listProperty[1] == "in") {
tmp.isDefaultOutput = false;
tmp.isDefaultInput = true;
tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
} else {
/* duplex */
tmp.isDefaultOutput = true;
tmp.isDefaultInput = true;
tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
}
tmp.nativeFormats = audio::getListFormatFromString(listProperty[4]);
m_devices.push_back(tmp);
}
ATA_INFO("Create Android interface (end)");
audio::orchestra::api::Android::Android() :
m_uid(-1) {
ATA_INFO("Create Android interface");
}
audio::orchestra::api::Android::~Android() {
@ -69,16 +34,16 @@ audio::orchestra::api::Android::~Android() {
uint32_t audio::orchestra::api::Android::getDeviceCount() {
//ATA_INFO("Get device count:"<< m_devices.size());
return m_devices.size();
return audio::orchestra::api::android::getDeviceCount();
}
audio::orchestra::DeviceInfo audio::orchestra::api::Android::getDeviceInfo(uint32_t _device) {
//ATA_INFO("Get device info ...");
return m_devices[_device];
return audio::orchestra::api::android::getDeviceInfo(_device);
}
enum audio::orchestra::error audio::orchestra::api::Android::closeStream() {
ATA_INFO("Clese Stream");
ATA_INFO("Close Stream");
// Can not close the stream now...
return audio::orchestra::error_none;
}
@ -88,45 +53,48 @@ enum audio::orchestra::error audio::orchestra::api::Android::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
// Can not close the stream now...
return audio::orchestra::error_none;
return audio::orchestra::api::android::startStream(m_uid);
}
enum audio::orchestra::error audio::orchestra::api::Android::stopStream() {
ATA_INFO("Stop stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return audio::orchestra::error_none;
return audio::orchestra::api::android::stopStream(m_uid);
}
enum audio::orchestra::error audio::orchestra::api::Android::abortStream() {
ATA_INFO("Abort Stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return audio::orchestra::error_none;
}
void audio::orchestra::api::Android::callBackEvent(void* _data,
int32_t _frameRate) {
void audio::orchestra::api::Android::playback(int16_t* _dst, int32_t _nbChunk) {
// clear output buffer:
if (_dst != null) {
memset(_dst, 0, _nbChunk*audio::getFormatBytes(m_deviceFormat[modeToIdTable(m_mode)])*m_nDeviceChannels[modeToIdTable(m_mode)]);
}
int32_t doStopStream = 0;
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
if (m_doConvertBuffer[audio::orchestra::mode_output] == true) {
doStopStream = m_callback(nullptr,
etk::Vector<enum audio::orchestra::status> status;
if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " userbuffer size = " << m_userBuffer[audio::orchestra::mode_output].size() << "pointer=" << int64_t(&m_userBuffer[audio::orchestra::mode_output][0]));
doStopStream = m_callback(null,
audio::Time(),
m_userBuffer[audio::orchestra::mode_output],
&m_userBuffer[m_mode][0],
streamTime,
_frameRate,
uint32_t(_nbChunk),
status);
convertBuffer((char*)_data, (char*)m_userBuffer[audio::orchestra::mode_output], m_convertInfo[audio::orchestra::mode_output]);
convertBuffer((char*)_dst, (char*)&m_userBuffer[audio::orchestra::mode_output][0], m_convertInfo[audio::orchestra::mode_output]);
} else {
doStopStream = m_callback(_data,
streamTime,
nullptr,
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " pointer=" << int64_t(_dst));
doStopStream = m_callback(null,
audio::Time(),
_frameRate,
_dst,
streamTime,
uint32_t(_nbChunk),
status);
}
if (doStopStream == 2) {
abortStream();
@ -135,71 +103,88 @@ void audio::orchestra::api::Android::callBackEvent(void* _data,
audio::orchestra::Api::tickStreamTime();
}
void audio::orchestra::api::Android::androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData) {
if (_userData == nullptr) {
ATA_INFO("callback event ... nullptr pointer");
void audio::orchestra::api::Android::record(int16_t* _dst, int32_t _nbChunk) {
int32_t doStopStream = 0;
audio::Time streamTime = getStreamTime();
etk::Vector<enum audio::orchestra::status> status;
if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " userbuffer size = " << m_userBuffer[audio::orchestra::mode_output].size() << "pointer=" << int64_t(&m_userBuffer[audio::orchestra::mode_output][0]));
convertBuffer((char*)&m_userBuffer[audio::orchestra::mode_input][0], (char*)_dst, m_convertInfo[audio::orchestra::mode_input]);
doStopStream = m_callback(&m_userBuffer[m_mode][0],
streamTime,
null,
audio::Time(),
uint32_t(_nbChunk),
status);
} else {
ATA_VERBOSE("Need playback data " << int32_t(_nbChunk) << " pointer=" << int64_t(_dst));
doStopStream = m_callback(_dst,
streamTime,
null,
audio::Time(),
uint32_t(_nbChunk),
status);
}
if (doStopStream == 2) {
abortStream();
return;
}
audio::orchestra::api::Android* myClass = static_cast<audio::orchestra::api::Android*>(_userData);
myClass->callBackEvent(_data, _frameRate/2);
audio::orchestra::Api::tickStreamTime();
}
bool audio::orchestra::api::Android::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != audio::orchestra::mode_output) {
ATA_ERROR("Can not start a device input or duplex for Android ...");
return false;
}
m_userFormat = _format;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
ewol::Context& tmpContext = ewol::getContext();
bool audio::orchestra::api::Android::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool ret = false;
if (_format == SINT8) {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
m_mode = _mode;
m_userFormat = _format;
m_nUserChannels[modeToIdTable(m_mode)] = _channels;
m_uid = audio::orchestra::api::android::open(_device, m_mode, _channels, _firstChannel, _sampleRate, _format, _bufferSize, _options, ememory::staticPointerCast<audio::orchestra::api::Android>(sharedFromThis()));
if (m_uid < 0) {
ret = false;
} else {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
ret = true;
}
m_bufferSize = 256;
m_sampleRate = _sampleRate;
m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
m_doByteSwap[modeToIdTable(m_mode)] = false; // for endianness ...
// TODO : For now, this is hard-coded ==> to be updated later ...
m_deviceFormat[modeToIdTable(_mode)] = SINT16;
m_nDeviceChannels[modeToIdTable(_mode)] = 2;
m_deviceInterleaved[modeToIdTable(_mode)] = true;
m_deviceFormat[modeToIdTable(m_mode)] = audio::format_int16;
m_nDeviceChannels[modeToIdTable(m_mode)] = 2;
m_deviceInterleaved[modeToIdTable(m_mode)] = true;
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
m_doConvertBuffer[modeToIdTable(m_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(m_mode)]) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
if (m_nUserChannels[modeToIdTable(m_mode)] < m_nDeviceChannels[modeToIdTable(m_mode)]) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
if ( m_deviceInterleaved[modeToIdTable(m_mode)] == false
&& m_nUserChannels[modeToIdTable(m_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(m_mode)] = true;
}
if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
if (m_doConvertBuffer[modeToIdTable(m_mode)] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("audio::orchestra::api::Android::probeDeviceOpen: error allocating user buffer memory.");
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(m_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(m_mode)].resize(bufferBytes);
if (m_userBuffer[modeToIdTable(m_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
setConvertInfo(m_mode, _firstChannel);
}
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(m_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(m_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(m_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(m_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
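
The open() code above enables the conversion path whenever the user-side layout differs from the hard-coded device layout; the decision boils down to the following check (a sketch with hypothetical parameter names, extracted from the three conditions above):

#include <audio/format.hpp>

// Sketch: conversion is needed when the sample format, the channel count or the
// interleaving of the user side does not match the device side.
static bool needConvertBuffer(enum audio::format _userFormat,
                              enum audio::format _deviceFormat,
                              uint32_t _userChannels,
                              uint32_t _deviceChannels,
                              bool _deviceInterleaved) {
	if (_userFormat != _deviceFormat) {
		return true;
	}
	if (_userChannels < _deviceChannels) {
		return true;
	}
	if (    _deviceInterleaved == false
	     && _userChannels > 1) {
		return true;
	}
	return false;
}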

View File

@ -1,56 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ANDROID_H__) && defined(ORCHESTRA_BUILD_JAVA)
#define __AUDIO_ORCHESTRA_API_ANDROID_H__
namespace audio {
namespace orchestra {
namespace api {
class Android: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
public:
Android();
virtual ~Android();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_java;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
private:
void callBackEvent(void* _data,
int32_t _frameRate);
static void androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData);
};
}
}
}
#endif

View File

@ -0,0 +1,60 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#pragma once
#ifdef ORCHESTRA_BUILD_JAVA
#include <audio/orchestra/Interface.hpp>
namespace audio {
namespace orchestra {
namespace api {
class Android: public audio::orchestra::Api {
public:
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Android();
virtual ~Android();
const etk::String& getCurrentApi() {
return audio::orchestra::typeJava;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesirable results!
void callbackEvent();
private:
int32_t m_uid;
public:
int32_t getUId() {
return m_uid;
}
private:
etk::Vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
public:
void playback(int16_t* _dst, int32_t _nbChunk);
void record(int16_t* _dst, int32_t _nbChunk);
};
}
}
}
#endif

View File

@ -0,0 +1,542 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
extern "C" {
#include <jni.h>
#include <pthread.h>
}
#include <ethread/Mutex.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/orchestra/error.hpp>
#include <audio/orchestra/api/AndroidNativeInterface.hpp>
#include <audio/orchestra/api/Android.hpp>
/* include auto generated file */
#include <org_musicdsp_orchestra_OrchestraConstants.h>
#include <jvm-basics/jvm-basics.hpp>
#include <ememory/memory.hpp>
#include <ejson/ejson.hpp>
class AndroidOrchestraContext {
public:
// get resources from the java environment:
JNIEnv* m_JavaVirtualMachinePointer; //!< the JVM
jclass m_javaClassOrchestra; //!< main activity class (android ...)
jclass m_javaClassOrchestraCallback;
jobject m_javaObjectOrchestraCallback;
jmethodID m_javaMethodOrchestraActivityAudioGetDeviceCount;
jmethodID m_javaMethodOrchestraActivityAudioGetDeviceProperty;
jmethodID m_javaMethodOrchestraActivityAudioOpenDeviceInput;
jmethodID m_javaMethodOrchestraActivityAudioOpenDeviceOutput;
jmethodID m_javaMethodOrchestraActivityAudioCloseDevice;
jmethodID m_javaMethodOrchestraActivityAudioStart;
jmethodID m_javaMethodOrchestraActivityAudioStop;
jclass m_javaDefaultClassString; //!< default string class
private:
bool safeInitMethodID(jmethodID& _mid, jclass& _cls, const char* _name, const char* _sign) {
_mid = m_JavaVirtualMachinePointer->GetMethodID(_cls, _name, _sign);
if(_mid == null) {
ATA_ERROR("C->java : Can't find the method " << _name);
/* remove access on the virtual machine : */
m_JavaVirtualMachinePointer = null;
return false;
}
return true;
}
bool java_attach_current_thread(int *_rstatus) {
ATA_DEBUG("C->java : call java");
if (jvm_basics::getJavaVM() == null) {
ATA_ERROR("C->java : JVM not initialised");
m_JavaVirtualMachinePointer = null;
return false;
}
*_rstatus = jvm_basics::getJavaVM()->GetEnv((void **) &m_JavaVirtualMachinePointer, JNI_VERSION_1_6);
if (*_rstatus == JNI_EDETACHED) {
JavaVMAttachArgs lJavaVMAttachArgs;
lJavaVMAttachArgs.version = JNI_VERSION_1_6;
lJavaVMAttachArgs.name = "EwolNativeThread";
lJavaVMAttachArgs.group = null;
int status = jvm_basics::getJavaVM()->AttachCurrentThread(&m_JavaVirtualMachinePointer, &lJavaVMAttachArgs);
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
if (status != JNI_OK) {
ATA_ERROR("C->java : AttachCurrentThread failed : " << status);
m_JavaVirtualMachinePointer = null;
return false;
}
}
return true;
}
void java_detach_current_thread(int _status) {
if(_status == JNI_EDETACHED) {
jvm_basics::getJavaVM()->DetachCurrentThread();
m_JavaVirtualMachinePointer = null;
}
}
public:
AndroidOrchestraContext(JNIEnv* _env, jclass _classBase, jobject _objCallback) :
m_JavaVirtualMachinePointer(null),
m_javaClassOrchestra(0),
m_javaClassOrchestraCallback(0),
m_javaObjectOrchestraCallback(0),
m_javaMethodOrchestraActivityAudioGetDeviceCount(0),
m_javaMethodOrchestraActivityAudioGetDeviceProperty(0),
m_javaMethodOrchestraActivityAudioOpenDeviceInput(0),
m_javaMethodOrchestraActivityAudioOpenDeviceOutput(0),
m_javaMethodOrchestraActivityAudioCloseDevice(0),
m_javaMethodOrchestraActivityAudioStart(0),
m_javaMethodOrchestraActivityAudioStop(0),
m_javaDefaultClassString(0) {
ATA_DEBUG("*******************************************");
ATA_DEBUG("** set JVM Pointer (orchestra) **");
ATA_DEBUG("*******************************************");
m_JavaVirtualMachinePointer = _env;
// get default needed all time elements :
if (m_JavaVirtualMachinePointer == null) {
ATA_ERROR("C->java: NULLPTR jvm interface");
return;
}
ATA_DEBUG("C->java: try load org/musicdsp/orchestra/OrchestraNative class");
m_javaClassOrchestra = m_JavaVirtualMachinePointer->FindClass("org/musicdsp/orchestra/OrchestraNative" );
if (m_javaClassOrchestra == 0) {
ATA_ERROR("C->java : Can't find org/musicdsp/orchestra/OrchestraNative class");
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = null;
return;
}
/* The object field extends Activity and implement OrchestraCallback */
m_javaClassOrchestraCallback = m_JavaVirtualMachinePointer->GetObjectClass(_objCallback);
if(m_javaClassOrchestraCallback == null) {
ATA_ERROR("C->java : Can't find org/musicdsp/orchestra/OrchestraManagerCallback class");
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = null;
return;
}
bool functionCallbackIsMissing = false;
bool ret= false;
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioGetDeviceCount,
m_javaClassOrchestraCallback,
"getDeviceCount",
"()I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : getDeviceCount");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioGetDeviceProperty,
m_javaClassOrchestraCallback,
"getDeviceProperty",
"(I)Ljava/lang/String;");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : getDeviceProperty");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioOpenDeviceInput,
m_javaClassOrchestraCallback,
"openDeviceInput",
"(IIII)I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : openDeviceInput");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioOpenDeviceOutput,
m_javaClassOrchestraCallback,
"openDeviceOutput",
"(IIII)I");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : openDeviceOutput");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioCloseDevice,
m_javaClassOrchestraCallback,
"closeDevice",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : closeDevice");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioStart,
m_javaClassOrchestraCallback,
"start",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : start");
functionCallbackIsMissing = true;
}
ret = safeInitMethodID(m_javaMethodOrchestraActivityAudioStop,
m_javaClassOrchestraCallback,
"stop",
"(I)Z");
if (ret == false) {
jvm_basics::checkExceptionJavaVM(_env);
ATA_ERROR("system can not start without function : stop");
functionCallbackIsMissing = true;
}
m_javaObjectOrchestraCallback = _env->NewGlobalRef(_objCallback);
if (m_javaObjectOrchestraCallback == null) {
functionCallbackIsMissing = true;
}
m_javaDefaultClassString = m_JavaVirtualMachinePointer->FindClass("java/lang/String" );
if (m_javaDefaultClassString == 0) {
ATA_ERROR("C->java : Can't find java/lang/String" );
// remove access on the virtual machine :
m_JavaVirtualMachinePointer = null;
functionCallbackIsMissing = true;
}
if (functionCallbackIsMissing == true) {
ATA_CRITICAL(" mission one function ==> system can not work withut it...");
}
}
~AndroidOrchestraContext() {
// TODO ...
}
void unInit(JNIEnv* _env) {
_env->DeleteGlobalRef(m_javaObjectOrchestraCallback);
m_javaObjectOrchestraCallback = null;
}
uint32_t getDeviceCount() {
// Request the device count from the java side:
ATA_WARNING("C->java : audio get device count");
int status;
if(!java_attach_current_thread(&status)) {
return 0;
}
ATA_DEBUG("Call CallIntMethod ...");
//Call java ...
jint ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioGetDeviceCount);
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
ATA_WARNING(" find " << (uint32_t)ret << " IO");
return (uint32_t)ret;
}
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _idDevice) {
audio::orchestra::DeviceInfo info;
// Request the device information from the java side:
ATA_WARNING("C->java : audio get device info " << _idDevice);
int status;
if(!java_attach_current_thread(&status)) {
return info;
}
//Call java ...
jstring returnString = (jstring) m_JavaVirtualMachinePointer->CallObjectMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioGetDeviceProperty, _idDevice);
const char *js = m_JavaVirtualMachinePointer->GetStringUTFChars(returnString, null);
etk::String retString(js);
m_JavaVirtualMachinePointer->ReleaseStringUTFChars(returnString, js);
//m_JavaVirtualMachinePointer->DeleteLocalRef(returnString);
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
ATA_WARNING("get device information : " << retString);
ejson::Document doc;
if (doc.parse(retString) == false) {
return info;
}
info.name = doc["name"].toString().get("no-name");
if (doc["type"].toString().get("output") == "output") {
info.input = false;
} else {
info.input = true;
}
ejson::Array list = doc["sample-rate"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.sampleRates.pushBack(int32_t(it.toNumber().get(48000)));
}
}
list = doc["channels"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.channels.pushBack(audio::getChannelFromString(it.toString().get("???")));
}
}
list = doc["format"].toArray();
if (list.exist() == true) {
for (auto it : list) {
info.nativeFormats.pushBack(audio::getFormatFromString(it.toString().get("???")));
}
}
info.isDefault = doc["default"].toBoolean().get(false);
info.isCorrect = true;
return info;
}
private:
etk::Vector<ememory::WeakPtr<audio::orchestra::api::Android> > m_instanceList; // list of connected handle ...
//AndroidAudioCallback m_audioCallBack;
//void* m_audioCallBackUserData;
public:
int32_t open(uint32_t _idDevice,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options,
ememory::SharedPtr<audio::orchestra::api::Android> _instance) {
ATA_DEBUG("C->java : audio open device");
int status;
if(!java_attach_current_thread(&status)) {
return -1;
}
//Call java ...
jint ret = false;
if (_mode == audio::orchestra::mode_output) {
ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioOpenDeviceOutput, _idDevice, _sampleRate, _channels, /*_format*/ 1);
} else {
ret = m_JavaVirtualMachinePointer->CallIntMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioOpenDeviceInput, _idDevice, _sampleRate, _channels, /*_format*/ 1);
}
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (int32_t(ret) >= 0) {
m_instanceList.pushBack(_instance);
return int32_t(ret);
}
return -1;
}
public:
enum audio::orchestra::error closeStream(int32_t _id) {
ATA_DEBUG("C->java : audio close device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioCloseDevice, _id);
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
enum audio::orchestra::error startStream(int32_t _id) {
ATA_DEBUG("C->java : audio start device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioStart, _id);
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
enum audio::orchestra::error stopStream(int32_t _id) {
ATA_DEBUG("C->java : audio close device");
int status;
if(!java_attach_current_thread(&status)) {
return audio::orchestra::error_fail;
}
//Call java ...
jboolean ret = m_JavaVirtualMachinePointer->CallBooleanMethod(m_javaObjectOrchestraCallback, m_javaMethodOrchestraActivityAudioStop, _id);
// manage exception:
jvm_basics::checkExceptionJavaVM(m_JavaVirtualMachinePointer);
java_detach_current_thread(status);
if (bool(ret) == false) {
return audio::orchestra::error_fail;
}
return audio::orchestra::error_none;
}
enum audio::orchestra::error abortStream(int32_t _id) {
return audio::orchestra::error_fail;
}
void playback(int32_t _id, int16_t* _dst, int32_t _nbChunk) {
auto it = m_instanceList.begin();
while (it != m_instanceList.end()) {
auto elem = it->lock();
if (elem == null) {
it = m_instanceList.erase(it);
continue;
}
if (elem->getUId() == _id) {
elem->playback(_dst, _nbChunk);
}
++it;
}
}
void record(int32_t _id, int16_t* _dst, int32_t _nbChunk) {
auto it = m_instanceList.begin();
while (it != m_instanceList.end()) {
auto elem = it->lock();
if (elem == null) {
it = m_instanceList.erase(it);
continue;
}
if (elem->getUId() == _id) {
elem->record(_dst, _nbChunk);
}
++it;
}
}
};
static ememory::SharedPtr<AndroidOrchestraContext> s_localContext;
static int32_t s_nbContextRequested(0);
uint32_t audio::orchestra::api::android::getDeviceCount() {
if (s_localContext == null) {
ATA_ERROR("Have no Orchertra API instanciate in JAVA ...");
return 0;
}
return s_localContext->getDeviceCount();
}
audio::orchestra::DeviceInfo audio::orchestra::api::android::getDeviceInfo(uint32_t _device) {
if (s_localContext == null) {
return audio::orchestra::DeviceInfo();
}
return s_localContext->getDeviceInfo(_device);
}
int32_t audio::orchestra::api::android::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options,
ememory::SharedPtr<audio::orchestra::api::Android> _instance) {
if (s_localContext == null) {
return -1;
}
return s_localContext->open(_device, _mode, _channels, _firstChannel, _sampleRate, _format, _bufferSize, _options, _instance);
}
enum audio::orchestra::error audio::orchestra::api::android::closeStream(int32_t _id) {
if (s_localContext == null) {
return audio::orchestra::error_fail;
}
return s_localContext->closeStream(_id);
}
enum audio::orchestra::error audio::orchestra::api::android::startStream(int32_t _id) {
if (s_localContext == null) {
return audio::orchestra::error_fail;
}
return s_localContext->startStream(_id);
}
enum audio::orchestra::error audio::orchestra::api::android::stopStream(int32_t _id) {
if (s_localContext == null) {
return audio::orchestra::error_fail;
}
return s_localContext->stopStream(_id);
}
enum audio::orchestra::error audio::orchestra::api::android::abortStream(int32_t _id) {
if (s_localContext == null) {
return audio::orchestra::error_fail;
}
return s_localContext->abortStream(_id);
}
extern "C" {
void Java_org_musicdsp_orchestra_OrchestraNative_NNsetJavaManager(JNIEnv* _env,
jclass _classBase,
jobject _objCallback) {
ethread::UniqueLock lock(jvm_basics::getMutexJavaVM());
ATA_INFO("*******************************************");
ATA_INFO("** Creating Orchestra context **");
ATA_INFO("*******************************************");
if (s_localContext != null) {
s_nbContextRequested++;
}
s_localContext = ememory::makeShared<AndroidOrchestraContext>(_env, _classBase, _objCallback);
if (s_localContext == null) {
ATA_ERROR("Can not allocate the orchestra main context instance");
return;
}
s_nbContextRequested++;
}
void Java_org_musicdsp_orchestra_OrchestraNative_NNsetJavaManagerRemove(JNIEnv* _env, jclass _cls) {
ethread::UniqueLock lock(jvm_basics::getMutexJavaVM());
ATA_INFO("*******************************************");
ATA_INFO("** remove Orchestra Pointer **");
ATA_INFO("*******************************************");
if (s_nbContextRequested == 0) {
ATA_ERROR("Request remove orchestra interface from Android, but no more interface availlable");
return;
}
s_nbContextRequested--;
if (s_nbContextRequested == 0) {
s_localContext.reset();
}
}
void Java_org_musicdsp_orchestra_OrchestraNative_NNPlayback(JNIEnv* _env,
void* _reserved,
jint _id,
jshortArray _location,
jint _nbChunk) {
ethread::UniqueLock lock(jvm_basics::getMutexJavaVM());
if (s_localContext == null) {
ATA_ERROR("Call audio with no more Low level interface");
return;
}
// get the short* pointer from the Java array
jboolean isCopy;
jshort* dst = _env->GetShortArrayElements(_location, &isCopy);
if (dst != null) {
//ATA_INFO("Need audioData " << int32_t(_nbChunk));
s_localContext->playback(int32_t(_id), static_cast<short*>(dst), int32_t(_nbChunk));
}
// TODO : Understand why it did not work correctly ...
//if (isCopy == JNI_TRUE) {
// release the short* pointer
_env->ReleaseShortArrayElements(_location, dst, 0);
//}
}
void Java_org_musicdsp_orchestra_OrchestraNative_NNRecord(JNIEnv* _env,
void* _reserved,
jint _id,
jshortArray _location,
jint _nbChunk) {
ethread::UniqueLock lock(jvm_basics::getMutexJavaVM());
if (s_localContext == null) {
ATA_ERROR("Call audio with no more Low level interface");
return;
}
// get the short* pointer from the Java array
jboolean isCopy;
jshort* dst = _env->GetShortArrayElements(_location, &isCopy);
if (dst != null) {
//ATA_INFO("Need audioData " << int32_t(_nbChunk));
s_localContext->record(int32_t(_id), static_cast<short*>(dst), int32_t(_nbChunk));
}
// TODO : Understand why it did not work correctly ...
//if (isCopy == JNI_TRUE) {
// release the short* pointer
_env->ReleaseShortArrayElements(_location, dst, 0);
//}
}
}
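
For clarity, getDeviceProperty() on the Java side is expected to answer with a JSON object shaped like the one below; the field names are the ones looked up by AndroidOrchestraContext::getDeviceInfo() above, while the concrete values (device name, channel and format literals) are only illustrative.

// Sketch of a device description as parsed by getDeviceInfo() (values are illustrative):
static const char* g_exampleDeviceProperty =
	"{"
	"  \"name\": \"speaker\","                            // -> info.name
	"  \"type\": \"output\","                             // "output" or "input" -> info.input
	"  \"sample-rate\": [44100, 48000],"                  // -> info.sampleRates
	"  \"channels\": [\"front-left\", \"front-right\"]," // decoded with audio::getChannelFromString()
	"  \"format\": [\"int16\"],"                          // decoded with audio::getFormatFromString()
	"  \"default\": true"                                 // -> info.isDefault
	"}";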

View File

@ -0,0 +1,42 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#pragma once
#ifdef ORCHESTRA_BUILD_JAVA
#include <audio/orchestra/DeviceInfo.hpp>
#include <audio/orchestra/mode.hpp>
#include <audio/orchestra/error.hpp>
#include <audio/orchestra/StreamOptions.hpp>
#include <audio/format.hpp>
#include <ememory/memory.hpp>
namespace audio {
namespace orchestra {
namespace api {
class Android;
namespace android {
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
int32_t open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options,
ememory::SharedPtr<audio::orchestra::api::Android> _instance);
enum audio::orchestra::error closeStream(int32_t _id);
enum audio::orchestra::error startStream(int32_t _id);
enum audio::orchestra::error stopStream(int32_t _id);
enum audio::orchestra::error abortStream(int32_t _id);
}
}
}
}
#endif

View File

@ -8,11 +8,11 @@
#if defined(ORCHESTRA_BUILD_ASIO)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
audio::orchestra::Api* audio::orchestra::api::Asio::create() {
return new audio::orchestra::api::Asio();
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Asio::create() {
return ememory::SharedPtr<audio::orchestra::api::Asio>(ETK_NEW(audio::orchestra::api::Asio));
}
@ -32,15 +32,13 @@ audio::orchestra::Api* audio::orchestra::api::Asio::create() {
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
#undef __class__
#define __class__ "api::Asio"
extern "C" {
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <math.h>
}
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
@ -73,12 +71,12 @@ static void sampleRateChanged(ASIOSampleRate _sRate);
static long asioMessages(long _selector, long _value, void* _message, double* _opt);
audio::orchestra::api::Asio::Asio() :
m_private(new audio::orchestra::api::AsioPrivate()) {
m_private(ETK_NEW(audio::orchestra::api::AsioPrivate)) {
// ASIO cannot run on a multi-threaded apartment. You can call
// CoInitialize beforehand, but it must be for apartment threading
// (in which case, CoInitialize will return S_FALSE here).
m_coInitialized = false;
HRESULT hr = CoInitialize(nullptr);
HRESULT hr = CoInitialize(null);
if (FAILED(hr)) {
ATA_ERROR("requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)");
}
@ -90,7 +88,7 @@ audio::orchestra::api::Asio::Asio() :
}
audio::orchestra::api::Asio::~Asio() {
if (m_state != audio::orchestra::state_closed) {
if (m_state != audio::orchestra::state::closed) {
closeStream();
}
if (m_coInitialized) {
@ -116,7 +114,7 @@ rtaudio::DeviceInfo audio::orchestra::api::Asio::getDeviceInfo(uint32_t _device)
return info;
}
// If a stream is already open, we cannot probe other devices. Thus, use the saved results.
if (m_state != audio::orchestra::state_closed) {
if (m_state != audio::orchestra::state::closed) {
if (_device >= m_devices.size()) {
ATA_ERROR("device ID was not present before stream was opened.");
return info;
@ -157,7 +155,7 @@ rtaudio::DeviceInfo audio::orchestra::api::Asio::getDeviceInfo(uint32_t _device)
for (uint32_t i=0; i<MAX_SAMPLE_RATES; i++) {
result = ASIOCanSampleRate((ASIOSampleRate) SAMPLE_RATES[i]);
if (result == ASE_OK) {
info.sampleRates.push_back(SAMPLE_RATES[i]);
info.sampleRates.pushBack(SAMPLE_RATES[i]);
}
}
// Determine supported data types ... just check first channel and assume rest are the same.
@ -176,19 +174,19 @@ rtaudio::DeviceInfo audio::orchestra::api::Asio::getDeviceInfo(uint32_t _device)
info.nativeFormats.clear();
if ( channelInfo.type == ASIOSTInt16MSB
|| channelInfo.type == ASIOSTInt16LSB) {
info.nativeFormats.push_back(audio::format_int16);
info.nativeFormats.pushBack(audio::format_int16);
} else if ( channelInfo.type == ASIOSTInt32MSB
|| channelInfo.type == ASIOSTInt32LSB) {
info.nativeFormats.push_back(audio::format_int32);
info.nativeFormats.pushBack(audio::format_int32);
} else if ( channelInfo.type == ASIOSTFloat32MSB
|| channelInfo.type == ASIOSTFloat32LSB) {
info.nativeFormats.push_back(audio::format_float);
info.nativeFormats.pushBack(audio::format_float);
} else if ( channelInfo.type == ASIOSTFloat64MSB
|| channelInfo.type == ASIOSTFloat64LSB) {
info.nativeFormats.push_back(audio::format_double);
info.nativeFormats.pushBack(audio::format_double);
} else if ( channelInfo.type == ASIOSTInt24MSB
|| channelInfo.type == ASIOSTInt24LSB) {
info.nativeFormats.push_back(audio::format_int24);
info.nativeFormats.pushBack(audio::format_int24);
}
if (info.outputChannels > 0){
if (getDefaultOutputDevice() == _device) {
@ -219,14 +217,14 @@ void audio::orchestra::api::Asio::saveDeviceInfo() {
}
}
bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::Asio::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
// For ASIO, a duplex stream MUST use the same driver.
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output
@ -381,10 +379,10 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
log2_of_max_size = i;
}
}
long min_delta = std::abs((long)*_bufferSize - ((long)1 << log2_of_min_size));
long min_delta = etk::abs((long)*_bufferSize - ((long)1 << log2_of_min_size));
int32_t min_delta_num = log2_of_min_size;
for (int32_t i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
long current_delta = std::abs((long)*_bufferSize - ((long)1 << i));
long current_delta = etk::abs((long)*_bufferSize - ((long)1 << i));
if (current_delta < min_delta) {
min_delta = current_delta;
min_delta_num = i;
@ -413,10 +411,10 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
m_deviceInterleaved[modeToIdTable(_mode)] = false;
m_private->bufferInfos = 0;
// Create a manual-reset event.
m_private->condition = CreateEvent(nullptr, // no security
m_private->condition = CreateEvent(null, // no security
TRUE, // manual-reset
FALSE, // non-signaled initially
nullptr); // unnamed
null); // unnamed
// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
@ -424,16 +422,16 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output) {
ASIODisposeBuffers();
if (m_private->bufferInfos == nullptr) {
if (m_private->bufferInfos == null) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
m_private->bufferInfos = null;
}
}
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
bool buffersAllocated = false;
uint32_t i, nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1];
m_private->bufferInfos = (ASIOBufferInfo *) malloc(nChannels * sizeof(ASIOBufferInfo));
if (m_private->bufferInfos == nullptr) {
if (m_private->bufferInfos == null) {
ATA_ERROR("error allocating bufferInfo memory for driver (" << driverName << ").");
goto error;
}
@ -453,7 +451,7 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = nullptr;
asioCallbacks.bufferSwitchTimeInfo = null;
result = ASIOCreateBuffers(m_private->bufferInfos, nChannels, m_bufferSize, &asioCallbacks);
if (result != ASE_OK) {
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers.");
@ -473,7 +471,7 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
if (m_userBuffer[modeToIdTable(_mode)] == null) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
@ -492,10 +490,10 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
m_deviceBuffer = null;
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
if (m_deviceBuffer == null) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
@ -503,7 +501,7 @@ bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
}
m_sampleRate = _sampleRate;
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
if ( _mode == audio::orchestra::mode_output
&& _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
@ -532,9 +530,9 @@ error:
}
drivers.removeCurrentDriver();
CloseHandle(m_private->condition);
if (m_private->bufferInfos != nullptr) {
if (m_private->bufferInfos != null) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
m_private->bufferInfos = null;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
@ -550,12 +548,12 @@ error:
}
enum audio::orchestra::error audio::orchestra::api::Asio::closeStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("no open stream to close!");
return audio::orchestra::error_warning;
}
if (m_state == audio::orchestra::state_running) {
m_state = audio::orchestra::state_stopped;
if (m_state == audio::orchestra::state::running) {
m_state = audio::orchestra::state::stopped;
ASIOStop();
}
ASIODisposeBuffers();
@ -575,7 +573,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::closeStream() {
m_deviceBuffer = 0;
}
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
return audio::orchestra::error_none;
}
@ -587,7 +585,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::startStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
@ -599,7 +597,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::startStream() {
m_private->drainCounter = 0;
m_private->internalDrain = false;
ResetEvent(m_private->condition);
m_state = audio::orchestra::state_running;
m_state = audio::orchestra::state::running;
asioXRun = false;
unlock:
stopThreadCalled = false;
@ -613,7 +611,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
@ -623,7 +621,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::stopStream() {
WaitForSingleObject(m_private->condition, INFINITE); // block until signaled
}
}
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
ASIOError result = ASIOStop();
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device.");
@ -638,7 +636,7 @@ enum audio::orchestra::error audio::orchestra::api::Asio::abortStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
error(audio::orchestra::error_warning);
return;
@ -666,23 +664,23 @@ static unsigned __stdcall asioStopStream(void *_ptr) {
}
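// --- Editor's sketch (not part of the diff): why asioStopStream() runs on its own thread. ---
// ASIOStop() cannot safely be called from inside the ASIO buffer-switch callback, so when the
// callback decides the stream is finished it spawns a short-lived worker (via _beginthreadex,
// as in the calls below) that invokes stopStream() on its behalf. Hypothetical stand-alone
// version of that hand-off; 'StreamObject' is a placeholder, not a type from the diff:
#include <process.h>
#include <windows.h>

struct StreamObject { void stopStream() { /* ... */ } };

static unsigned __stdcall stopWorker(void* _ptr) {
	static_cast<StreamObject*>(_ptr)->stopStream(); // the real code calls object->stopStream()
	return 0;
}

static void requestStopFromCallback(StreamObject* _object) {
	unsigned threadId = 0;
	uintptr_t handle = _beginthreadex(nullptr,     // default security
	                                  0,           // default stack size
	                                  &stopWorker, // thread entry
	                                  _object,     // user data (CallbackInfo in the diff)
	                                  0,           // run immediately
	                                  &threadId);
	if (handle != 0) {
		CloseHandle(reinterpret_cast<HANDLE>(handle)); // detach; the worker outlives this call
	}
}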
bool audio::orchestra::api::Asio::callbackEvent(long bufferIndex) {
if ( m_state == audio::orchestra::state_stopped
|| m_state == audio::orchestra::state_stopping) {
if ( m_state == audio::orchestra::state::stopped
|| m_state == audio::orchestra::state::stopping) {
return true;
}
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return false;
}
CallbackInfo *info = (CallbackInfo *) &m_callbackInfo;
// Check if we were draining the stream and signal if finished.
if (m_private->drainCounter > 3) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
if (m_private->internalDrain == false) {
SetEvent(m_private->condition);
} else { // spawn a thread to stop the stream
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
m_callbackInfo.thread = _beginthreadex(null,
0,
&asioStopStream,
&m_callbackInfo,
@ -695,13 +693,13 @@ bool audio::orchestra::api::Asio::callbackEvent(long bufferIndex) {
// draining stream.
if (m_private->drainCounter == 0) {
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
etk::Vector<enum audio::orchestra::status> status;
if (m_mode != audio::orchestra::mode_input && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow);
status.pushBack(audio::orchestra::status::underflow);
asioXRun = false;
}
if (m_mode != audio::orchestra::mode_output && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow);
status.pushBack(audio::orchestra::status::underflow);
asioXRun = false;
}
int32_t cbReturnValue = info->callback(m_userBuffer[1],
@ -711,10 +709,10 @@ bool audio::orchestra::api::Asio::callbackEvent(long bufferIndex) {
m_bufferSize,
status);
if (cbReturnValue == 2) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
m_private->drainCounter = 2;
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
m_callbackInfo.thread = _beginthreadex(null,
0,
&asioStopStream,
&m_callbackInfo,

View File

@ -4,9 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ASIO_H__) && defined(ORCHESTRA_BUILD_ASIO)
#define __AUDIO_ORCHESTRA_API_ASIO_H__
#pragma once
#ifdef ORCHESTRA_BUILD_ASIO
namespace audio {
namespace orchestra {
@ -14,12 +13,12 @@ namespace audio {
class AsioPrivate;
class Asio: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Asio();
virtual ~Asio();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::WINDOWS_ASIO;
const etk::String& getCurrentApi() {
return audio::orchestra::typeAsio;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
@ -34,18 +33,18 @@ namespace audio {
// will most likely produce highly undesirable results!
bool callbackEvent(long _bufferIndex);
private:
std::shared_ptr<AsioPrivate> m_private;
std::vector<audio::orchestra::DeviceInfo> m_devices;
ememory::SharedPtr<AsioPrivate> m_private;
etk::Vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool m_coInitialized;
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}

View File

@ -14,18 +14,16 @@
#if defined(__MACOSX_CORE__) || defined(ORCHESTRA_BUILD_MACOSX_CORE)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <etk/thread.h>
#include <etk/thread/tools.h>
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <ethread/Thread.hpp>
#include <ethread/tools.hpp>
#include <audio/orchestra/api/Core.hpp>
audio::orchestra::Api* audio::orchestra::api::Core::create() {
return new audio::orchestra::api::Core();
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Core::create() {
return ememory::SharedPtr<audio::orchestra::api::Core>(ETK_NEW(audio::orchestra::api::Core));
}
#undef __class__
#define __class__ "api::Core"
namespace audio {
namespace orchestra {
namespace api {
@ -39,7 +37,7 @@ namespace audio {
uint32_t nStreams[2]; // number of streams to use
bool xrun[2];
char *deviceBuffer;
std11::condition_variable condition;
ethread::Semaphore m_semaphore;
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
CorePrivate() :
@ -59,13 +57,13 @@ namespace audio {
}
audio::orchestra::api::Core::Core() :
m_private(new audio::orchestra::api::CorePrivate()) {
m_private(ETK_NEW(audio::orchestra::api::CorePrivate)) {
#if defined(AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER)
// This is a largely undocumented but absolutely necessary
// requirement starting with OS-X 10.6. If not called, queries and
// updates to various audio device properties are not handled
// correctly.
CFRunLoopRef theRunLoop = nullptr;
CFRunLoopRef theRunLoop = null;
AudioObjectPropertyAddress property = {
kAudioHardwarePropertyRunLoop,
kAudioObjectPropertyScopeGlobal,
@ -74,7 +72,7 @@ audio::orchestra::api::Core::Core() :
OSStatus result = AudioObjectSetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
sizeof(CFRunLoopRef),
&theRunLoop);
if (result != noErr) {
@ -87,7 +85,7 @@ audio::orchestra::api::Core::~Core() {
// The subclass destructor gets called before the base class
// destructor, so close an existing stream before deallocating
// apiDeviceId memory.
if (m_state != audio::orchestra::state_closed) {
if (m_state != audio::orchestra::state::closed) {
closeStream();
}
}
@ -100,12 +98,12 @@ uint32_t audio::orchestra::api::Core::getDeviceCount() {
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, nullptr, &dataSize);
OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propertyAddress, 0, null, &dataSize);
if (result != noErr) {
ATA_ERROR("OS-X error getting device info!");
return 0;
}
return dataSize / sizeof(AudioDeviceID);
return (dataSize / sizeof(AudioDeviceID)) * 2;
}
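// --- Editor's sketch (not part of the diff): the even/odd device numbering introduced here. ---
// getDeviceCount() now reports twice the number of physical CoreAudio devices: each physical
// device is exposed once as an output (even user index) and once as an input (odd user index),
// and the physical index is recovered with userIndex/2, as the /2 and %2 below show. A small
// hypothetical helper that makes the mapping explicit:
struct UserDeviceIndex {
	uint32_t realIndex; // index into the AudioDeviceID list from kAudioHardwarePropertyDevices
	bool     input;     // true for the capture view of the device, false for playback
};

static inline UserDeviceIndex decodeUserDevice(uint32_t _userDevice) {
	return UserDeviceIndex{_userDevice / 2, (_userDevice % 2) == 1};
}

static inline uint32_t encodeUserDevice(uint32_t _realIndex, bool _input) {
	return _realIndex * 2 + (_input ? 1 : 0);
}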
uint32_t audio::orchestra::api::Core::getDefaultInputDevice() {
@ -123,7 +121,7 @@ uint32_t audio::orchestra::api::Core::getDefaultInputDevice() {
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
&id);
if (result != noErr) {
@ -136,7 +134,7 @@ uint32_t audio::orchestra::api::Core::getDefaultInputDevice() {
result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
(void*)&deviceList);
if (result != noErr) {
@ -145,7 +143,7 @@ uint32_t audio::orchestra::api::Core::getDefaultInputDevice() {
}
for (uint32_t iii=0; iii<nDevices; iii++) {
if (id == deviceList[iii]) {
return iii;
return iii*2+1;
}
}
ATA_ERROR("No default device found!");
@ -167,7 +165,7 @@ uint32_t audio::orchestra::api::Core::getDefaultOutputDevice() {
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
&id);
if (result != noErr) {
@ -180,7 +178,7 @@ uint32_t audio::orchestra::api::Core::getDefaultOutputDevice() {
result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
(void*)&deviceList);
if (result != noErr) {
@ -189,7 +187,7 @@ uint32_t audio::orchestra::api::Core::getDefaultOutputDevice() {
}
for (uint32_t iii=0; iii<nDevices; iii++) {
if (id == deviceList[iii]) {
return iii;
return iii*2;
}
}
ATA_ERROR("No default device found!");
@ -198,19 +196,25 @@ uint32_t audio::orchestra::api::Core::getDefaultOutputDevice() {
audio::orchestra::DeviceInfo audio::orchestra::api::Core::getDeviceInfo(uint32_t _device) {
audio::orchestra::DeviceInfo info;
info.probed = false;
// Get device ID
uint32_t nDevices = getDeviceCount();
if (nDevices == 0) {
ATA_ERROR("no devices found!");
info.clear();
return info;
}
if (_device >= nDevices) {
ATA_ERROR("device ID is invalid!");
info.clear();
return info;
}
AudioDeviceID deviceList[ nDevices ];
uint32_t dataSize = sizeof(AudioDeviceID) * nDevices;
info.input = false;
if (_device%2 == 1) {
info.input = true;
}
// The /2 corresponds to not mixing input and output ==> the user-visible device count is twice the number of real devices ...
AudioDeviceID deviceList[nDevices/2];
uint32_t dataSize = sizeof(AudioDeviceID) * nDevices/2;
AudioObjectPropertyAddress property = {
kAudioHardwarePropertyDevices,
kAudioObjectPropertyScopeGlobal,
@ -219,125 +223,112 @@ audio::orchestra::DeviceInfo audio::orchestra::api::Core::getDeviceInfo(uint32_t
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
(void*)&deviceList);
if (result != noErr) {
ATA_ERROR("OS-X system error getting device IDs.");
info.clear();
return info;
}
AudioDeviceID id = deviceList[ _device ];
AudioDeviceID id = deviceList[ _device/2 ];
// ------------------------------------------------
// Get the device name.
// ------------------------------------------------
info.name.erase();
CFStringRef cfname;
dataSize = sizeof(CFStringRef);
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &cfname);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &cfname);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting device manufacturer.");
info.clear();
return info;
}
//const char *mname = CFStringGetCStringPtr(cfname, CFStringGetSystemEncoding());
int32_t length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append((const char *)mname, strlen(mname));
etk::Vector<char> name;
name.resize(length * 3 + 1, '\0');
CFStringGetCString(cfname, &name[0], length * 3 + 1, CFStringGetSystemEncoding());
info.name.append(&name[0], strlen(&name[0]));
info.name.append(": ");
CFRelease(cfname);
free(mname);
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &cfname);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &cfname);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting device name.");
info.clear();
return info;
}
//const char *name = CFStringGetCStringPtr(cfname, CFStringGetSystemEncoding());
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append((const char *)name, strlen(name));
name.resize(length * 3 + 1, '\0');
CFStringGetCString(cfname, &name[0], length * 3 + 1, CFStringGetSystemEncoding());
info.name.append(&name[0], strlen(&name[0]));
CFRelease(cfname);
free(name);
// ------------------------------------------------
// Get the output stream "configuration".
AudioBufferList *bufferList = nil;
// ------------------------------------------------
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
// property.mElement = kAudioObjectPropertyElementWildcard;
if (info.input == false) {
property.mScope = kAudioDevicePropertyScopeOutput;
} else {
property.mScope = kAudioDevicePropertyScopeInput;
}
AudioBufferList *bufferList = null;
dataSize = 0;
result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
result = AudioObjectGetPropertyDataSize(id, &property, 0, null, &dataSize);
if (result != noErr || dataSize == 0) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting output stream configuration info for device (" << _device << ").");
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
info.clear();
return info;
}
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc(dataSize);
if (bufferList == nullptr) {
ATA_ERROR("memory error allocating output AudioBufferList.");
if (bufferList == null) {
ATA_ERROR("memory error allocating AudioBufferList.");
info.clear();
return info;
}
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, bufferList);
if ( result != noErr
|| dataSize == 0) {
free(bufferList);
ATA_ERROR("system error (" << getErrorCode(result) << ") getting output stream configuration for device (" << _device << ").");
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
info.clear();
return info;
}
// Get output channel information.
uint32_t i, nStreams = bufferList->mNumberBuffers;
for (i=0; i<nStreams; i++) {
info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Get channel information.
for (size_t iii=0; iii<bufferList->mNumberBuffers; ++iii) {
for (size_t jjj=0; jjj<bufferList->mBuffers[iii].mNumberChannels; ++jjj) {
info.channels.pushBack(audio::channel_unknow);
}
}
free(bufferList);
// Get the input stream "configuration".
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
if ( result != noErr
|| dataSize == 0) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting input stream configuration info for device (" << _device << ").");
if (info.channels.size() == 0) {
ATA_DEBUG("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ") ==> no channels.");
info.clear();
return info;
}
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc(dataSize);
if (bufferList == nullptr) {
ATA_ERROR("memory error allocating input AudioBufferList.");
return info;
}
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
if (result != noErr || dataSize == 0) {
free(bufferList);
ATA_ERROR("system error (" << getErrorCode(result) << ") getting input stream configuration for device (" << _device << ").");
return info;
}
// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for (i=0; i<nStreams; i++) {
info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
}
free(bufferList);
// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0
&& info.inputChannels > 0) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Probe the device sample rates.
bool isInput = false;
if (info.outputChannels == 0) {
isInput = true;
}
// ------------------------------------------------
// Determine the supported sample rates.
// ------------------------------------------------
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if (isInput == false) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
result = AudioObjectGetPropertyDataSize(id, &property, 0, null, &dataSize);
if ( result != kAudioHardwareNoError
|| dataSize == 0) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting sample rate info.");
info.clear();
return info;
}
uint32_t nRanges = dataSize / sizeof(AudioValueRange);
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &rangeList);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &rangeList);
if (result != kAudioHardwareNoError) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting sample rates.");
info.clear();
return info;
}
double minimumRate = 100000000.0, maximumRate = 0.0;
@ -353,45 +344,52 @@ audio::orchestra::DeviceInfo audio::orchestra::api::Core::getDeviceInfo(uint32_t
for (auto &it : audio::orchestra::genericSampleRate()) {
if ( it >= minimumRate
&& it <= maximumRate) {
info.sampleRates.push_back(it);
info.sampleRates.pushBack(it);
}
}
if (info.sampleRates.size() == 0) {
ATA_ERROR("No supported sample rates found for device (" << _device << ").");
info.clear();
return info;
}
// ------------------------------------------------
// Determine the format.
// ------------------------------------------------
// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats.push_back(audio::format_float);
if (info.outputChannels > 0) {
info.nativeFormats.pushBack(audio::format_float);
// ------------------------------------------------
// Determine the default channel.
// ------------------------------------------------
if (info.input == false) {
if (getDefaultOutputDevice() == _device) {
info.isDefaultOutput = true;
info.isDefault = true;
}
}
if (info.inputChannels > 0) {
} else {
if (getDefaultInputDevice() == _device) {
info.isDefaultInput = true;
info.isDefault = true;
}
}
info.probed = true;
info.isCorrect = true;
return info;
}
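// --- Editor's sketch (not part of the diff): consuming the reworked DeviceInfo. ---
// With the split input/output numbering, a caller walks every user-visible device and reads
// the new fields (input flag, flat channel list, isDefault/isCorrect) instead of the removed
// inputChannels/outputChannels counters. Hedged usage sketch against the Api methods shown in
// this change; it assumes DeviceInfo's fields stream into the log macro as they do elsewhere:
static void dumpCoreDevices(audio::orchestra::api::Core& _api) {
	for (uint32_t iii = 0; iii < _api.getDeviceCount(); ++iii) {
		audio::orchestra::DeviceInfo info = _api.getDeviceInfo(iii);
		if (info.isCorrect == false) {
			continue; // probing this entry failed
		}
		ATA_INFO("device " << iii
		         << " '" << info.name << "'"
		         << (info.input ? " [input]" : " [output]")
		         << " channels=" << info.channels.size()
		         << (info.isDefault ? " (default)" : ""));
	}
}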
OSStatus audio::orchestra::api::Core::callbackEvent(AudioDeviceID _inDevice,
const AudioTimeStamp* _inNow,
const AudioBufferList* _inInputData,
const AudioTimeStamp* _inInputTime,
AudioBufferList* _outOutputData,
const AudioTimeStamp* _inOutputTime,
void* _userData) {
const AudioTimeStamp* _inNow,
const AudioBufferList* _inInputData,
const AudioTimeStamp* _inInputTime,
AudioBufferList* _outOutputData,
const AudioTimeStamp* _inOutputTime,
void* _userData) {
audio::orchestra::api::Core* myClass = reinterpret_cast<audio::orchestra::api::Core*>(_userData);
audio::Time inputTime;
audio::Time outputTime;
if (_inInputTime != nullptr) {
if (_inInputTime != null) {
inputTime = audio::Time(_inInputTime->mHostTime/1000000000LL, _inInputTime->mHostTime%1000000000LL);
}
if (_inOutputTime != nullptr) {
if (_inOutputTime != null) {
outputTime = audio::Time(_inOutputTime->mHostTime/1000000000LL, _inOutputTime->mHostTime%1000000000LL);
}
if (myClass->callbackEvent(_inDevice, _inInputData, inputTime, _outOutputData, outputTime) == false) {
@ -429,18 +427,18 @@ static OSStatus rateListener(AudioObjectID _inDevice,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
AudioObjectGetPropertyData(_inDevice, &property, 0, nullptr, &dataSize, rate);
AudioObjectGetPropertyData(_inDevice, &property, 0, null, &dataSize, rate);
return kAudioHardwareNoError;
}
bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::Core::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
// Get device ID
uint32_t nDevices = getDeviceCount();
if (nDevices == 0) {
@ -453,8 +451,8 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
ATA_ERROR("device ID is invalid!");
return false;
}
AudioDeviceID deviceList[ nDevices ];
uint32_t dataSize = sizeof(AudioDeviceID) * nDevices;
AudioDeviceID deviceList[ nDevices/2 ];
uint32_t dataSize = sizeof(AudioDeviceID) * nDevices/2;
AudioObjectPropertyAddress property = {
kAudioHardwarePropertyDevices,
kAudioObjectPropertyScopeGlobal,
@ -463,14 +461,14 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property,
0,
nullptr,
null,
&dataSize,
(void *) &deviceList);
if (result != noErr) {
ATA_ERROR("OS-X system error getting device IDs.");
return false;
}
AudioDeviceID id = deviceList[ _device ];
AudioDeviceID id = deviceList[ _device/2 ];
// Setup for stream mode.
bool isInput = false;
if (_mode == audio::orchestra::mode_input) {
@ -483,7 +481,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
AudioBufferList *bufferList = nil;
dataSize = 0;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
result = AudioObjectGetPropertyDataSize(id, &property, 0, null, &dataSize);
if ( result != noErr
|| dataSize == 0) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
@ -491,11 +489,11 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
}
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc(dataSize);
if (bufferList == nullptr) {
if (bufferList == null) {
ATA_ERROR("memory error allocating AudioBufferList.");
return false;
}
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, bufferList);
if ( result != noErr
|| dataSize == 0) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
@ -505,7 +503,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
// channels. CoreAudio devices can have an arbitrary number of
// streams and each stream can have an arbitrary number of channels.
// For each stream, a single buffer of interleaved samples is
// provided. RtAudio prefers the use of one stream of interleaved
// provided. orchestra prefers the use of one stream of interleaved
// data or multiple consecutive single-channel streams. However, we
// now support multiple consecutive multi-channel streams of
// interleaved data as well.
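// --- Editor's sketch (not part of the diff): what "one stream of interleaved data" means. ---
// In an interleaved stream the sample of channel c in frame f lives at index f*nChannels + c,
// whereas single-channel streams carry one contiguous plane per channel. Generic illustration,
// independent of the CoreAudio types handled below:
static void deinterleave(const float* _interleaved, // nFrames * nChannels samples
                         float** _planes,           // nChannels buffers of nFrames samples each
                         uint32_t _nFrames,
                         uint32_t _nChannels) {
	for (uint32_t frame = 0; frame < _nFrames; ++frame) {
		for (uint32_t channel = 0; channel < _nChannels; ++channel) {
			_planes[channel][frame] = _interleaved[frame * _nChannels + channel];
		}
	}
}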
@ -571,7 +569,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
AudioValueRange bufferRange;
dataSize = sizeof(AudioValueRange);
property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &bufferRange);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &bufferRange);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting buffer size range for device (" << _device << ").");
return false;
@ -589,7 +587,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
uint32_t theSize = (uint32_t) *_bufferSize;
dataSize = sizeof(uint32_t);
property.mSelector = kAudioDevicePropertyBufferFrameSize;
result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &theSize);
result = AudioObjectSetPropertyData(id, &property, 0, null, dataSize, &theSize);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") setting the buffer size for device (" << _device << ").");
return false;
@ -609,7 +607,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
double nominalRate;
dataSize = sizeof(double);
property.mSelector = kAudioDevicePropertyNominalSampleRate;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &nominalRate);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &nominalRate);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting current sample rate.");
return false;
@ -625,7 +623,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
return false;
}
nominalRate = (double) _sampleRate;
result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &nominalRate);
result = AudioObjectSetPropertyData(id, &property, 0, null, dataSize, &nominalRate);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate for device (" << _device << ").");
return false;
@ -637,7 +635,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
if (microCounter > 5000000) {
break;
}
usleep(5000);
ethread::sleepMilliSeconds((5));
}
// Remove the property listener.
AudioObjectRemovePropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
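// --- Editor's sketch (not part of the diff): the rate-change wait pattern used above. ---
// Setting kAudioDevicePropertyNominalSampleRate is asynchronous, so the code installs a
// property listener that writes the device's reported rate into a variable, then polls that
// variable in 5 ms steps with a ~5 s budget until it matches the requested rate. Sketch of
// the polling half under the assumption that the listener updates '_reportedRate':
static bool waitForReportedRate(volatile double& _reportedRate,
                                double _wantedRate,
                                uint32_t _stepMilliSeconds = 5,
                                uint32_t _budgetMilliSeconds = 5000) {
	uint32_t waited = 0;
	while (_reportedRate != _wantedRate) {
		if (waited > _budgetMilliSeconds) {
			return false; // caller reports a timeout error, as the diff does
		}
		ethread::sleepMilliSeconds(_stepMilliSeconds); // same helper used above (signature assumed)
		waited += _stepMilliSeconds;
	}
	return true;
}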
@ -651,7 +649,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
AudioStreamBasicDescription description;
dataSize = sizeof(AudioStreamBasicDescription);
property.mSelector = kAudioStreamPropertyVirtualFormat;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &description);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream format for device (" << _device << ").");
return false;
@ -669,7 +667,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
updateFormat = true;
}
if (updateFormat) {
result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &description);
result = AudioObjectSetPropertyData(id, &property, 0, null, dataSize, &description);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << _device << ").");
return false;
@ -677,16 +675,16 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
}
// Now check the physical format.
property.mSelector = kAudioStreamPropertyPhysicalFormat;
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &description);
if (result != noErr) {
ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream physical format for device (" << _device << ").");
return false;
}
//std::cout << "Current physical stream format:" << std::endl;
//std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
//std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
//std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
//std::cout << " sample rate = " << description.mSampleRate << std::endl;
//ATA_DEBUG("Current physical stream format:");
//ATA_DEBUG(" mBitsPerChan = " << description.mBitsPerChannel);
//ATA_DEBUG(" aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked));
//ATA_DEBUG(" bytesPerFrame = " << description.mBytesPerFrame);
//ATA_DEBUG(" sample rate = " << description.mSampleRate);
if ( description.mFormatID != kAudioFormatLinearPCM
|| description.mBitsPerChannel < 16) {
description.mFormatID = kAudioFormatLinearPCM;
@ -694,19 +692,19 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
AudioStreamBasicDescription testDescription = description;
uint32_t formatFlags;
// We'll try higher bit rates first and then work our way down.
std::vector< std::pair<uint32_t, uint32_t> > physicalFormats;
etk::Vector< etk::Pair<uint32_t, uint32_t> > physicalFormats;
formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
physicalFormats.pushBack(etk::Pair<float, uint32_t>(32, formatFlags));
formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
physicalFormats.push_back(std::pair<float, uint32_t>(24, formatFlags)); // 24-bit packed
physicalFormats.pushBack(etk::Pair<float, uint32_t>(32, formatFlags));
physicalFormats.pushBack(etk::Pair<float, uint32_t>(24, formatFlags)); // 24-bit packed
formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh);
physicalFormats.push_back(std::pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low
physicalFormats.pushBack(etk::Pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low
formatFlags |= kAudioFormatFlagIsAlignedHigh;
physicalFormats.push_back(std::pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high
physicalFormats.pushBack(etk::Pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high
formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
physicalFormats.push_back(std::pair<float, uint32_t>(16, formatFlags));
physicalFormats.push_back(std::pair<float, uint32_t>(8, formatFlags));
physicalFormats.pushBack(etk::Pair<float, uint32_t>(16, formatFlags));
physicalFormats.pushBack(etk::Pair<float, uint32_t>(8, formatFlags));
bool setPhysicalFormat = false;
for(uint32_t i=0; i<physicalFormats.size(); i++) {
testDescription = description;
@ -719,14 +717,14 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
}
testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &testDescription);
result = AudioObjectSetPropertyData(id, &property, 0, null, dataSize, &testDescription);
if (result == noErr) {
setPhysicalFormat = true;
//std::cout << "Updated physical stream format:" << std::endl;
//std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
//std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
//std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
//std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
//ATA_DEBUG("Updated physical stream format:");
//ATA_DEBUG(" mBitsPerChan = " << testDescription.mBitsPerChannel);
//ATA_DEBUG(" aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked));
//ATA_DEBUG(" bytesPerFrame = " << testDescription.mBytesPerFrame);
//ATA_DEBUG(" sample rate = " << testDescription.mSampleRate);
break;
}
}
@ -740,7 +738,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
dataSize = sizeof(uint32_t);
property.mSelector = kAudioDevicePropertyLatency;
if (AudioObjectHasProperty(id, &property) == true) {
result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency);
result = AudioObjectGetPropertyData(id, &property, 0, null, &dataSize, &latency);
if (result == kAudioHardwareNoError) {
m_latency[ _mode ] = latency;
} else {
@ -816,10 +814,10 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
m_deviceBuffer = null;
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
if (m_deviceBuffer == null) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
@ -827,7 +825,7 @@ bool audio::orchestra::api::Core::probeDeviceOpen(uint32_t _device,
}
m_sampleRate = _sampleRate;
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
ATA_VERBOSE("Set state as stopped");
// Setup the buffer conversion information structure.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
@ -871,19 +869,19 @@ error:
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
ATA_VERBOSE("Set state as closed");
return false;
}
enum audio::orchestra::error audio::orchestra::api::Core::closeStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("no open stream to close!");
return audio::orchestra::error_warning;
}
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
AudioDeviceStop(m_private->id[0], &audio::orchestra::api::Core::callbackEvent);
}
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
@ -896,7 +894,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::closeStream() {
if ( m_mode == audio::orchestra::mode_input
|| ( m_mode == audio::orchestra::mode_duplex
&& m_device[0] != m_device[1])) {
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
AudioDeviceStop(m_private->id[1], &audio::orchestra::api::Core::callbackEvent);
}
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
@ -910,10 +908,10 @@ enum audio::orchestra::error audio::orchestra::api::Core::closeStream() {
m_userBuffer[1].clear();
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
m_deviceBuffer = null;
}
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
ATA_VERBOSE("Set state as closed");
return audio::orchestra::error_none;
}
@ -924,7 +922,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::startStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
@ -948,7 +946,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::startStream() {
}
m_private->drainCounter = 0;
m_private->internalDrain = false;
m_state = audio::orchestra::state_running;
m_state = audio::orchestra::state::running;
ATA_VERBOSE("Set state as running");
unlock:
if (result == noErr) {
@ -961,7 +959,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
@ -969,9 +967,9 @@ enum audio::orchestra::error audio::orchestra::api::Core::stopStream() {
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
if (m_private->drainCounter == 0) {
std11::unique_lock<std11::mutex> lck(m_mutex);
ethread::UniqueLock lck(m_mutex);
m_private->drainCounter = 2;
m_private->condition.wait(lck);
m_private->m_semaphore.wait();
}
result = AudioDeviceStop(m_private->id[0], &audio::orchestra::api::Core::callbackEvent);
if (result != noErr) {
@ -988,7 +986,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::stopStream() {
goto unlock;
}
}
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
ATA_VERBOSE("Set state as stopped");
unlock:
if (result == noErr) {
@ -1001,7 +999,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::abortStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
@ -1015,7 +1013,7 @@ enum audio::orchestra::error audio::orchestra::api::Core::abortStream() {
// callbackEvent() function probably should return before the AudioDeviceStop()
// function is called.
void audio::orchestra::api::Core::coreStopStream(void *_userData) {
etk::thread::setName("CoreAudio_stopStream");
ethread::setName("CoreAudio_stopStream");
audio::orchestra::api::Core* myClass = reinterpret_cast<audio::orchestra::api::Core*>(_userData);
myClass->stopStream();
}
@ -1025,23 +1023,23 @@ bool audio::orchestra::api::Core::callbackEvent(AudioDeviceID _deviceId,
const audio::Time& _inTime,
const AudioBufferList *_outBufferList,
const audio::Time& _outTime) {
if ( m_state == audio::orchestra::state_stopped
|| m_state == audio::orchestra::state_stopping) {
if ( m_state == audio::orchestra::state::stopped
|| m_state == audio::orchestra::state::stopping) {
return true;
}
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return false;
}
// Check if we were draining the stream and signal if finished.
if (m_private->drainCounter > 3) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
ATA_VERBOSE("Set state as stopping");
if (m_private->internalDrain == true) {
new std11::thread(&audio::orchestra::api::Core::coreStopStream, this);
ETK_NEW(ethread::Thread, &audio::orchestra::api::Core::coreStopStream, this);
} else {
// external call to stopStream()
m_private->condition.notify_one();
m_private->m_semaphore.post();
}
return true;
}
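// --- Editor's sketch (not part of the diff): the semaphore hand-shake replacing the condition variable. ---
// stopStream() arms the drain (drainCounter = 2) and blocks on m_semaphore.wait(); the render
// callback, once it sees drainCounter > 3, posts that semaphore (external stop) or spawns
// coreStopStream() itself (internal drain). Reduced, hedged sketch of the hand-off using only
// the wait()/post() calls visible in the diff (the counter handling is simplified here):
struct DrainHandshake {
	ethread::Semaphore semaphore;
	int32_t drainCounter = 0;

	void requestStopAndWait() { // thread calling stopStream()
		drainCounter = 2;
		semaphore.wait();       // sleeps until the callback reports the drain as finished
	}
	void onCallbackPass() {     // inside the CoreAudio render callback
		if (++drainCounter > 3) {
			semaphore.post();   // wakes requestStopAndWait()
		}
	}
};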
@ -1050,15 +1048,15 @@ bool audio::orchestra::api::Core::callbackEvent(AudioDeviceID _deviceId,
// draining stream or duplex mode AND the input/output devices are
// different AND this function is called for the input device.
if (m_private->drainCounter == 0 && (m_mode != audio::orchestra::mode_duplex || _deviceId == outputDevice)) {
std::vector<enum audio::orchestra::status> status;
etk::Vector<enum audio::orchestra::status> status;
if ( m_mode != audio::orchestra::mode_input
&& m_private->xrun[0] == true) {
status.push_back(audio::orchestra::status_underflow);
status.pushBack(audio::orchestra::status::underflow);
m_private->xrun[0] = false;
}
if ( m_mode != audio::orchestra::mode_output
&& m_private->xrun[1] == true) {
status.push_back(audio::orchestra::status_overflow);
status.pushBack(audio::orchestra::status::overflow);
m_private->xrun[1] = false;
}
int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
@ -1068,7 +1066,7 @@ bool audio::orchestra::api::Core::callbackEvent(AudioDeviceID _deviceId,
m_bufferSize,
status);
if (cbReturnValue == 2) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
ATA_VERBOSE("Set state as stopping");
m_private->drainCounter = 2;
abortStream();

View File

@ -4,9 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_CORE_H__) && defined(ORCHESTRA_BUILD_MACOSX_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_H__
#pragma once
#ifdef ORCHESTRA_BUILD_MACOSX_CORE
#include <CoreAudio/AudioHardware.h>
@ -17,12 +16,12 @@ namespace audio {
class CorePrivate;
class Core: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Core();
virtual ~Core();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_coreOSX;
const etk::String& getCurrentApi() {
return audio::orchestra::typeCoreOSX;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
@ -47,15 +46,15 @@ namespace audio {
void* _infoPointer);
static void coreStopStream(void *_userData);
private:
std::shared_ptr<CorePrivate> m_private;
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
ememory::SharedPtr<CorePrivate> m_private;
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
static const char* getErrorCode(OSStatus _code);
static OSStatus xrunListener(AudioObjectID _inDevice,
uint32_t _nAddresses,

View File

@ -4,10 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_CORE_IOS_H__) && defined(ORCHESTRA_BUILD_IOS_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_IOS_H__
#pragma once
#ifdef ORCHESTRA_BUILD_IOS_CORE
namespace audio {
namespace orchestra {
@ -15,12 +13,12 @@ namespace audio {
class CoreIosPrivate;
class CoreIos: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
CoreIos();
virtual ~CoreIos();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_coreIOS;
const etk::String& getCurrentApi() {
return audio::orchestra::typeCoreIOS;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
@ -34,22 +32,24 @@ namespace audio {
// will most likely produce highly undesirable results!
void callbackEvent();
private:
std::vector<audio::orchestra::DeviceInfo> m_devices;
etk::Vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
public:
void callBackEvent(void* _data,
int32_t _nbChunk,
const audio::Time& _time);
const audio::Time& _time);
public:
std11::shared_ptr<CoreIosPrivate> m_private;
ememory::SharedPtr<CoreIosPrivate> m_private;
uint32_t getDefaultInputDevice();
uint32_t getDefaultOutputDevice();
};
}
}

View File

@ -10,17 +10,17 @@
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#undef __class__
#define __class__ "api::CoreIos"
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
extern "C" {
#include <limits.h>
}
#include <audio/orchestra/api/CoreIos.hpp>
audio::orchestra::Api* audio::orchestra::api::CoreIos::create() {
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::CoreIos::create() {
ATA_INFO("Create CoreIos device ... ");
return new audio::orchestra::api::CoreIos();
return ememory::SharedPtr<audio::orchestra::api::CoreIos>(ETK_NEW(audio::orchestra::api::CoreIos));
}
#define kOutputBus 0
@ -39,57 +39,75 @@ namespace audio {
audio::orchestra::api::CoreIos::CoreIos(void) :
m_private(new audio::orchestra::api::CoreIosPrivate()) {
audio::orchestra::api::CoreIos::CoreIos() :
m_private(ETK_NEW(audio::orchestra::api::CoreIosPrivate)) {
ATA_INFO("new CoreIos");
int32_t deviceCount = 2;
ATA_ERROR("Get count devices : " << 2);
audio::orchestra::DeviceInfo tmp;
// Add default output format :
tmp.name = "out";
tmp.name = "speaker";
tmp.input = false;
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 2;
tmp.inputChannels = 0;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = true;
tmp.isDefaultInput = false;
tmp.channels.push_back(audio::channel_frontRight);
tmp.channels.push_back(audio::channel_frontLeft);
tmp.nativeFormats.push_back(audio::format_int16);
tmp.isDefault = true;
tmp.isCorrect = true;
m_devices.push_back(tmp);
// add default input format:
tmp.name = "in";
tmp.name = "microphone";
tmp.input = true;
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 0;
tmp.inputChannels = 2;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = false;
tmp.isDefaultInput = true;
tmp.channels.push_back(audio::channel_frontRight);
tmp.channels.push_back(audio::channel_frontLeft);
tmp.nativeFormats.push_back(audio::format_int16);
tmp.isDefault = true;
tmp.isCorrect = true;
m_devices.push_back(tmp);
ATA_INFO("Create CoreIOs interface (end)");
}
audio::orchestra::api::CoreIos::~CoreIos(void) {
uint32_t audio::orchestra::api::CoreIos::getDefaultInputDevice() {
// Should be implemented in subclasses if possible.
return 1;
}
uint32_t audio::orchestra::api::CoreIos::getDefaultOutputDevice() {
// Should be implemented in subclasses if possible.
return 0;
}
audio::orchestra::api::CoreIos::~CoreIos() {
ATA_INFO("Destroy CoreIOs interface");
AudioUnitUninitialize(m_private->audioUnit);
}
uint32_t audio::orchestra::api::CoreIos::getDeviceCount(void) {
uint32_t audio::orchestra::api::CoreIos::getDeviceCount() {
//ATA_INFO("Get device count:"<< m_devices.size());
return m_devices.size();
}
audio::orchestra::DeviceInfo audio::orchestra::api::CoreIos::getDeviceInfo(uint32_t _device) {
//ATA_INFO("Get device info ...");
if (_device >= m_devices.size()) {
audio::orchestra::DeviceInfo tmp;
tmp.sampleRates.push_back(0);
tmp.channels.push_back(audio::channel_frontCenter);
tmp.isDefault = false;
tmp.nativeFormats.push_back(audio::format_int8);
return tmp;
}
return m_devices[_device];
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream(void) {
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream() {
ATA_INFO("Close Stream");
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream() {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
@ -98,14 +116,14 @@ enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream(void) {
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream() {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream() {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
@ -114,22 +132,37 @@ enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
void audio::orchestra::api::CoreIos::callBackEvent(void* _data,
int32_t _nbChunk,
const audio::Time& _time) {
const audio::Time& _time) {
int32_t doStopStream = 0;
std::vector<enum audio::orchestra::status> status;
if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
doStopStream = m_callback(nullptr,
audio::Time(),
&m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
_time,
_nbChunk,
status);
convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
} else {
etk::Vector<enum audio::orchestra::status> status;
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
ATA_INFO("get output DATA : " << uint64_t(&m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0]));
doStopStream = m_callback(null,
audio::Time(),
&m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
_time,
_nbChunk,
status);
convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
} else {
ATA_INFO("have output DATA : " << uint64_t(_data));
doStopStream = m_callback(null,
_time,
_data,
audio::Time(),
_nbChunk,
status);
}
}
if ( m_mode == audio::orchestra::mode_input
|| m_mode == audio::orchestra::mode_duplex) {
ATA_INFO("have input DATA : " << uint64_t(_data));
doStopStream = m_callback(_data,
_time,
nullptr,
audio::Time(),
null,
audio::Time(),
_nbChunk,
status);
}
@ -147,12 +180,12 @@ static OSStatus playbackCallback(void *_userData,
uint32_t _inBusNumber,
uint32_t _inNumberFrames,
AudioBufferList* _ioData) {
if (_userData == nullptr) {
ATA_ERROR("callback event ... nullptr pointer");
if (_userData == null) {
ATA_ERROR("callback event ... null pointer");
return -1;
}
audio::Time tmpTimeime;
if (_inTime != nullptr) {
if (_inTime != null) {
tmpTimeime = audio::Time(_inTime->mHostTime/1000000000LL, _inTime->mHostTime%1000000000LL);
}
audio::orchestra::api::CoreIos* myClass = static_cast<audio::orchestra::api::CoreIos*>(_userData);
@ -160,28 +193,29 @@ static OSStatus playbackCallback(void *_userData,
for (int32_t iii=0; iii < _ioData->mNumberBuffers; iii++) {
AudioBuffer buffer = _ioData->mBuffers[iii];
int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << _inBusNumber);
ATA_INFO("request data size: " << numberFrame << " busNumber=" << _inBusNumber);
myClass->callBackEvent(buffer.mData, numberFrame, tmpTimeime);
}
return noErr;
}
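// --- Editor's sketch (not part of the diff): the byte -> frame arithmetic in the callback above. ---
// mDataByteSize counts bytes across all channels, so for interleaved stereo int16 samples
// frames = bytes / (channels * sizeof(int16_t)), which is what the /2 /sizeof(int16_t) above does.
static inline int32_t bytesToFrames(uint32_t _dataByteSize,
                                    uint32_t _nChannels,        // 2 for the stereo path above
                                    uint32_t _bytesPerSample) { // sizeof(int16_t) for format_int16
	return static_cast<int32_t>(_dataByteSize / (_nChannels * _bytesPerSample));
}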
bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::CoreIos::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != audio::orchestra::mode_output) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
bool ret = true;
// TODO : This is a bad hack ....
m_mode = audio::orchestra::mode_output;
// configure Airtaudio internal configuration:
m_userFormat = _format;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
@ -233,7 +267,7 @@ bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(nullptr, &desc);
AudioComponent inputComponent = AudioComponentFindNext(null, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
@ -244,11 +278,11 @@ bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
uint32_t flag = 1;
// Enable IO for playback
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (status != 0) {
ATA_ERROR("can not request audio authorization...");
}
@ -266,11 +300,11 @@ bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
audioFormat.mReserved = 0;
// Apply format
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (status != 0) {
ATA_ERROR("can not set stream properties...");
}
@ -281,11 +315,11 @@ bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
callbackStruct.inputProc = &playbackCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
if (status != 0) {
ATA_ERROR("can not set Callback...");
}

File diff suppressed because it is too large

View File

@ -4,10 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_DS_H__) && defined(ORCHESTRA_BUILD_DS)
#define __AUDIO_ORCHESTRA_API_DS_H__
#pragma once
#ifdef ORCHESTRA_BUILD_DS
namespace audio {
namespace orchestra {
@ -15,16 +13,14 @@ namespace audio {
class DsPrivate;
class Ds: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Ds();
virtual ~Ds();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_ds;
const etk::String& getCurrentApi() {
return audio::orchestra::typeDs;
}
uint32_t getDeviceCount();
uint32_t getDefaultOutputDevice();
uint32_t getDefaultInputDevice();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
@ -38,18 +34,18 @@ namespace audio {
void callbackEvent();
private:
static void dsCallbackEvent(void *_userData);
std11::shared_ptr<DsPrivate> m_private;
ememory::SharedPtr<DsPrivate> m_private;
bool m_coInitialized;
bool m_buffersRolling;
long m_duplexPrerollBytes;
bool probeDeviceOpen(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
bool open(uint32_t _device,
enum audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
enum audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}

View File

@ -6,14 +6,11 @@
*/
#if defined(ORCHESTRA_BUILD_DUMMY)
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/api/Dummy.hpp>
#include <audio/orchestra/debug.hpp>
#undef __class__
#define __class__ "api::Dummy"
audio::orchestra::Api* audio::orchestra::api::Dummy::create() {
return new audio::orchestra::api::Dummy();
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Dummy::create() {
return ememory::SharedPtr<audio::orchestra::api::Dummy>(ETK_NEW(audio::orchestra::api::Dummy));
}
@ -48,14 +45,14 @@ enum audio::orchestra::error audio::orchestra::api::Dummy::abortStream() {
return audio::orchestra::error_none;
}
bool audio::orchestra::api::Dummy::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::Dummy::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
return false;
}

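The Dummy backend above also shows the new allocation idiom: a raw new is replaced by the project's ETK_NEW macro wrapped in an ememory::SharedPtr. A minimal sketch of the same factory for the hypothetical Foo backend declared earlier, following that idiom exactly as the diff shows it:

// Sketch of the migrated factory (ETK_NEW + ememory::SharedPtr, as in Dummy/Jack/Pulse above).
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Foo::create() {
	return ememory::SharedPtr<audio::orchestra::api::Foo>(ETK_NEW(audio::orchestra::api::Foo));
}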
View File

@ -1,45 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_DUMMY__) && defined(ORCHESTRA_BUILD_DUMMY)
#define __AUDIO_ORCHESTRA_DUMMY__
#include <audio/orchestra/Interface.h>
namespace audio {
namespace orchestra {
namespace api {
class Dummy: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
public:
Dummy();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_dummy;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
private:
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

View File

@ -0,0 +1,44 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_DUMMY
#include <audio/orchestra/Interface.hpp>
namespace audio {
namespace orchestra {
namespace api {
class Dummy: public audio::orchestra::Api {
public:
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Dummy();
const etk::String& getCurrentApi() {
return audio::orchestra::typeDummy;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
private:
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

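For completeness, a hypothetical caller-side view of the migrated API. Only create(), getCurrentApi(), the typeDummy constant, the null macro and the include path are confirmed by the hunks above; the helper itself is illustrative.

// Hypothetical usage sketch of the new factory API (illustrative only).
#include <audio/orchestra/api/Dummy.hpp>

static bool checkDummyBackend() {
	ememory::SharedPtr<audio::orchestra::Api> api = audio::orchestra::api::Dummy::create();
	if (api == null) { // the project's null macro, used throughout this diff
		return false;
	}
	// getCurrentApi() now returns a string constant instead of an enum value:
	return api->getCurrentApi() == audio::orchestra::typeDummy;
}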
View File

@ -7,19 +7,17 @@
// must run before :
#if defined(ORCHESTRA_BUILD_JACK)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <string.h>
#include <etk/thread/tools.h>
extern "C" {
#include <limits.h>
#include <string.h>
}
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <ethread/tools.hpp>
#include <audio/orchestra/api/Jack.hpp>
#undef __class__
#define __class__ "api::Jack"
audio::orchestra::Api* audio::orchestra::api::Jack::create() {
return new audio::orchestra::api::Jack();
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Jack::create() {
return ememory::SharedPtr<audio::orchestra::api::Jack>(ETK_NEW(audio::orchestra::api::Jack));
}
@ -54,9 +52,9 @@ audio::orchestra::Api* audio::orchestra::api::Jack::create() {
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
extern "C" {
#include <stdio.h>
}
namespace audio {
namespace orchestra {
@ -65,9 +63,9 @@ namespace audio {
public:
jack_client_t *client;
jack_port_t **ports[2];
std::string deviceName[2];
etk::String deviceName[2];
bool xrun[2];
std11::condition_variable condition;
ethread::Semaphore m_semaphore;
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
@ -86,12 +84,12 @@ namespace audio {
}
audio::orchestra::api::Jack::Jack() :
m_private(new audio::orchestra::api::JackPrivate()) {
m_private(ETK_NEW(audio::orchestra::api::JackPrivate)) {
// Nothing to do here.
}
audio::orchestra::api::Jack::~Jack() {
if (m_state != audio::orchestra::state_closed) {
if (m_state != audio::orchestra::state::closed) {
closeStream();
}
}
@ -99,23 +97,23 @@ audio::orchestra::api::Jack::~Jack() {
uint32_t audio::orchestra::api::Jack::getDeviceCount() {
// See if we can become a jack client.
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = nullptr;
jack_status_t *status = null;
jack_client_t *client = jack_client_open("orchestraJackCount", options, status);
if (client == nullptr) {
if (client == null) {
return 0;
}
const char **ports;
std::string port, previousPort;
etk::String port, previousPort;
uint32_t nChannels = 0, nDevices = 0;
ports = jack_get_ports(client, nullptr, nullptr, 0);
ports = jack_get_ports(client, null, null, 0);
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nChannels ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon + 1);
if (iColon != etk::String::npos) {
port = port.extract(0, iColon + 1);
if (port != previousPort) {
nDevices++;
previousPort = port;
@ -123,36 +121,39 @@ uint32_t audio::orchestra::api::Jack::getDeviceCount() {
}
} while (ports[++nChannels]);
free(ports);
ports = null;
}
jack_client_close(client);
return nDevices;
return nDevices*2;
}
audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t _device) {
audio::orchestra::DeviceInfo info;
info.probed = false;
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
jack_status_t *status = nullptr;
jack_status_t *status = null;
jack_client_t *client = jack_client_open("orchestraJackInfo", options, status);
if (client == nullptr) {
if (client == null) {
ATA_ERROR("Jack server not found or connection error!");
// TODO : audio::orchestra::error_warning;
info.clear();
return info;
}
const char **ports;
std::string port, previousPort;
etk::String port, previousPort;
uint32_t nPorts = 0, nDevices = 0;
ports = jack_get_ports(client, nullptr, nullptr, 0);
ports = jack_get_ports(client, null, null, 0);
int32_t deviceID = _device/2;
info.input = _device%2==0?true:false; // note that JACK port directions are inverted
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nPorts ];
port = (char *) ports[nPorts];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon);
if (iColon != etk::String::npos) {
port = port.extract(0, iColon);
if (port != previousPort) {
if (nDevices == _device) {
if (nDevices == deviceID) {
info.name = port;
}
nDevices++;
@ -162,7 +163,7 @@ audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t
} while (ports[++nPorts]);
free(ports);
}
if (_device >= nDevices) {
if (deviceID >= nDevices) {
jack_client_close(client);
ATA_ERROR("device ID is invalid!");
// TODO : audio::orchestra::error_invalidUse;
@ -170,51 +171,45 @@ audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t
}
// Get the current jack server sample rate.
info.sampleRates.clear();
info.sampleRates.push_back(jack_get_sample_rate(client));
// Count the available ports containing the client name as device
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsInput);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
info.sampleRates.pushBack(jack_get_sample_rate(client));
if (info.input == true) {
ports = jack_get_ports(client, info.name.c_str(), null, JackPortIsOutput);
if (ports) {
int32_t iii=0;
while (ports[iii]) {
ATA_ERROR(" ploppp='" << ports[iii] << "'");
info.channels.pushBack(audio::channel_unknow);
iii++;
}
free(ports);
}
free(ports);
info.outputChannels = nChannels;
}
// Jack "output ports" equal RtAudio input channels.
nChannels = 0;
ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsOutput);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
} else {
ports = jack_get_ports(client, info.name.c_str(), null, JackPortIsInput);
if (ports) {
int32_t iii=0;
while (ports[iii]) {
ATA_ERROR(" ploppp='" << ports[iii] << "'");
info.channels.pushBack(audio::channel_unknow);
iii++;
}
free(ports);
}
free(ports);
info.inputChannels = nChannels;
}
if (info.outputChannels == 0 && info.inputChannels == 0) {
if (info.channels.size() == 0) {
jack_client_close(client);
ATA_ERROR("error determining Jack input/output channels!");
// TODO : audio::orchestra::error_warning;
info.clear();
return info;
}
// If device opens for both playback and capture, we determine the channels.
if (info.outputChannels > 0 && info.inputChannels > 0) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Jack always uses 32-bit floats.
info.nativeFormats.push_back(audio::format_float);
info.nativeFormats.pushBack(audio::format_float);
// Jack doesn't provide default devices so we'll use the first available one.
if ( _device == 0
&& info.outputChannels > 0) {
info.isDefaultOutput = true;
}
if ( _device == 0
&& info.inputChannels > 0) {
info.isDefaultInput = true;
if (deviceID == 0) {
info.isDefault = true;
}
jack_client_close(client);
info.probed = true;
info.isCorrect = true;
return info;
}
@ -229,15 +224,6 @@ int32_t audio::orchestra::api::Jack::jackCallbackHandler(jack_nframes_t _nframes
return 0;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
void audio::orchestra::api::Jack::jackCloseStream(void* _userData) {
etk::thread::setName("Jack_closeStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->closeStream();
}
void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
@ -249,7 +235,7 @@ void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
if (myClass->isStreamRunning() == false) {
return;
}
new std11::thread(&audio::orchestra::api::Jack::jackCloseStream, _userData);
ETK_NEW(ethread::Thread, [=](){myClass->closeStream();});
ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!");
}
@ -264,22 +250,22 @@ int32_t audio::orchestra::api::Jack::jackXrun(void* _userData) {
return 0;
}
bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::Jack::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
// Look for jack server and try to become a client (only do once per stream).
jack_client_t *client = 0;
if ( _mode == audio::orchestra::mode_output
|| ( _mode == audio::orchestra::mode_input
&& m_mode != audio::orchestra::mode_output)) {
jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = nullptr;
if (!_options.streamName.empty()) {
jack_status_t *status = null;
if (_options.streamName.size() != 0) {
client = jack_client_open(_options.streamName.c_str(), jackoptions, status);
} else {
client = jack_client_open("orchestraJack", jackoptions, status);
@ -293,19 +279,21 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
client = m_private->client;
}
const char **ports;
std::string port, previousPort, deviceName;
etk::String port, previousPort, deviceName;
uint32_t nPorts = 0, nDevices = 0;
ports = jack_get_ports(client, nullptr, nullptr, 0);
int32_t deviceID = _device/2;
bool isInput = _device%2==0?true:false;
ports = jack_get_ports(client, null, null, 0);
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nPorts ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon);
if (iColon != etk::String::npos) {
port = port.extract(0, iColon);
if (port != previousPort) {
if (nDevices == _device) {
if (nDevices == deviceID) {
deviceName = port;
}
nDevices++;
@ -323,8 +311,10 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
uint64_t flag = JackPortIsInput;
if (_mode == audio::orchestra::mode_input) flag = JackPortIsOutput;
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
if (_mode == audio::orchestra::mode_input) {
flag = JackPortIsOutput;
}
ports = jack_get_ports(client, deviceName.c_str(), null, flag);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
@ -345,7 +335,7 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
}
m_sampleRate = jackRate;
// Get the latency of the JACK port.
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
ports = jack_get_ports(client, deviceName.c_str(), null, flag);
if (ports[ _firstChannel ]) {
// Added by Ge Wang
jack_latency_callback_mode_t cbmode = (_mode == audio::orchestra::mode_input ? JackCaptureLatency : JackPlaybackLatency);
@ -411,7 +401,7 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) free(m_deviceBuffer);
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
if (m_deviceBuffer == null) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
@ -419,13 +409,13 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
}
// Allocate memory for the Jack ports (channels) identifiers.
m_private->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
if (m_private->ports[modeToIdTable(_mode)] == nullptr) {
if (m_private->ports[modeToIdTable(_mode)] == null) {
ATA_ERROR("error allocating port memory.");
goto error;
}
m_device[modeToIdTable(_mode)] = _device;
m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
if ( m_mode == audio::orchestra::mode_output
&& _mode == audio::orchestra::mode_input) {
// We had already set up the stream for output.
@ -466,52 +456,52 @@ bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
return true;
error:
jack_client_close(m_private->client);
if (m_private->ports[0] != nullptr) {
if (m_private->ports[0] != null) {
free(m_private->ports[0]);
m_private->ports[0] = nullptr;
m_private->ports[0] = null;
}
if (m_private->ports[1] != nullptr) {
if (m_private->ports[1] != null) {
free(m_private->ports[1]);
m_private->ports[1] = nullptr;
m_private->ports[1] = null;
}
for (int32_t iii=0; iii<2; ++iii) {
m_userBuffer[iii].clear();
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
m_deviceBuffer = null;
}
return false;
}
enum audio::orchestra::error audio::orchestra::api::Jack::closeStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("no open stream to close!");
return audio::orchestra::error_warning;
}
if (m_private != nullptr) {
if (m_state == audio::orchestra::state_running) {
if (m_private != null) {
if (m_state == audio::orchestra::state::running) {
jack_deactivate(m_private->client);
}
jack_client_close(m_private->client);
}
if (m_private->ports[0] != nullptr) {
if (m_private->ports[0] != null) {
free(m_private->ports[0]);
m_private->ports[0] = nullptr;
m_private->ports[0] = null;
}
if (m_private->ports[1] != nullptr) {
if (m_private->ports[1] != null) {
free(m_private->ports[1]);
m_private->ports[1] = nullptr;
m_private->ports[1] = null;
}
for (int32_t i=0; i<2; i++) {
m_userBuffer[i].clear();
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
m_deviceBuffer = null;
}
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
return audio::orchestra::error_none;
}
@ -521,7 +511,7 @@ enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
@ -535,8 +525,8 @@ enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
result = 1;
ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), nullptr, JackPortIsInput);
if (ports == nullptr) {
ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), null, JackPortIsInput);
if (ports == null) {
ATA_ERROR("error determining available JACK input ports!");
goto unlock;
}
@ -558,8 +548,8 @@ enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
if ( m_mode == audio::orchestra::mode_input
|| m_mode == audio::orchestra::mode_duplex) {
result = 1;
ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), nullptr, JackPortIsOutput);
if (ports == nullptr) {
ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), null, JackPortIsOutput);
if (ports == null) {
ATA_ERROR("error determining available JACK output ports!");
goto unlock;
}
@ -579,7 +569,7 @@ enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
}
m_private->drainCounter = 0;
m_private->internalDrain = false;
m_state = audio::orchestra::state_running;
m_state = audio::orchestra::state::running;
unlock:
if (result == 0) {
return audio::orchestra::error_none;
@ -591,7 +581,7 @@ enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
@ -599,12 +589,11 @@ enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
|| m_mode == audio::orchestra::mode_duplex) {
if (m_private->drainCounter == 0) {
m_private->drainCounter = 2;
std11::unique_lock<std11::mutex> lck(m_mutex);
m_private->condition.wait(lck);
m_private->m_semaphore.wait();
}
}
jack_deactivate(m_private->client);
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
return audio::orchestra::error_none;
}
@ -612,7 +601,7 @@ enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
@ -620,23 +609,12 @@ enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
return stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void* _userData) {
etk::thread::setName("Jack_stopStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->stopStream();
}
bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
if ( m_state == audio::orchestra::state_stopped
|| m_state == audio::orchestra::state_stopping) {
if ( m_state == audio::orchestra::state::stopped
|| m_state == audio::orchestra::state::stopping) {
return true;
}
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return false;
}
@ -646,24 +624,24 @@ bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
}
// Check if we were draining the stream and signal is finished.
if (m_private->drainCounter > 3) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
if (m_private->internalDrain == true) {
new std11::thread(jackStopStream, this);
ETK_NEW(ethread::Thread, [&](){stopStream();}, "Jack_stopStream");
} else {
m_private->condition.notify_one();
m_private->m_semaphore.post();
}
return true;
}
// Invoke user callback first, to get fresh output data.
if (m_private->drainCounter == 0) {
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
etk::Vector<enum audio::orchestra::status> status;
if (m_mode != audio::orchestra::mode_input && m_private->xrun[0] == true) {
status.push_back(audio::orchestra::status_underflow);
status.pushBack(audio::orchestra::status::underflow);
m_private->xrun[0] = false;
}
if (m_mode != audio::orchestra::mode_output && m_private->xrun[1] == true) {
status.push_back(audio::orchestra::status_overflow);
status.pushBack(audio::orchestra::status::overflow);
m_private->xrun[1] = false;
}
int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
@ -673,9 +651,9 @@ bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
m_bufferSize,
status);
if (cbReturnValue == 2) {
m_state = audio::orchestra::state_stopping;
m_state = audio::orchestra::state::stopping;
m_private->drainCounter = 2;
new std11::thread(jackStopStream, this);
ETK_NEW(ethread::Thread, [&](){stopStream();}, "Jack_stopStream2");
return true;
}
else if (cbReturnValue == 1) {

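
The JACK hunks above double the reported device count: each distinct client prefix (the part of a port name before the first colon) is now exposed as two orchestra devices, even IDs for capture and odd IDs for playback, and getDeviceInfo()/open() map an ID back with deviceID = _device/2. The standalone sketch below reproduces that enumeration with plain libjack and std::string instead of the project's etk types.

// Standalone sketch of the new JACK device enumeration (illustrative only):
// one JACK client prefix == two orchestra devices (even = input, odd = output).
#include <jack/jack.h>
#include <string>
#include <vector>
#include <cstdio>
#include <cstdlib>

static std::vector<std::string> listJackClients() {
	std::vector<std::string> out;
	jack_status_t status;
	jack_client_t* client = jack_client_open("orchestraJackSketch", JackNoStartServer, &status);
	if (client == nullptr) {
		return out; // no JACK server running
	}
	const char** ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports != nullptr) {
		for (int iii = 0; ports[iii] != nullptr; ++iii) {
			// Parse the port name up to the first colon, as in getDeviceCount().
			std::string port = ports[iii];
			size_t colon = port.find(':');
			if (colon == std::string::npos) {
				continue;
			}
			std::string prefix = port.substr(0, colon);
			if (out.empty() || out.back() != prefix) {
				out.push_back(prefix); // ports of one client are listed contiguously
			}
		}
		free(ports); // the array returned by jack_get_ports() must be freed
	}
	jack_client_close(client);
	return out;
}

int main() {
	std::vector<std::string> clients = listJackClients();
	// Each client prefix becomes two device IDs, matching 'return nDevices*2'.
	for (size_t id = 0; id < clients.size() * 2; ++id) {
		printf("device %zu: %s (%s)\n",
		       id,
		       clients[id / 2].c_str(),
		       id % 2 == 0 ? "input" : "output");
	}
	return 0;
}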
View File

@ -4,9 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_JACK_H__) && defined(ORCHESTRA_BUILD_JACK)
#define __AUDIO_ORCHESTRA_API_JACK_H__
#pragma once
#ifdef ORCHESTRA_BUILD_JACK
#include <jack/jack.h>
@ -16,12 +15,12 @@ namespace audio {
class JackPrivate;
class Jack: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Jack();
virtual ~Jack();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_jack;
const etk::String& getCurrentApi() {
return audio::orchestra::typeJack;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
@ -37,19 +36,18 @@ namespace audio {
bool callbackEvent(uint64_t _nframes);
private:
static int32_t jackXrun(void* _userData);
static void jackCloseStream(void* _userData);
static void jackShutdown(void* _userData);
static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData);
private:
std11::shared_ptr<JackPrivate> m_private;
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
ememory::SharedPtr<JackPrivate> m_private;
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}

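Across this changeset the std11::condition_variable plus mutex pair that waited for a drain to finish is replaced by a semaphore (ethread::Semaphore wait/post, see the stopStream() and callbackEvent() hunks above). Since the ethread types are project-specific, the sketch below shows the same handshake with standard C++20 primitives: the control thread blocks, the audio callback posts once draining is done.

// Sketch of the drain handshake after the condition_variable -> semaphore
// migration, using std::binary_semaphore (C++20) instead of ethread::Semaphore.
#include <semaphore>
#include <atomic>
#include <thread>
#include <chrono>

static std::binary_semaphore g_drainDone{0};
static std::atomic<bool> g_draining{false};

// Audio callback thread: once the last drained buffer has been played,
// wake whoever is blocked in stopStream().
static void onDrainComplete() {
	if (g_draining.exchange(false)) {
		g_drainDone.release(); // equivalent of m_private->m_semaphore.post()
	}
}

// Control thread: request a drain and block until the callback confirms it.
static void stopStream() {
	g_draining.store(true);
	g_drainDone.acquire(); // equivalent of m_private->m_semaphore.wait()
	// ... jack_deactivate() and buffer teardown would follow here ...
}

int main() {
	std::thread audioThread([] {
		// Pretend a few callbacks run, then the drain completes.
		std::this_thread::sleep_for(std::chrono::milliseconds(10));
		onDrainComplete();
	});
	stopStream();
	audioThread.join();
	return 0;
}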
View File

@ -1,830 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_OSS)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include "soundcard.h"
#include <errno.h>
#include <math.h>
#undef __class__
#define __class__ "api::Oss"
audio::orchestra::Api* audio::orchestra::api::Oss::create() {
return new audio::orchestra::api::Oss();
}
static void *ossCallbackHandler(void* _userData);
namespace audio {
namespace orchestra {
namespace api {
class OssPrivate {
public:
int32_t id[2]; // device ids
bool xrun[2];
bool triggered;
std11::condition_variable runnable;
std11::shared_ptr<std11::thread> thread;
bool threadRunning;
OssPrivate():
triggered(false),
threadRunning(false) {
id[0] = 0;
id[1] = 0;
xrun[0] = false;
xrun[1] = false;
}
};
}
}
}
audio::orchestra::api::Oss::Oss() :
m_private(new audio::orchestra::api::OssPrivate()) {
// Nothing to do here.
}
audio::orchestra::api::Oss::~Oss() {
if (m_state != audio::orchestra::state_closed) {
closeStream();
}
}
uint32_t audio::orchestra::api::Oss::getDeviceCount() {
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("error opening '/dev/mixer'.");
return 0;
}
oss_sysinfo sysinfo;
if (ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo) == -1) {
close(mixerfd);
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
return 0;
}
close(mixerfd);
return sysinfo.numaudios;
}
audio::orchestra::DeviceInfo audio::orchestra::api::Oss::getDeviceInfo(uint32_t _device) {
rtaudio::DeviceInfo info;
info.probed = false;
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("error opening '/dev/mixer'.");
return info;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
return info;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
close(mixerfd);
ATA_ERROR("no devices found!");
return info;
}
if (_device >= nDevices) {
close(mixerfd);
ATA_ERROR("device ID is invalid!");
return info;
}
oss_audioinfo ainfo;
ainfo.dev = _device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
error(audio::orchestra::error_warning);
return info;
}
// Probe channels
if (ainfo.caps & PCM_CAP_audio::orchestra::mode_output) {
info.outputChannels = ainfo.max_channels;
}
if (ainfo.caps & PCM_CAP_audio::orchestra::mode_input) {
info.inputChannels = ainfo.max_channels;
}
if (ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex) {
if ( info.outputChannels > 0
&& info.inputChannels > 0
&& ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
}
// Probe data formats ... do for input
uint64_t mask = ainfo.iformats;
if ( mask & AFMT_S16_LE
|| mask & AFMT_S16_BE) {
info.nativeFormats.push_back(audio::format_int16);
}
if (mask & AFMT_S8) {
info.nativeFormats.push_back(audio::format_int8);
}
if ( mask & AFMT_S32_LE
|| mask & AFMT_S32_BE) {
info.nativeFormats.push_back(audio::format_int32);
}
if (mask & AFMT_FLOAT) {
info.nativeFormats.push_back(audio::format_float);
}
if ( mask & AFMT_S24_LE
|| mask & AFMT_S24_BE) {
info.nativeFormats.push_back(audio::format_int24);
}
// Check that we have at least one supported format
if (info.nativeFormats == 0) {
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
return info;
}
// Probe the supported sample rates.
info.sampleRates.clear();
if (ainfo.nrates) {
for (uint32_t i=0; i<ainfo.nrates; i++) {
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
if (ainfo.rates[i] == SAMPLE_RATES[k]) {
info.sampleRates.push_back(SAMPLE_RATES[k]);
break;
}
}
}
} else {
// Check min and max rate values;
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
if ( ainfo.min_rate <= (int) SAMPLE_RATES[k]
&& ainfo.max_rate >= (int) SAMPLE_RATES[k]) {
info.sampleRates.push_back(SAMPLE_RATES[k]);
}
}
}
if (info.sampleRates.size() == 0) {
ATA_ERROR("no supported sample rates found for device (" << ainfo.name << ").");
} else {
info.probed = true;
info.name = ainfo.name;
}
return info;
}
bool audio::orchestra::api::Oss::probeDeviceOpen(uint32_t _device,
StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
rtaudio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("error opening '/dev/mixer'.");
return false;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
return false;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("no devices found!");
return false;
}
if (_device >= nDevices) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("device ID is invalid!");
return false;
}
oss_audioinfo ainfo;
ainfo.dev = _device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
return false;
}
// Check if device supports input or output
if ( ( _mode == audio::orchestra::mode_output
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_output))
|| ( _mode == audio::orchestra::mode_input
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_input))) {
if (_mode == audio::orchestra::mode_output) {
ATA_ERROR("device (" << ainfo.name << ") does not support output.");
} else {
ATA_ERROR("device (" << ainfo.name << ") does not support input.");
}
return false;
}
int32_t flags = 0;
if (_mode == audio::orchestra::mode_output) {
flags |= O_WRONLY;
} else { // _mode == audio::orchestra::mode_input
if ( m_mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(m_private->id[0]);
m_private->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex)) {
ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode.");
return false;
}
// Check that the number previously set channels is the same.
if (m_nUserChannels[0] != _channels) {
ATA_ERROR("input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
return false;
}
flags |= O_RDWR;
} else {
flags |= O_RDONLY;
}
}
// Set exclusive access if specified.
if (_options.flags & RTAUDIO_HOG_DEVICE) {
flags |= O_EXCL;
}
// Try to open the device.
int32_t fd;
fd = open(ainfo.devnode, flags, 0);
if (fd == -1) {
if (errno == EBUSY) {
ATA_ERROR("device (" << ainfo.name << ") is busy.");
} else {
ATA_ERROR("error opening device (" << ainfo.name << ").");
}
return false;
}
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETaudio::orchestra::mode_duplex, nullptr);
if (result == -1) {
m_errorStream << "error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return false;
}
}
*/
// Check the device channel support.
m_nUserChannels[modeToIdTable(_mode)] = _channels;
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters.");
return false;
}
// Set the number of channels.
int32_t deviceChannels = _channels + _firstChannel;
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
if ( result == -1
|| deviceChannels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ").");
return false;
}
m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
if (result == -1) {
close(fd);
ATA_ERROR("error getting device (" << ainfo.name << ") data formats.");
return false;
}
// Determine how to set the device format.
m_userFormat = _format;
int32_t deviceFormat = -1;
m_doByteSwap[modeToIdTable(_mode)] = false;
if (_format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
} else if (_format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
}
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
// This really shouldn't happen ...
close(fd);
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
return false;
}
// Set the data format.
int32_t temp = deviceFormat;
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
if ( result == -1
|| deviceFormat != temp) {
close(fd);
ATA_ERROR("error setting data format on device (" << ainfo.name << ").");
return false;
}
// Attempt to set the buffer size. According to OSS, the minimum
// number of buffers is two. The supposed minimum buffer size is 16
// bytes, so that will be our lower bound. The argument to this
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
if (ossBufferBytes < 16) {
ossBufferBytes = 16;
}
int32_t buffers = 0;
buffers = _options.numberOfBuffers;
if (_options.flags.m_minimizeLatency == true) {
buffers = 2;
}
if (buffers < 2) {
buffers = 3;
}
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
if (result == -1) {
close(fd);
ATA_ERROR("error setting buffer size on device (" << ainfo.name << ").");
return false;
}
m_nBuffers = buffers;
// Save buffer size (in sample frames).
*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
m_bufferSize = *_bufferSize;
// Set the sample rate.
int32_t srate = _sampleRate;
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
if (result == -1) {
close(fd);
ATA_ERROR("error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
return false;
}
// Verify the sample rate setup worked.
if (abs(srate - _sampleRate) > 100) {
close(fd);
ATA_ERROR("device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
return false;
}
m_sampleRate = _sampleRate;
if ( _mode == audio::orchestra::mode_input
&& m__mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We're doing duplex setup here.
m_deviceFormat[0] = m_deviceFormat[1];
m_nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
m_deviceInterleaved[modeToIdTable(_mode)] = true;
// Set flags for buffer conversion
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
m_private->id[modeToIdTable(_mode)] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if ( m__mode == audio::orchestra::mode_output
&& m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
// Setup the buffer conversion information structure.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
if (m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
m_mode = audio::orchestra::mode_duplex;
if (m_device[0] == _device) {
m_private->id[0] = fd;
}
} else {
m_mode = _mode;
// Setup callback thread.
m_private->threadRunning = true;
m_private->thread = new std11::thread(ossCallbackHandler, this);
if (m_private->thread == nullptr) {
m_private->threadRunning = false;
ATA_ERROR("creating callback thread!");
goto error;
}
}
return true;
error:
if (m_private->id[0] != nullptr) {
close(m_private->id[0]);
m_private->id[0] = nullptr;
}
if (m_private->id[1] != nullptr) {
close(m_private->id[1]);
m_private->id[1] = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
enum audio::orchestra::error audio::orchestra::api::Oss::closeStream() {
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("no open stream to close!");
return audio::orchestra::error_warning;
}
m_private->threadRunning = false;
m_mutex.lock();
if (m_state == audio::orchestra::state_stopped) {
m_private->runnable.notify_one();
}
m_mutex.unlock();
m_private->thread->join();
if (m_state == audio::orchestra::state_running) {
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
} else {
ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
}
m_state = audio::orchestra::state_stopped;
}
if (m_private->id[0] != nullptr) {
close(m_private->id[0]);
m_private->id[0] = nullptr;
}
if (m_private->id[1] != nullptr) {
close(m_private->id[1]);
m_private->id[1] = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
m_mode = audio::orchestra::mode_unknow;
m_state = audio::orchestra::state_closed;
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Oss::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
m_mutex.lock();
m_state = audio::orchestra::state_running;
// No need to do anything else here ... OSS automatically starts
// when fed samples.
m_mutex.unlock();
m_private->runnable.notify_one();
}
enum audio::orchestra::error audio::orchestra::api::Oss::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return;
}
m_mutex.lock();
// The state might change while waiting on a mutex.
if (m_state == audio::orchestra::state_stopped) {
m_mutex.unlock();
return;
}
int32_t result = 0;
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
// Flush the output with zeros a few times.
char *buffer;
int32_t samples;
audio::format format;
if (m_doConvertBuffer[0]) {
buffer = m_deviceBuffer;
samples = m_bufferSize * m_nDeviceChannels[0];
format = m_deviceFormat[0];
} else {
buffer = m_userBuffer[0];
samples = m_bufferSize * m_nUserChannels[0];
format = m_userFormat;
}
memset(buffer, 0, samples * audio::getFormatBytes(format));
for (uint32_t i=0; i<m_nBuffers+1; i++) {
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
if (result == -1) {
ATA_ERROR("audio write error.");
return audio::orchestra::error_warning;
}
}
result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
goto unlock;
}
m_private->triggered = false;
}
if ( m_mode == audio::orchestra::mode_input
|| ( m_mode == audio::orchestra::mode_duplex
&& m_private->id[0] != m_private->id[1])) {
result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
goto unlock;
}
}
unlock:
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
if (result != -1) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
enum audio::orchestra::error audio::orchestra::api::Oss::abortStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
m_mutex.lock();
// The state might change while waiting on a mutex.
if (m_state == audio::orchestra::state_stopped) {
m_mutex.unlock();
return;
}
int32_t result = 0;
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
goto unlock;
}
m_private->triggered = false;
}
if (m_mode == audio::orchestra::mode_input || (m_mode == audio::orchestra::mode_duplex && m_private->id[0] != m_private->id[1])) {
result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
goto unlock;
}
}
unlock:
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
if (result != -1) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
void audio::orchestra::api::Oss::callbackEvent() {
if (m_state == audio::orchestra::state_stopped) {
std11::unique_lock<std11::mutex> lck(m_mutex);
m_private->runnable.wait(lck);
if (m_state != audio::orchestra::state_running) {
return;
}
}
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return audio::orchestra::error_warning;
}
// Invoke user callback to get fresh output data.
int32_t doStopStream = 0;
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
if ( m_mode != audio::orchestra::mode_input
&& m_private->xrun[0] == true) {
status.push_back(audio::orchestra::status_underflow);
m_private->xrun[0] = false;
}
if ( m_mode != audio::orchestra::mode_output
&& m_private->xrun[1] == true) {
status.push_back(audio::orchestra::status_overflow);
m_private->xrun[1] = false;
}
doStopStream = m_callback(m_userBuffer[1],
streamTime,
m_userBuffer[0],
streamTime,
m_bufferSize,
status);
if (doStopStream == 2) {
this->abortStream();
return;
}
m_mutex.lock();
// The state might change while waiting on a mutex.
if (m_state == audio::orchestra::state_stopped) {
goto unlock;
}
int32_t result;
char *buffer;
int32_t samples;
audio::format format;
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
// Setup parameters and do buffer conversion if necessary.
if (m_doConvertBuffer[0]) {
buffer = m_deviceBuffer;
convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]);
samples = m_bufferSize * m_nDeviceChannels[0];
format = m_deviceFormat[0];
} else {
buffer = m_userBuffer[0];
samples = m_bufferSize * m_nUserChannels[0];
format = m_userFormat;
}
// Do byte swapping if necessary.
if (m_doByteSwap[0]) {
byteSwapBuffer(buffer, samples, format);
}
if ( m_mode == audio::orchestra::mode_duplex
&& m_private->triggered == false) {
int32_t trig = 0;
ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
trig = PCM_ENABLE_audio::orchestra::mode_input|PCM_ENABLE_audio::orchestra::mode_output;
ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
m_private->triggered = true;
} else {
// Write samples to device.
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
}
if (result == -1) {
// We'll assume this is an underrun, though there isn't a
// specific means for determining that.
m_private->xrun[0] = true;
ATA_ERROR("audio write error.");
//error(audio::orchestra::error_warning);
// Continue on to input section.
}
}
if ( m_mode == audio::orchestra::mode_input
|| m_mode == audio::orchestra::mode_duplex) {
// Setup parameters.
if (m_doConvertBuffer[1]) {
buffer = m_deviceBuffer;
samples = m_bufferSize * m_nDeviceChannels[1];
format = m_deviceFormat[1];
} else {
buffer = m_userBuffer[1];
samples = m_bufferSize * m_nUserChannels[1];
format = m_userFormat;
}
// Read samples from device.
result = read(m_private->id[1], buffer, samples * audio::getFormatBytes(format));
if (result == -1) {
// We'll assume this is an overrun, though there isn't a
// specific means for determining that.
m_private->xrun[1] = true;
ATA_ERROR("audio read error.");
goto unlock;
}
// Do byte swapping if necessary.
if (m_doByteSwap[1]) {
byteSwapBuffer(buffer, samples, format);
}
// Do buffer conversion if necessary.
if (m_doConvertBuffer[1]) {
convertBuffer(m_userBuffer[1], m_deviceBuffer, m_convertInfo[1]);
}
}
unlock:
m_mutex.unlock();
audio::orchestra::Api::tickStreamTime();
if (doStopStream == 1) {
this->stopStream();
}
}
static void ossCallbackHandler(void* _userData) {
etk::thread::setName("OSS callback-" + m_name);
audio::orchestra::api::Alsa* myClass = reinterpret_cast<audio::orchestra::api::Oss*>(_userData);
while (myClass->m_private->threadRunning == true) {
myClass->callbackEvent();
}
}
#endif

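The deleted OSS backend set its buffering through SNDCTL_DSP_SETFRAGMENT, whose argument packs two fields as 0xMMMMSSSS: the low 16 bits are a power-of-two size selector (fragment size = 2^SSSS bytes), and the high 16 bits carry the requested number of fragments, which the code above stores directly. A small worked example of building that argument; the helper name is hypothetical.

// Sketch: build the 0xMMMMSSSS argument used by the removed OSS backend.
// Low 16 bits: size selector S, fragment size = 2^S bytes (minimum 16 bytes).
// High 16 bits: requested number of fragments, stored directly as in the code above.
#include <cstdint>
#include <cstdio>

static uint32_t ossFragmentArg(uint32_t _fragmentBytes, uint32_t _numFragments) {
	if (_fragmentBytes < 16) {
		_fragmentBytes = 16; // lower bound used by the removed code
	}
	uint32_t selector = 0;
	while ((1u << (selector + 1)) <= _fragmentBytes) {
		selector++; // integer log2, replacing the log10(x)/log10(2) of the original
	}
	return (_numFragments << 16) | selector;
}

int main() {
	// 3 fragments of 1024 bytes: selector = 10, so the argument is 0x0003000A.
	uint32_t arg = ossFragmentArg(1024, 3);
	printf("SNDCTL_DSP_SETFRAGMENT argument: 0x%08X\n", arg);
	// On a real OSS device one would then do:
	//   int tmp = (int)arg;
	//   ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
	return 0;
}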
View File

@ -1,51 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_OSS_H__) && defined(ORCHESTRA_BUILD_OSS)
#define __AUDIO_ORCHESTRA_API_OSS_H__
namespace audio {
namespace orchestra {
namespace api {
class OssPrivate;
class Oss: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
public:
Oss();
virtual ~Oss();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_oss;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std11::shared_ptr<OssPrivate> m_private;
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

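Both the removed OSS backend and the PulseAudio backend below drive their blocking read/write calls from a dedicated callback thread: a threadRunning flag plus a loop that keeps calling the callback until closeStream() clears the flag and joins the thread. The sketch below reproduces that pattern with std::thread and std::atomic, since the etk/ethread wrappers are project-specific.

// Sketch of the backend callback-thread loop (illustrative, std:: primitives).
#include <atomic>
#include <thread>
#include <chrono>
#include <cstdio>

class BackendSketch {
	public:
		void start() {
			m_threadRunning = true;
			m_thread = std::thread([this] {
				// Mirrors pulseaudio_callback()/ossCallbackHandler(): loop
				// until closeStream() clears the flag.
				while (m_threadRunning.load() == true) {
					callbackEventOneCycle();
				}
			});
		}
		void close() {
			m_threadRunning = false; // closeStream() clears the flag first ...
			m_thread.join();         // ... then joins the worker, as in the diff
		}
	private:
		void callbackEventOneCycle() {
			// Real backends block here in read()/write()/pa_simple_*().
			std::this_thread::sleep_for(std::chrono::milliseconds(5));
		}
		std::atomic<bool> m_threadRunning{false};
		std::thread m_thread;
};

int main() {
	BackendSketch backend;
	backend.start();
	std::this_thread::sleep_for(std::chrono::milliseconds(20));
	backend.close();
	printf("stream closed\n");
	return 0;
}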
View File

@ -8,20 +8,20 @@
#if defined(ORCHESTRA_BUILD_PULSE)
#include <unistd.h>
#include <limits.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
extern "C" {
#include <limits.h>
#include <stdio.h>
}
#include <audio/orchestra/Interface.hpp>
#include <audio/orchestra/debug.hpp>
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
#include <etk/thread/tools.h>
#include <ethread/tools.hpp>
#include <audio/orchestra/api/PulseDeviceList.hpp>
#include <audio/orchestra/api/Pulse.hpp>
#undef __class__
#define __class__ "api::Pulse"
audio::orchestra::Api* audio::orchestra::api::Pulse::create() {
return new audio::orchestra::api::Pulse();
ememory::SharedPtr<audio::orchestra::Api> audio::orchestra::api::Pulse::create() {
return ememory::SharedPtr<audio::orchestra::api::Pulse>(ETK_NEW(audio::orchestra::api::Pulse));
}
@ -53,15 +53,13 @@ namespace audio {
namespace api {
class PulsePrivate {
public:
pa_simple *s_play;
pa_simple *s_rec;
std11::shared_ptr<std11::thread> thread;
pa_simple* handle;
ememory::SharedPtr<ethread::Thread> thread;
bool threadRunning;
std11::condition_variable runnable_cv;
ethread::Semaphore m_semaphore;
bool runnable;
PulsePrivate() :
s_play(0),
s_rec(0),
handle(0),
threadRunning(false),
runnable(false) {
@ -71,45 +69,38 @@ namespace audio {
}
}
audio::orchestra::api::Pulse::Pulse() :
m_private(new audio::orchestra::api::PulsePrivate()) {
m_private(ETK_NEW(audio::orchestra::api::PulsePrivate)) {
}
audio::orchestra::api::Pulse::~Pulse() {
if (m_state != audio::orchestra::state_closed) {
if (m_state != audio::orchestra::state::closed) {
closeStream();
}
}
uint32_t audio::orchestra::api::Pulse::getDeviceCount() {
return 1;
#if 1
etk::Vector<audio::orchestra::DeviceInfo> list = audio::orchestra::api::pulse::getDeviceList();
return list.size();
#else
return 1;
#endif
}
audio::orchestra::DeviceInfo audio::orchestra::api::Pulse::getDeviceInfo(uint32_t _device) {
audio::orchestra::DeviceInfo info;
info.probed = true;
info.name = "PulseAudio";
info.outputChannels = 2;
info.inputChannels = 2;
info.duplexChannels = 2;
info.isDefaultOutput = true;
info.isDefaultInput = true;
for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
info.sampleRates.push_back(*sr);
etk::Vector<audio::orchestra::DeviceInfo> list = audio::orchestra::api::pulse::getDeviceList();
if (_device >= list.size()) {
ATA_ERROR("Request device out of IDs:" << _device << " >= " << list.size());
return audio::orchestra::DeviceInfo();
}
info.nativeFormats.push_back(audio::format_int16);
info.nativeFormats.push_back(audio::format_int32);
info.nativeFormats.push_back(audio::format_float);
return info;
return list[_device];
}
static void pulseaudio_callback(void* _userData) {
audio::orchestra::api::Pulse* myClass = reinterpret_cast<audio::orchestra::api::Pulse*>(_userData);
myClass->callbackEvent();
}
void audio::orchestra::api::Pulse::callbackEvent() {
etk::thread::setName("Pulse IO-" + m_name);
ethread::setName("Pulse IO-" + m_name);
while (m_private->threadRunning == true) {
callbackEventOneCycle();
}
@ -118,43 +109,40 @@ void audio::orchestra::api::Pulse::callbackEvent() {
enum audio::orchestra::error audio::orchestra::api::Pulse::closeStream() {
m_private->threadRunning = false;
m_mutex.lock();
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
m_private->runnable = true;
m_private->runnable_cv.notify_one();;
m_private->m_semaphore.post();
}
m_mutex.unlock();
m_mutex.unLock();
m_private->thread->join();
if (m_private->s_play) {
pa_simple_flush(m_private->s_play, nullptr);
pa_simple_free(m_private->s_play);
}
if (m_private->s_rec) {
pa_simple_free(m_private->s_rec);
if (m_mode == audio::orchestra::mode_output) {
pa_simple_flush(m_private->handle, null);
}
pa_simple_free(m_private->handle);
m_private->handle = null;
m_userBuffer[0].clear();
m_userBuffer[1].clear();
m_state = audio::orchestra::state_closed;
m_state = audio::orchestra::state::closed;
m_mode = audio::orchestra::mode_unknow;
return audio::orchestra::error_none;
}
void audio::orchestra::api::Pulse::callbackEventOneCycle() {
if (m_state == audio::orchestra::state_stopped) {
std11::unique_lock<std11::mutex> lck(m_mutex);
if (m_state == audio::orchestra::state::stopped) {
while (!m_private->runnable) {
m_private->runnable_cv.wait(lck);
m_private->m_semaphore.wait();
}
if (m_state != audio::orchestra::state_running) {
m_mutex.unlock();
if (m_state != audio::orchestra::state::running) {
m_mutex.unLock();
return;
}
}
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return;
}
audio::Time streamTime = getStreamTime();
std::vector<enum audio::orchestra::status> status;
etk::Vector<enum audio::orchestra::status> status;
int32_t doStopStream = m_callback(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
streamTime,
&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
@ -168,13 +156,12 @@ void audio::orchestra::api::Pulse::callbackEventOneCycle() {
m_mutex.lock();
void *pulse_in = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0];
void *pulse_out = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0];
if (m_state != audio::orchestra::state_running) {
goto unlock;
if (m_state != audio::orchestra::state::running) {
goto unLock;
}
int32_t pa_error;
size_t bytes;
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
if (m_mode == audio::orchestra::mode_output) {
if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]) {
convertBuffer(m_deviceBuffer,
&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
@ -183,18 +170,18 @@ void audio::orchestra::api::Pulse::callbackEventOneCycle() {
} else {
bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
}
if (pa_simple_write(m_private->s_play, pulse_out, bytes, &pa_error) < 0) {
if (pa_simple_write(m_private->handle, pulse_out, bytes, &pa_error) < 0) {
ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
return;
}
}
if (m_mode == audio::orchestra::mode_input || m_mode == audio::orchestra::mode_duplex) {
if (m_mode == audio::orchestra::mode_input) {
if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
} else {
bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
}
if (pa_simple_read(m_private->s_rec, pulse_in, bytes, &pa_error) < 0) {
if (pa_simple_read(m_private->handle, pulse_in, bytes, &pa_error) < 0) {
ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
return;
}
@ -204,8 +191,8 @@ void audio::orchestra::api::Pulse::callbackEventOneCycle() {
m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
}
}
unlock:
m_mutex.unlock();
unLock:
m_mutex.unLock();
audio::orchestra::Api::tickStreamTime();
if (doStopStream == 1) {
stopStream();
@ -217,78 +204,82 @@ unlock:
enum audio::orchestra::error audio::orchestra::api::Pulse::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_running) {
if (m_state == audio::orchestra::state::running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
m_mutex.lock();
m_state = audio::orchestra::state_running;
m_state = audio::orchestra::state::running;
m_private->runnable = true;
m_private->runnable_cv.notify_one();
m_mutex.unlock();
m_private->m_semaphore.post();
m_mutex.unLock();
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Pulse::stopStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
m_mutex.lock();
if (m_private->s_play) {
if ( m_private != null
&& m_private->handle != null
&& m_mode == audio::orchestra::mode_output) {
int32_t pa_error;
if (pa_simple_drain(m_private->s_play, &pa_error) < 0) {
if (pa_simple_drain(m_private->handle, &pa_error) < 0) {
ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
m_mutex.unlock();
m_mutex.unLock();
return audio::orchestra::error_systemError;
}
}
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
m_state = audio::orchestra::state::stopped;
m_mutex.unLock();
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Pulse::abortStream() {
if (m_state == audio::orchestra::state_closed) {
if (m_state == audio::orchestra::state::closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_stopped) {
if (m_state == audio::orchestra::state::stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
m_mutex.lock();
if (m_private && m_private->s_play) {
if ( m_private != null
&& m_private->handle != null
&& m_mode == audio::orchestra::mode_output) {
int32_t pa_error;
if (pa_simple_flush(m_private->s_play, &pa_error) < 0) {
if (pa_simple_flush(m_private->handle, &pa_error) < 0) {
ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
m_mutex.unlock();
m_mutex.unLock();
return audio::orchestra::error_systemError;
}
}
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
m_state = audio::orchestra::state::stopped;
m_mutex.unLock();
return audio::orchestra::error_none;
}
bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
bool audio::orchestra::api::Pulse::open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
uint64_t bufferBytes = 0;
pa_sample_spec ss;
if (_device != 0) {
@ -362,7 +353,7 @@ bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) free(m_deviceBuffer);
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
if (m_deviceBuffer == null) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
@ -376,15 +367,15 @@ bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
int32_t error;
switch (_mode) {
case audio::orchestra::mode_input:
m_private->s_rec = pa_simple_new(nullptr, "orchestra", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &error);
if (!m_private->s_rec) {
m_private->handle = pa_simple_new(null, "orchestra", PA_STREAM_RECORD, null, "Record", &ss, null, null, &error);
if (m_private->handle == null) {
ATA_ERROR("error connecting input to PulseAudio server.");
goto error;
}
break;
case audio::orchestra::mode_output:
m_private->s_play = pa_simple_new(nullptr, "orchestra", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &error);
if (!m_private->s_play) {
m_private->handle = pa_simple_new(null, "orchestra", PA_STREAM_PLAYBACK, null, "Playback", &ss, null, null, &error);
if (m_private->handle == null) {
ATA_ERROR("error connecting output to PulseAudio server.");
goto error;
}
@ -394,24 +385,22 @@ bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
}
if (m_mode == audio::orchestra::mode_unknow) {
m_mode = _mode;
} else if (m_mode == _mode) {
} else {
goto error;
}else {
m_mode = audio::orchestra::mode_duplex;
}
if (!m_private->threadRunning) {
if (m_private->threadRunning == false) {
m_private->threadRunning = true;
m_private->thread = std11::make_shared<std11::thread>(&pulseaudio_callback, this);
if (m_private->thread == nullptr) {
m_private->thread = ememory::makeShared<ethread::Thread>([&](){callbackEvent();}, "pulseCallback");
if (m_private->thread == null) {
ATA_ERROR("error creating thread.");
goto error;
}
}
m_state = audio::orchestra::state_stopped;
m_state = audio::orchestra::state::stopped;
return true;
error:
for (int32_t i=0; i<2; i++) {
m_userBuffer[i].clear();
for (int32_t iii=0; iii<2; ++iii) {
m_userBuffer[iii].clear();
}
if (m_deviceBuffer) {
free(m_deviceBuffer);


@ -4,10 +4,8 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_PULSE_H__) && defined(ORCHESTRA_BUILD_PULSE)
#define __AUDIO_ORCHESTRA_API_PULSE_H__
#pragma once
#ifdef ORCHESTRA_BUILD_PULSE
namespace audio {
namespace orchestra {
@ -15,12 +13,12 @@ namespace audio {
class PulsePrivate;
class Pulse: public audio::orchestra::Api {
public:
static audio::orchestra::Api* create();
static ememory::SharedPtr<audio::orchestra::Api> create();
public:
Pulse();
virtual ~Pulse();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_pulse;
const etk::String& getCurrentApi() {
return audio::orchestra::typePulse;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
@ -35,17 +33,17 @@ namespace audio {
void callbackEventOneCycle();
void callbackEvent();
private:
std11::shared_ptr<PulsePrivate> m_private;
std::vector<audio::orchestra::DeviceInfo> m_devices;
ememory::SharedPtr<PulsePrivate> m_private;
etk::Vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
bool open(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}


@ -0,0 +1,363 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_PULSE)
extern "C" {
#include <stdio.h>
#include <string.h>
}
#include <pulse/pulseaudio.h>
#include <audio/orchestra/api/PulseDeviceList.hpp>
#include <audio/orchestra/debug.hpp>
#include <audio/Time.hpp>
#include <audio/Duration.hpp>
#include <audio/format.hpp>
#include <etk/stdTools.hpp>
// This callback gets called when our context changes state. We really only
// care about when it's ready or if it has failed
static void callbackStateMachine(pa_context* _contex, void *_userdata) {
pa_context_state_t state;
int *pulseAudioReady = static_cast<int*>(_userdata);
state = pa_context_get_state(_contex);
switch (state) {
// These are just here for reference
case PA_CONTEXT_UNCONNECTED:
ATA_VERBOSE("pulse state: PA_CONTEXT_UNCONNECTED");
break;
case PA_CONTEXT_CONNECTING:
ATA_VERBOSE("pulse state: PA_CONTEXT_CONNECTING");
break;
case PA_CONTEXT_AUTHORIZING:
ATA_VERBOSE("pulse state: PA_CONTEXT_AUTHORIZING");
break;
case PA_CONTEXT_SETTING_NAME:
ATA_VERBOSE("pulse state: PA_CONTEXT_SETTING_NAME");
break;
default:
ATA_VERBOSE("pulse state: default");
break;
case PA_CONTEXT_FAILED:
*pulseAudioReady = 2;
ATA_VERBOSE("pulse state: PA_CONTEXT_FAILED");
break;
case PA_CONTEXT_TERMINATED:
*pulseAudioReady = 2;
ATA_VERBOSE("pulse state: PA_CONTEXT_TERMINATED");
break;
case PA_CONTEXT_READY:
*pulseAudioReady = 1;
ATA_VERBOSE("pulse state: PA_CONTEXT_READY");
break;
}
}
static audio::format getFormatFromPulseFormat(enum pa_sample_format _format) {
switch (_format) {
case PA_SAMPLE_U8:
return audio::format_int8;
break;
case PA_SAMPLE_ALAW:
ATA_ERROR("Not supported: uint8_t a-law");
return audio::format_unknow;
case PA_SAMPLE_ULAW:
ATA_ERROR("Not supported: uint8_t mu-law");
return audio::format_unknow;
case PA_SAMPLE_S16LE:
return audio::format_int16;
break;
case PA_SAMPLE_S16BE:
return audio::format_int16;
break;
case PA_SAMPLE_FLOAT32LE:
return audio::format_float;
break;
case PA_SAMPLE_FLOAT32BE:
return audio::format_float;
break;
case PA_SAMPLE_S32LE:
return audio::format_int32;
break;
case PA_SAMPLE_S32BE:
return audio::format_int32;
break;
case PA_SAMPLE_S24LE:
return audio::format_int24;
break;
case PA_SAMPLE_S24BE:
return audio::format_int24;
break;
case PA_SAMPLE_S24_32LE:
return audio::format_int24_on_int32;
break;
case PA_SAMPLE_S24_32BE:
return audio::format_int24_on_int32;
break;
case PA_SAMPLE_INVALID:
case PA_SAMPLE_MAX:
ATA_ERROR("Not supported: invalid");
return audio::format_unknow;
}
ATA_ERROR("Not supported: UNKNOW flag...");
return audio::format_unknow;
}
static etk::Vector<audio::channel> getChannelOrderFromPulseChannel(const struct pa_channel_map& _map) {
etk::Vector<audio::channel> out;
for (int32_t iii=0; iii<_map.channels; ++iii) {
switch(_map.map[iii]) {
default:
case PA_CHANNEL_POSITION_MAX:
case PA_CHANNEL_POSITION_INVALID:
out.pushBack(audio::channel_unknow);
break;
case PA_CHANNEL_POSITION_MONO:
case PA_CHANNEL_POSITION_FRONT_CENTER:
out.pushBack(audio::channel_frontCenter);
break;
case PA_CHANNEL_POSITION_FRONT_LEFT:
out.pushBack(audio::channel_frontLeft);
break;
case PA_CHANNEL_POSITION_FRONT_RIGHT:
out.pushBack(audio::channel_frontRight);
break;
case PA_CHANNEL_POSITION_REAR_CENTER:
out.pushBack(audio::channel_rearCenter);
break;
case PA_CHANNEL_POSITION_REAR_LEFT:
out.pushBack(audio::channel_rearLeft);
break;
case PA_CHANNEL_POSITION_REAR_RIGHT:
out.pushBack(audio::channel_rearRight);
break;
case PA_CHANNEL_POSITION_LFE:
out.pushBack(audio::channel_lfe);
break;
case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER:
out.pushBack(audio::channel_centerLeft);
break;
case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER:
out.pushBack(audio::channel_centerRight);
break;
case PA_CHANNEL_POSITION_SIDE_LEFT:
out.pushBack(audio::channel_topCenterLeft);
break;
case PA_CHANNEL_POSITION_SIDE_RIGHT:
out.pushBack(audio::channel_topCenterRight);
break;
case PA_CHANNEL_POSITION_TOP_CENTER:
case PA_CHANNEL_POSITION_TOP_FRONT_CENTER:
out.pushBack(audio::channel_topFrontCenter);
break;
case PA_CHANNEL_POSITION_TOP_FRONT_LEFT:
out.pushBack(audio::channel_topFrontLeft);
break;
case PA_CHANNEL_POSITION_TOP_FRONT_RIGHT:
out.pushBack(audio::channel_topFrontRight);
break;
case PA_CHANNEL_POSITION_TOP_REAR_LEFT:
out.pushBack(audio::channel_topRearLeft);
break;
case PA_CHANNEL_POSITION_TOP_REAR_RIGHT:
out.pushBack(audio::channel_topRearRight);
break;
case PA_CHANNEL_POSITION_TOP_REAR_CENTER:
out.pushBack(audio::channel_topRearCenter);
break;
case PA_CHANNEL_POSITION_AUX0: out.pushBack(audio::channel_aux0); break;
case PA_CHANNEL_POSITION_AUX1: out.pushBack(audio::channel_aux1); break;
case PA_CHANNEL_POSITION_AUX2: out.pushBack(audio::channel_aux2); break;
case PA_CHANNEL_POSITION_AUX3: out.pushBack(audio::channel_aux3); break;
case PA_CHANNEL_POSITION_AUX4: out.pushBack(audio::channel_aux4); break;
case PA_CHANNEL_POSITION_AUX5: out.pushBack(audio::channel_aux5); break;
case PA_CHANNEL_POSITION_AUX6: out.pushBack(audio::channel_aux6); break;
case PA_CHANNEL_POSITION_AUX7: out.pushBack(audio::channel_aux7); break;
case PA_CHANNEL_POSITION_AUX8: out.pushBack(audio::channel_aux8); break;
case PA_CHANNEL_POSITION_AUX9: out.pushBack(audio::channel_aux9); break;
case PA_CHANNEL_POSITION_AUX10: out.pushBack(audio::channel_aux10); break;
case PA_CHANNEL_POSITION_AUX11: out.pushBack(audio::channel_aux11); break;
case PA_CHANNEL_POSITION_AUX12: out.pushBack(audio::channel_aux12); break;
case PA_CHANNEL_POSITION_AUX13: out.pushBack(audio::channel_aux13); break;
case PA_CHANNEL_POSITION_AUX14: out.pushBack(audio::channel_aux14); break;
case PA_CHANNEL_POSITION_AUX15: out.pushBack(audio::channel_aux15); break;
case PA_CHANNEL_POSITION_AUX16: out.pushBack(audio::channel_aux16); break;
case PA_CHANNEL_POSITION_AUX17: out.pushBack(audio::channel_aux17); break;
case PA_CHANNEL_POSITION_AUX18: out.pushBack(audio::channel_aux18); break;
case PA_CHANNEL_POSITION_AUX19: out.pushBack(audio::channel_aux19); break;
case PA_CHANNEL_POSITION_AUX20: out.pushBack(audio::channel_aux20); break;
case PA_CHANNEL_POSITION_AUX21: out.pushBack(audio::channel_aux21); break;
case PA_CHANNEL_POSITION_AUX22: out.pushBack(audio::channel_aux22); break;
case PA_CHANNEL_POSITION_AUX23: out.pushBack(audio::channel_aux23); break;
case PA_CHANNEL_POSITION_AUX24: out.pushBack(audio::channel_aux24); break;
case PA_CHANNEL_POSITION_AUX25: out.pushBack(audio::channel_aux25); break;
case PA_CHANNEL_POSITION_AUX26: out.pushBack(audio::channel_aux26); break;
case PA_CHANNEL_POSITION_AUX27: out.pushBack(audio::channel_aux27); break;
case PA_CHANNEL_POSITION_AUX28: out.pushBack(audio::channel_aux28); break;
case PA_CHANNEL_POSITION_AUX29: out.pushBack(audio::channel_aux29); break;
case PA_CHANNEL_POSITION_AUX30: out.pushBack(audio::channel_aux30); break;
case PA_CHANNEL_POSITION_AUX31: out.pushBack(audio::channel_aux31); break;
}
}
return out;
}
// Callback on getting data from pulseaudio:
static void callbackGetSinkList(pa_context* _contex, const pa_sink_info* _info, int _eol, void* _userdata) {
etk::Vector<audio::orchestra::DeviceInfo>* list = static_cast<etk::Vector<audio::orchestra::DeviceInfo>*>(_userdata);
// If eol is set to a positive number, you're at the end of the list
if (_eol > 0) {
return;
}
audio::orchestra::DeviceInfo info;
info.isCorrect = true;
info.input = false;
info.name = _info->name;
info.desc = _info->description;
info.sampleRates.pushBack(_info->sample_spec.rate);
info.nativeFormats.pushBack(getFormatFromPulseFormat(_info->sample_spec.format));
info.channels = getChannelOrderFromPulseChannel(_info->channel_map);
ATA_VERBOSE("plop=" << _info->index << " " << _info->name);
//ATA_DEBUG(" ports=" << _info->n_ports);
list->pushBack(info);
}
// Callback to get data from pulseaudio:
static void callbackGetSourceList(pa_context* _contex, const pa_source_info* _info, int _eol, void* _userdata) {
etk::Vector<audio::orchestra::DeviceInfo>* list = static_cast<etk::Vector<audio::orchestra::DeviceInfo>*>(_userdata);
if (_eol > 0) {
return;
}
audio::orchestra::DeviceInfo info;
info.isCorrect = true;
info.input = true;
info.name = _info->name;
info.desc = _info->description;
info.sampleRates.pushBack(_info->sample_spec.rate);
info.nativeFormats.pushBack(getFormatFromPulseFormat(_info->sample_spec.format));
info.channels = getChannelOrderFromPulseChannel(_info->channel_map);
ATA_VERBOSE("plop=" << _info->index << " " << _info->name);
list->pushBack(info);
}
// Cache the device list so we do not query the PulseAudio server on every call:
static etk::Vector<audio::orchestra::DeviceInfo> pulseAudioListOfDevice;
static audio::Time pulseAudioListOfDeviceTime;
etk::Vector<audio::orchestra::DeviceInfo> audio::orchestra::api::pulse::getDeviceList() {
audio::Duration delta = audio::Time::now() - pulseAudioListOfDeviceTime;
if (delta < audio::Duration(30,0)) {
return pulseAudioListOfDevice;
}
// Define our pulse audio loop and connection variables
pa_mainloop* pulseAudioMainLoop;
pa_mainloop_api* pulseAudioMainLoopAPI;
pa_operation* pulseAudioOperation;
pa_context* pulseAudioContex;
pa_context_flags_t pulseAudioFlags = PA_CONTEXT_NOAUTOSPAWN;
etk::Vector<audio::orchestra::DeviceInfo>& out = pulseAudioListOfDevice;
out.clear();
// We'll need these state variables to keep track of our requests
int state = 0;
int pulseAudioReady = 0;
// Create a mainloop API and connection to the default server
pulseAudioMainLoop = pa_mainloop_new();
pulseAudioMainLoopAPI = pa_mainloop_get_api(pulseAudioMainLoop);
pulseAudioContex = pa_context_new(pulseAudioMainLoopAPI, "orchestraPulseCount");
// If there's an error, the callback will set pulseAudioReady
pa_context_set_state_callback(pulseAudioContex, callbackStateMachine, &pulseAudioReady);
// This function connects to the pulse server
pa_context_connect(pulseAudioContex, NULL, pulseAudioFlags, NULL);
bool playLoop = true;
while (playLoop == true) {
// We can't do anything until PA is ready, so just iterate the mainloop
// and continue
if (pulseAudioReady == 0) {
pa_mainloop_iterate(pulseAudioMainLoop, 1, null);
continue;
}
// We couldn't get a connection to the server, so exit out
if (pulseAudioReady == 2) {
pa_context_disconnect(pulseAudioContex);
pa_context_unref(pulseAudioContex);
pa_mainloop_free(pulseAudioMainLoop);
ATA_ERROR("Pulse interface error: Can not connect to the pulseaudio iterface...");
return out;
}
// At this point, we're connected to the server and ready to make
// requests
switch (state) {
// State 0: we haven't done anything yet
case 0:
ATA_DEBUG("Request sink list");
pulseAudioOperation = pa_context_get_sink_info_list(pulseAudioContex,
callbackGetSinkList,
&out);
state++;
break;
case 1:
// Now we wait for our operation to complete. When it is
// complete, our output device list is filled out, and we move
// along to the next state
if (pa_operation_get_state(pulseAudioOperation) == PA_OPERATION_DONE) {
pa_operation_unref(pulseAudioOperation);
ATA_DEBUG("Request sources list");
pulseAudioOperation = pa_context_get_source_info_list(pulseAudioContex,
callbackGetSourceList,
&out);
state++;
}
break;
case 2:
if (pa_operation_get_state(pulseAudioOperation) == PA_OPERATION_DONE) {
ATA_DEBUG("All is done");
// Now we're done, clean up and disconnect and return
pa_operation_unref(pulseAudioOperation);
pa_context_disconnect(pulseAudioContex);
pa_context_unref(pulseAudioContex);
pa_mainloop_free(pulseAudioMainLoop);
playLoop = false;
break;
}
break;
default:
// We should never see this state
ATA_ERROR("Error in getting the devices list ...");
return out;
}
// Iterate the main loop ..
if (playLoop == true) {
pa_mainloop_iterate(pulseAudioMainLoop, 1, null);
}
}
// TODO: need to do it better ...
// set default device:
int32_t idInput = -1;
int32_t idOutput = -1;
for (int32_t iii=0; iii<out.size(); ++iii) {
if (out[iii].input == true) {
if (idInput != -1) {
continue;
}
if (etk::end_with(out[iii].name, ".monitor", false) == false) {
idInput = iii;
out[iii].isDefault = true;
}
} else {
if (idOutput != -1) {
continue;
}
if (etk::end_with(out[iii].name, ".monitor", false) == false) {
idOutput = iii;
out[iii].isDefault = true;
}
}
}
return out;
}
#endif


@ -0,0 +1,23 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifdef ORCHESTRA_BUILD_PULSE
#include <etk/types.hpp>
#include <audio/orchestra/DeviceInfo.hpp>
namespace audio {
namespace orchestra {
namespace api {
namespace pulse {
etk::Vector<audio::orchestra::DeviceInfo> getDeviceList();
}
}
}
}
#endif
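The header above only declares `audio::orchestra::api::pulse::getDeviceList()`. A hedged usage sketch (not part of this change set; it assumes only the `DeviceInfo` fields populated by the implementation above: `name`, `input`, `isDefault`):
```{.cpp}
// Build with ORCHESTRA_BUILD_PULSE defined, otherwise the header body is compiled out.
#include <audio/orchestra/api/PulseDeviceList.hpp>
#include <audio/orchestra/debug.hpp>

static void printPulseDevices() {
	// Returns the cached list when it is younger than 30 seconds,
	// otherwise re-queries the PulseAudio server.
	etk::Vector<audio::orchestra::DeviceInfo> list = audio::orchestra::api::pulse::getDeviceList();
	for (size_t iii=0; iii<list.size(); ++iii) {
		ATA_PRINT((list[iii].input ? "input : " : "output: ")
		          << list[iii].name
		          << (list[iii].isDefault ? " [default]" : ""));
	}
}
```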


@ -1,30 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_CB_H__
#define __AUDIO_ORCHESTRA_CB_H__
#include <etk/thread.h>
#include <etk/condition_variable.h>
#include <etk/mutex.h>
#include <etk/chrono.h>
#include <etk/functional.h>
#include <etk/memory.h>
#include <audio/channel.h>
#include <audio/format.h>
#include <audio/orchestra/error.h>
#include <audio/orchestra/status.h>
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/DeviceInfo.h>
#include <audio/orchestra/StreamOptions.h>
#include <audio/orchestra/StreamParameters.h>
#endif

audio/orchestra/base.hpp

@ -0,0 +1,25 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <ethread/Thread.hpp>
#include <ethread/Semaphore.hpp>
#include <ethread/Mutex.hpp>
#include <echrono/Steady.hpp>
#include <etk/Function.hpp>
#include <ememory/memory.hpp>
#include <audio/channel.hpp>
#include <audio/format.hpp>
#include <audio/orchestra/error.hpp>
#include <audio/orchestra/status.hpp>
#include <audio/orchestra/Flags.hpp>
#include <audio/orchestra/CallbackInfo.hpp>
#include <audio/orchestra/DeviceInfo.hpp>
#include <audio/orchestra/StreamOptions.hpp>
#include <audio/orchestra/StreamParameters.hpp>


@ -5,9 +5,9 @@
* @fork from RTAudio
*/
#include <audio/orchestra/debug.h>
#include <audio/orchestra/debug.hpp>
int32_t audio::orchestra::getLogId() {
static int32_t g_val = etk::log::registerInstance("audio-orchestra");
static int32_t g_val = elog::registerInstance("audio-orchestra");
return g_val;
}


@ -4,19 +4,18 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_DEBUG_H__
#define __AUDIO_ORCHESTRA_DEBUG_H__
#include <etk/log.h>
#include <elog/log.hpp>
namespace audio {
namespace orchestra {
int32_t getLogId();
}
}
#define ATA_BASE(info,data) TK_LOG_BASE(audio::orchestra::getLogId(),info,data)
#define ATA_BASE(info,data) ELOG_BASE(audio::orchestra::getLogId(),info,data)
#define ATA_PRINT(data) ATA_BASE(-1, data)
#define ATA_CRITICAL(data) ATA_BASE(1, data)
#define ATA_ERROR(data) ATA_BASE(2, data)
#define ATA_WARNING(data) ATA_BASE(3, data)
@ -40,5 +39,3 @@ namespace audio {
} \
} while (0)
#endif


@ -5,5 +5,5 @@
* @fork from RTAudio
*/
#include <audio/orchestra/error.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/error.hpp>
#include <audio/orchestra/debug.hpp>


@ -4,11 +4,9 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_ERROR_H__
#define __AUDIO_ORCHESTRA_ERROR_H__
#include <etk/types.h>
#include <etk/types.hpp>
namespace audio {
namespace orchestra {
@ -22,5 +20,3 @@ namespace audio {
};
}
}
#endif


@ -5,8 +5,8 @@
* @fork from RTAudio
*/
#include <audio/orchestra/mode.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/mode.hpp>
#include <audio/orchestra/debug.hpp>
int32_t audio::orchestra::modeToIdTable(enum mode _mode) {
switch (_mode) {
@ -18,4 +18,22 @@ int32_t audio::orchestra::modeToIdTable(enum mode _mode) {
return 1;
}
return 0;
}
}
etk::Stream& audio::operator <<(etk::Stream& _os, enum audio::orchestra::mode _obj) {
switch (_obj) {
case audio::orchestra::mode_unknow:
_os << "unknow";
break;
case audio::orchestra::mode_duplex:
_os << "duplex";
break;
case audio::orchestra::mode_output:
_os << "output";
break;
case audio::orchestra::mode_input:
_os << "input";
break;
}
return _os;
}


@ -4,12 +4,10 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_MODE_H__
#define __AUDIO_ORCHESTRA_MODE_H__
#include <etk/types.h>
#include <etk/types.hpp>
#include <etk/Stream.hpp>
namespace audio {
namespace orchestra {
@ -21,6 +19,6 @@ namespace audio {
};
int32_t modeToIdTable(enum mode _mode);
}
etk::Stream& operator <<(etk::Stream& _os, enum audio::orchestra::mode _obj);
}
#endif


@ -4,22 +4,18 @@
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#ifndef __AUDIO_ORCHESTRA_STATE_H__
#define __AUDIO_ORCHESTRA_STATE_H__
#include <etk/types.h>
#include <etk/types.hpp>
namespace audio {
namespace orchestra {
enum state {
state_closed,
state_stopped,
state_stopping,
state_running
enum class state {
closed,
stopped,
stopping,
running
};
}
}
#endif


@ -5,28 +5,28 @@
* @fork from RTAudio
*/
#include <audio/orchestra/status.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/status.hpp>
#include <audio/orchestra/debug.hpp>
static const char* listValue[] = {
"ok",
"overflow",
"underflow"
};
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::status _obj) {
_os << listValue[_obj];
etk::Stream& audio::orchestra::operator <<(etk::Stream& _os, enum audio::orchestra::status _obj) {
_os << listValue[int32_t(_obj)];
return _os;
}
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj) {
_os << std::string("{");
etk::Stream& audio::orchestra::operator <<(etk::Stream& _os, const etk::Vector<enum audio::orchestra::status>& _obj) {
_os << etk::String("{");
for (size_t iii=0; iii<_obj.size(); ++iii) {
if (iii!=0) {
_os << std::string(";");
_os << etk::String(";");
}
_os << _obj[iii];
}
_os << std::string("}");
_os << etk::String("}");
return _os;
}


@ -1,26 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STATUS_H__
#define __AUDIO_ORCHESTRA_STATUS_H__
#include <etk/types.h>
namespace audio {
namespace orchestra {
enum status {
status_ok, //!< nothing...
status_overflow, //!< Internal buffer has more data than they can accept
status_underflow //!< The internal buffer is empty
};
std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::status _obj);
std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj);
}
}
#endif


@ -0,0 +1,23 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.hpp>
#include <etk/Vector.hpp>
namespace audio {
namespace orchestra {
enum class status {
ok, //!< nothing...
overflow, //!< Internal buffer has more data than they can accept
underflow //!< The internal buffer is empty
};
etk::Stream& operator <<(etk::Stream& _os, enum audio::orchestra::status _obj);
etk::Stream& operator <<(etk::Stream& _os, const etk::Vector<enum audio::orchestra::status>& _obj);
}
}


@ -5,69 +5,17 @@
* @fork from RTAudio
*/
#include <audio/orchestra/type.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#include <audio/orchestra/type.hpp>
#include <audio/orchestra/debug.hpp>
#undef __class__
#define __class__ "type"
static const char* listType[] = {
"undefined",
"alsa",
"pulse",
"oss",
"jack",
"coreOSX",
"corIOS",
"asio",
"ds",
"java",
"dummy",
"user1",
"user2",
"user3",
"user4"
};
static int32_t listTypeSize = sizeof(listType)/sizeof(char*);
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj) {
_os << listType[_obj];
return _os;
}
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj) {
_os << std::string("{");
for (size_t iii=0; iii<_obj.size(); ++iii) {
if (iii!=0) {
_os << std::string(";");
}
_os << _obj[iii];
}
_os << std::string("}");
return _os;
}
/*
template <enum audio::format> std::string to_string(const enum audio::format& _variable) {
return listType[_value];
}
*/
std::string audio::orchestra::getTypeString(enum audio::orchestra::type _value) {
return listType[_value];
}
enum audio::orchestra::type audio::orchestra::getTypeFromString(const std::string& _value) {
for (int32_t iii=0; iii<listTypeSize; ++iii) {
if (_value == listType[iii]) {
return static_cast<enum audio::orchestra::type>(iii);
}
}
if (_value == "auto") {
return audio::orchestra::type_undefined;
}
return audio::orchestra::type_undefined;
}
const etk::String audio::orchestra::typeUndefined = "undefined";
const etk::String audio::orchestra::typeAlsa = "alsa";
const etk::String audio::orchestra::typePulse = "pulse";
const etk::String audio::orchestra::typeOss = "oss";
const etk::String audio::orchestra::typeJack = "jack";
const etk::String audio::orchestra::typeCoreOSX = "coreOSX";
const etk::String audio::orchestra::typeCoreIOS = "coreIOS";
const etk::String audio::orchestra::typeAsio = "asio";
const etk::String audio::orchestra::typeDs = "ds";
const etk::String audio::orchestra::typeJava = "java";
const etk::String audio::orchestra::typeDummy = "dummy";


@ -1,44 +0,0 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_TYPE_H__
#define __AUDIO_ORCHESTRA_TYPE_H__
#include <etk/types.h>
#include <etk/stdTools.h>
namespace audio {
namespace orchestra {
/**
* @brief Audio API specifier arguments.
*/
enum type {
type_undefined, //!< Error API.
type_alsa, //!< LINUX The Advanced Linux Sound Architecture.
type_pulse, //!< LINUX The Linux PulseAudio.
type_oss, //!< LINUX The Linux Open Sound System.
type_jack, //!< UNIX The Jack Low-Latency Audio Server.
type_coreOSX, //!< Macintosh OSX Core Audio.
type_coreIOS, //!< Macintosh iOS Core Audio.
type_asio, //!< WINDOWS The Steinberg Audio Stream I/O.
type_ds, //!< WINDOWS The Microsoft Direct Sound.
type_java, //!< ANDROID Interface.
type_dummy, //!< Empty wrapper (non-functional).
type_user1, //!< User interface 1.
type_user2, //!< User interface 2.
type_user3, //!< User interface 3.
type_user4, //!< User interface 4.
};
std::ostream& operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj);
std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj);
std::string getTypeString(enum audio::orchestra::type _value);
enum audio::orchestra::type getTypeFromString(const std::string& _value);
}
}
#endif

audio/orchestra/type.hpp

@ -0,0 +1,30 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#pragma once
#include <etk/types.hpp>
#include <etk/stdTools.hpp>
namespace audio {
namespace orchestra {
/**
* @brief Audio API specifier arguments.
*/
extern const etk::String typeUndefined; //!< Error API.
extern const etk::String typeAlsa; //!< LINUX The Advanced Linux Sound Architecture.
extern const etk::String typePulse; //!< LINUX The Linux PulseAudio.
extern const etk::String typeOss; //!< LINUX The Linux Open Sound System.
extern const etk::String typeJack; //!< UNIX The Jack Low-Latency Audio Server.
extern const etk::String typeCoreOSX; //!< Macintosh OSX Core Audio.
extern const etk::String typeCoreIOS; //!< Macintosh iOS Core Audio.
extern const etk::String typeAsio; //!< WINDOWS The Steinberg Audio Stream I/O.
extern const etk::String typeDs; //!< WINDOWS The Microsoft Direct Sound.
extern const etk::String typeJava; //!< ANDROID Interface.
extern const etk::String typeDummy; //!< Empty wrapper (non-functional).
}
}

authors.txt

@ -0,0 +1 @@
MR Edouard DUPIN <yui.heero@gmail.com>


@ -1,91 +0,0 @@
cmake_minimum_required(VERSION 2.8.3)
project(audio_orchestra)
set(CMAKE_VERBOSE_MAKEFILE ON)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
etk
audio
)
find_package(ALSA REQUIRED)
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if you package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
INCLUDE_DIRS ../
LIBRARIES ${PROJECT_NAME}
CATKIN_DEPENDS etk audio
DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
..
${catkin_INCLUDE_DIRS}
)
## Declare a cpp library
add_library(${PROJECT_NAME}
../audio/orchestra/debug.cpp
../audio/orchestra/status.cpp
../audio/orchestra/type.cpp
../audio/orchestra/mode.cpp
../audio/orchestra/state.cpp
../audio/orchestra/error.cpp
../audio/orchestra/base.cpp
../audio/orchestra/Interface.cpp
../audio/orchestra/Flags.cpp
../audio/orchestra/Api.cpp
../audio/orchestra/DeviceInfo.cpp
../audio/orchestra/StreamOptions.cpp
../audio/orchestra/api/Dummy.cpp
../audio/orchestra/api/Alsa.cpp
../audio/orchestra/api/Jack.cpp
../audio/orchestra/api/Pulse.cpp
../audio/orchestra/api/Oss.cpp
)
add_definitions(-D__LINUX_ALSA__)
add_definitions(-D__DUMMY__)
## Add cmake target dependencies of the executable/library
## as an example, message headers may need to be generated before nodes
#add_dependencies(${PROJECT_NAME} test_perfo_core_generate_messages_cpp)
## Specify libraries to link a library or executable target against
target_link_libraries(${PROJECT_NAME}
${ALSA_LIBRARIES}
${catkin_LIBRARIES}
)
#############
## Install ##
#############
## Mark executables and/or libraries for installation
install(TARGETS ${PROJECT_NAME}
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
)
## Mark cpp header files for installation
install(DIRECTORY ../audio/orchestra/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
FILES_MATCHING PATTERN "*.h"
)


@ -1,13 +0,0 @@
<?xml version="1.0"?>
<package>
<name>audio_orchestra</name>
<version>0.3.0</version>
<description>Ewol RTAudio fork</description>
<maintainer email="yui.heero@gmail.com">Edouard DUPIN</maintainer>
<license>Apache-2.0</license>
<build_depend>etk</build_depend>
<build_depend>audio</build_depend>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>etk</run_depend>
<run_depend>audio</run_depend>
</package>

doc/build.md

@ -0,0 +1,83 @@
Build lib & build sample {#audio_orchestra_build}
========================
@tableofcontents
Download: {#audio_orchestra_build_download}
=========
audio-orchestra uses some tools to manage the sources and build them:
need google repo: {#audio_orchestra_build_download_repo}
-----------------
see: http://source.android.com/source/downloading.html#installing-repo
On all platforms:
```{.sh}
mkdir ~/.bin
PATH=~/.bin:$PATH
curl https://storage.googleapis.com/git-repo-downloads/repo > ~/.bin/repo
chmod a+x ~/.bin/repo
```
On ubuntu
```{.sh}
sudo apt-get install repo
```
On archlinux
```{.sh}
sudo pacman -S repo
```
lutin (build-system): {#audio_orchestra_build_download_lutin}
---------------------
```{.sh}
pip install lutin --user
# optional dependency of lutin (manages image resizing for application release)
pip install pillow --user
```
dependency: {#audio_orchestra_build_download_dependency}
-----------
```{.sh}
mkdir -p WORKING_DIRECTORY/framework
cd WORKING_DIRECTORY/framework
repo init -u git://github.com/atria-soft/manifest.git
repo sync -j8
cd ../..
```
sources: {#audio_orchestra_build_download_sources}
--------
They are already downloaded by the repo manifest in:
```{.sh}
cd WORKING_DIRECTORY/framework/atria-soft/audio_orchestra
```
Build: {#audio_orchestra_build_build}
======
You must stay in your working directory...
```{.sh}
cd WORKING_DIRECTORY
```
library: {#audio_orchestra_build_build_library}
--------
```{.sh}
lutin -mdebug audio-orchestra
```
Sample: {#audio_orchestra_build_build_sample}
-------
No sample available for now ...

doc/mainpage.md

@ -0,0 +1,50 @@
AUDIO-ORCHESTRA library {#mainpage}
=======================
@tableofcontents
What is AUDIO-ORCHESTRA: {#audio_orchestra_mainpage_what}
========================
AUDIO-ORCHESTRA is a fork of the RTAudio lib (with ports for Android and IOs).
It is a cross-platform audio API that wraps the hardware.
This library is not user-friendly; use audio-river to get a correct and simple multiple-flow API.
What it does: {#audio_orchestra_mainpage_what_it_does}
-------------
- Open an audio stream for input or output
- Synchronize 2 flows (duplex)
- Run on several platforms: Android, Linux, MacOs, IOs, Windows
AUDIO-ORCHESTRA depends on the STL (compatible with the MacOs STL (CXX)).
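As a rough illustration of the API surface (a minimal sketch distilled from the `tools/orchestra-list.cpp` example added in this change set; it only lists devices and does not open a stream):
```{.cpp}
#include <etk/etk.hpp>
#include <audio/orchestra/Interface.hpp>

int main(int _argc, const char** _argv) {
	etk::init(_argc, _argv); // the only one init for etk
	audio::orchestra::Interface interface;
	// Iterate over the back-ends compiled in (alsa, pulse, jack, ...):
	for (auto &it : interface.getListApi()) {
		interface.instanciate(it);
		// Display every device known by this back-end:
		for (int32_t iii=0; iii<interface.getDeviceCount(); ++iii) {
			audio::orchestra::DeviceInfo info = interface.getDeviceInfo(iii);
			info.display(2);
		}
		interface.clear();
	}
	return 0;
}
```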
What languages are supported? {#audio_orchestra_mainpage_language}
=============================
AUDIO-ORCHESTRA is written in C++.
Are there any licensing restrictions? {#audio_orchestra_mainpage_license_restriction}
=====================================
AUDIO-ORCHESTRA is **FREE software** and _all sub-libraries are FREE and statically linkable!!!_
License (MIT) {#audio_orchestra_mainpage_license}
=============
Copyright AUDIO-ORCHESTRA Edouard DUPIN
MIT ...
Other pages {#audio_orchestra_mainpage_sub_page}
===========
- @ref audio_orchestra_build
- [**ewol coding style**](http://atria-soft.github.io/ewol/ewol_coding_style.html)

doxy_audio-orchestra.py

@ -0,0 +1,32 @@
#!/usr/bin/python
import os
import doxy.module as module
import doxy.debug as debug
import doxy.tools as tools
def create(target, module_name):
my_module = module.Module(__file__, module_name)
my_module.set_version("version.txt")
my_module.set_title("Orchestra: audio interface wrapper")
my_module.set_website("http://musicdsp.github.io/" + module_name)
my_module.set_website_sources("http://github.com/musicdsp/" + module_name)
my_module.add_path([
"audio",
"doc"
])
my_module.add_depend([
'etk',
'audio',
])
my_module.add_exclude_symbols([
'*operator<<*',
])
my_module.add_exclude_file([
'debug.h',
])
my_module.add_file_patterns([
'*.h',
'*.md',
])
return my_module

lutin_audio-orchestra.py

@ -0,0 +1,182 @@
#!/usr/bin/python
import lutin.tools as tools
import realog.debug as debug
def get_type():
return "LIBRARY"
def get_desc():
return "Generic wrapper on all audio interface"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return "authors.txt"
def get_version():
return "version.txt"
def configure(target, my_module):
my_module.add_src_file([
'audio/orchestra/debug.cpp',
'audio/orchestra/status.cpp',
'audio/orchestra/type.cpp',
'audio/orchestra/mode.cpp',
'audio/orchestra/state.cpp',
'audio/orchestra/error.cpp',
'audio/orchestra/base.cpp',
'audio/orchestra/Interface.cpp',
'audio/orchestra/Flags.cpp',
'audio/orchestra/Api.cpp',
'audio/orchestra/DeviceInfo.cpp',
'audio/orchestra/StreamOptions.cpp',
'audio/orchestra/api/Dummy.cpp'
])
my_module.add_header_file([
'audio/orchestra/debug.hpp',
'audio/orchestra/status.hpp',
'audio/orchestra/type.hpp',
'audio/orchestra/mode.hpp',
'audio/orchestra/state.hpp',
'audio/orchestra/error.hpp',
'audio/orchestra/base.hpp',
'audio/orchestra/Interface.hpp',
'audio/orchestra/Flags.hpp',
'audio/orchestra/Api.hpp',
'audio/orchestra/DeviceInfo.hpp',
'audio/orchestra/StreamOptions.hpp',
'audio/orchestra/CallbackInfo.hpp',
'audio/orchestra/StreamParameters.hpp'
])
my_module.add_depend([
'audio',
'etk'
])
# add all the time the dummy interface
my_module.add_flag('c++', ['-DORCHESTRA_BUILD_DUMMY'], export=True)
# TODO : Add a FILE interface:
if "Windows" in target.get_type():
my_module.add_src_file([
'audio/orchestra/api/Asio.cpp',
'audio/orchestra/api/Ds.cpp',
])
# load optionnal API:
my_module.add_optionnal_depend('asio', ["c++", "-DORCHESTRA_BUILD_ASIO"])
my_module.add_optionnal_depend('ds', ["c++", "-DORCHESTRA_BUILD_DS"])
my_module.add_optionnal_depend('wasapi', ["c++", "-DORCHESTRA_BUILD_WASAPI"])
elif "Linux" in target.get_type():
my_module.add_src_file([
'audio/orchestra/api/Alsa.cpp',
'audio/orchestra/api/Jack.cpp',
'audio/orchestra/api/Pulse.cpp',
'audio/orchestra/api/PulseDeviceList.cpp'
])
my_module.add_optionnal_depend('alsa', ["c++", "-DORCHESTRA_BUILD_ALSA"])
my_module.add_optionnal_depend('jack', ["c++", "-DORCHESTRA_BUILD_JACK"])
my_module.add_optionnal_depend('pulse', ["c++", "-DORCHESTRA_BUILD_PULSE"])
elif "MacOs" in target.get_type():
my_module.add_src_file([
'audio/orchestra/api/Core.cpp'
])
# MacOsX core
my_module.add_optionnal_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_MACOSX_CORE"])
elif "IOs" in target.get_type():
my_module.add_src_file('audio/orchestra/api/CoreIos.mm')
# IOsX core
my_module.add_optionnal_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_IOS_CORE"])
elif "Android" in target.get_type():
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraConstants.java')
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraManagerCallback.java')
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraNative.java')
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraInterfaceInput.java')
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraInterfaceOutput.java')
my_module.add_src_file('android/org/musicdsp/orchestra/OrchestraManager.java')
# create the inter-language interface
my_module.add_src_file('org.musicdsp.orchestra.OrchestraConstants.javah')
my_module.add_path('android', type='java')
my_module.add_depend(['SDK', 'jvm-basics', 'ejson'])
my_module.add_flag('c++', ['-DORCHESTRA_BUILD_JAVA'], export=True)
my_module.add_src_file('audio/orchestra/api/Android.cpp')
my_module.add_src_file('audio/orchestra/api/AndroidNativeInterface.cpp')
# add the creator of the basic java class ...
target.add_action("BINARY", 11, "audio-orchestra-out-wrapper", tool_generate_add_java_section_in_class)
else:
debug.warning("unknow target for audio_orchestra : " + target.name);
my_module.add_path(".")
return True
##################################################################
##
## Android specific section
##
##################################################################
def tool_generate_add_java_section_in_class(target, module, package_name):
module.add_pkg("GENERATE_SECTION__IMPORT", [
"import org.musicdsp.orchestra.OrchestraManager;"
])
module.add_pkg("GENERATE_SECTION__DECLARE", [
"private OrchestraManager m_audioManagerHandle;"
])
module.add_pkg("GENERATE_SECTION__CONSTRUCTOR", [
"// load audio maneger if it does not work, it is not critical ...",
"try {",
" m_audioManagerHandle = new OrchestraManager();",
"} catch (RuntimeException e) {",
" Log.e(\"" + package_name + "\", \"Can not load Audio interface (maybe not really needed) :\" + e);",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_CREATE", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onCreate();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_START", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onStart();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_RESTART", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onRestart();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_RESUME", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onResume();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_PAUSE", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onPause();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_STOP", [
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onStop();",
"}"
])
module.add_pkg("GENERATE_SECTION__ON_DESTROY", [
"// Destroy the AdView.",
"if (m_audioManagerHandle != null) {",
" m_audioManagerHandle.onDestroy();",
"}"
])


@ -1,77 +0,0 @@
#!/usr/bin/python
import lutin.module as module
import lutin.tools as tools
import lutin.debug as debug
def get_desc():
return "audio_orchestra : Generic wrapper on all audio interface"
def create(target):
myModule = module.Module(__file__, 'audio_orchestra', 'LIBRARY')
myModule.add_src_file([
'audio/orchestra/debug.cpp',
'audio/orchestra/status.cpp',
'audio/orchestra/type.cpp',
'audio/orchestra/mode.cpp',
'audio/orchestra/state.cpp',
'audio/orchestra/error.cpp',
'audio/orchestra/base.cpp',
'audio/orchestra/Interface.cpp',
'audio/orchestra/Flags.cpp',
'audio/orchestra/Api.cpp',
'audio/orchestra/DeviceInfo.cpp',
'audio/orchestra/StreamOptions.cpp',
'audio/orchestra/api/Dummy.cpp'
])
myModule.add_module_depend(['audio', 'etk'])
# add all the time the dummy interface
myModule.add_export_flag('c++', ['-DORCHESTRA_BUILD_DUMMY'])
# TODO : Add a FILE interface:
if target.name=="Windows":
myModule.add_src_file([
'audio/orchestra/api/Asio.cpp',
'audio/orchestra/api/Ds.cpp',
])
# load optionnal API:
myModule.add_optionnal_module_depend('asio', ["c++", "-DORCHESTRA_BUILD_ASIO"])
myModule.add_optionnal_module_depend('ds', ["c++", "-DORCHESTRA_BUILD_DS"])
myModule.add_optionnal_module_depend('wasapi', ["c++", "-DORCHESTRA_BUILD_WASAPI"])
elif target.name=="Linux":
myModule.add_src_file([
'audio/orchestra/api/Alsa.cpp',
'audio/orchestra/api/Jack.cpp',
'audio/orchestra/api/Pulse.cpp',
'audio/orchestra/api/Oss.cpp'
])
myModule.add_optionnal_module_depend('alsa', ["c++", "-DORCHESTRA_BUILD_ALSA"])
myModule.add_optionnal_module_depend('jack', ["c++", "-DORCHESTRA_BUILD_JACK"])
myModule.add_optionnal_module_depend('pulse', ["c++", "-DORCHESTRA_BUILD_PULSE"])
myModule.add_optionnal_module_depend('oss', ["c++", "-DORCHESTRA_BUILD_OSS"])
elif target.name=="MacOs":
myModule.add_src_file([
'audio/orchestra/api/Core.cpp',
'audio/orchestra/api/Oss.cpp'
])
# MacOsX core
myModule.add_optionnal_module_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_MACOSX_CORE"])
elif target.name=="IOs":
myModule.add_src_file('audio/orchestra/api/CoreIos.mm')
# IOsX core
myModule.add_optionnal_module_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_IOS_CORE"])
elif target.name=="Android":
myModule.add_src_file('audio/orchestra/api/Android.cpp')
# specidic java interface for android:
myModule.add_optionnal_module_depend('ewolAndroidAudio', ["c++", "-DORCHESTRA_BUILD_JAVA"])
#myModule.add_module_depend(['ewol'])
else:
debug.warning("unknow target for audio_orchestra : " + target.name);
myModule.add_export_path(tools.get_current_path(__file__))
# add the currrent module at the
return myModule


@ -0,0 +1,37 @@
#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
def get_type():
return "BINARY"
def get_sub_type():
return "TOOLS"
def get_desc():
return "'in' tool for orchestra"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def configure(target, my_module):
my_module.add_src_file([
'orchestra-in.cpp'
])
my_module.add_depend([
'audio-orchestra',
'test-debug'
])
return True


@ -0,0 +1,37 @@
#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
def get_type():
return "BINARY"
def get_sub_type():
return "TOOLS"
def get_desc():
return "'list' i/o tool for orchestra"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def configure(target, my_module):
my_module.add_src_file([
'orchestra-list.cpp'
])
my_module.add_depend([
'audio-orchestra',
'test-debug'
])
return True


@ -0,0 +1,37 @@
#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
def get_type():
return "BINARY"
def get_sub_type():
return "TOOLS"
def get_desc():
return "'out' tool for orchestra"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return ["Mr DUPIN Edouard <yui.heero@gmail.com>"]
def configure(target, my_module):
my_module.add_src_file([
'orchestra-out.cpp'
])
my_module.add_depend([
'audio-orchestra',
'test-debug'
])
return True

tools/orchestra-in.cpp

@ -0,0 +1,28 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.hpp>
#include <test-debug/debug.hpp>
#include <audio/orchestra/Interface.hpp>
int main(int _argc, const char **_argv) {
// the only one init for etk:
etk::init(_argc, _argv);
for (int32_t iii=0; iii<_argc ; ++iii) {
etk::String data = _argv[iii];
if ( data == "-h"
|| data == "--help") {
TEST_PRINT("Help : ");
TEST_PRINT(" ./xxx ---");
exit(0);
}
}
audio::orchestra::Interface interface;
TEST_PRINT("TODO : Need to write it");
return 0;
}

tools/orchestra-list.cpp

@ -0,0 +1,39 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.hpp>
#include <test-debug/debug.hpp>
#include <audio/orchestra/Interface.hpp>
int main(int _argc, const char **_argv) {
// the only one init for etk:
etk::init(_argc, _argv);
for (int32_t iii=0; iii<_argc ; ++iii) {
etk::String data = _argv[iii];
if ( data == "-h"
|| data == "--help") {
TEST_PRINT("Help : ");
TEST_PRINT(" ./xxx ---");
exit(0);
}
}
audio::orchestra::Interface interface;
etk::Vector<etk::String> apis = interface.getListApi();
TEST_PRINT("Find : " << apis.size() << " apis.");
for (auto &it : apis) {
interface.instanciate(it);
TEST_PRINT("Device list for : '" << it << "'");
for (int32_t iii=0; iii<interface.getDeviceCount(); ++iii) {
audio::orchestra::DeviceInfo info = interface.getDeviceInfo(iii);
TEST_PRINT(" " << iii << " name :" << info.name);
info.display(2);
}
interface.clear();
}
return 0;
}

tools/orchestra-out.cpp

@ -0,0 +1,27 @@
/** @file
* @author Edouard DUPIN
* @copyright 2015, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
*/
#include <etk/etk.hpp>
#include <test-debug/debug.hpp>
#include <audio/orchestra/Interface.hpp>
int main(int _argc, const char **_argv) {
// the only one init for etk:
etk::init(_argc, _argv);
for (int32_t iii=0; iii<_argc ; ++iii) {
etk::String data = _argv[iii];
if ( data == "-h"
|| data == "--help") {
TEST_PRINT("Help : ");
TEST_PRINT(" ./xxx ---");
exit(0);
}
}
audio::orchestra::Interface interface;
TEST_PRINT("TODO : Need to write it");
return 0;
}

version.txt

@ -0,0 +1 @@
1.0.0