[DEV] first audio output for IOs

Edouard DUPIN 2014-05-09 09:14:22 +02:00
parent f8b2271dff
commit b632782e5b
8 changed files with 601 additions and 24 deletions

View File

@@ -25,6 +25,7 @@ std::ostream& operator <<(std::ostream& _os, const airtaudio::api::type& _obj){
case airtaudio::api::LINUX_OSS: _os << "LINUX_OSS"; break;
case airtaudio::api::UNIX_JACK: _os << "UNIX_JACK"; break;
case airtaudio::api::MACOSX_CORE: _os << "MACOSX_CORE"; break;
case airtaudio::api::IOS_CORE: _os << "IOS_CORE"; break;
case airtaudio::api::WINDOWS_ASIO: _os << "WINDOWS_ASIO"; break;
case airtaudio::api::WINDOWS_DS: _os << "WINDOWS_DS"; break;
case airtaudio::api::RTAUDIO_DUMMY: _os << "RTAUDIO_DUMMY"; break;

View File

@@ -24,6 +24,7 @@ namespace airtaudio {
LINUX_OSS, //!< The Linux Open Sound System API.
UNIX_JACK, //!< The Jack Low-Latency Audio Server API.
MACOSX_CORE, //!< Macintosh OS-X Core Audio API.
IOS_CORE, //!< The Apple iOS Core Audio API.
WINDOWS_ASIO, //!< The Steinberg Audio Stream I/O API.
WINDOWS_DS, //!< The Microsoft Direct Sound API.
RTAUDIO_DUMMY, //!< A compilable but non-functional API.

View File

@@ -64,7 +64,10 @@ airtaudio::Interface::Interface(void) :
addInterface(airtaudio::api::WINDOWS_DS, airtaudio::api::Ds::Create);
#endif
#if defined(__MACOSX_CORE__)
addInterface(airtaudio::api::MACOSX_CORE, airtaudio::api::Core::Create);
#endif
#if defined(__IOS_CORE__)
addInterface(airtaudio::api::IOS_CORE, airtaudio::api::CoreIos::Create);
#endif
#if defined(__ANDROID_JAVA__)
addInterface(airtaudio::api::ANDROID_JAVA, airtaudio::api::Android::Create);

View File

@@ -19,6 +19,7 @@
#include <airtaudio/api/Android.h>
#include <airtaudio/api/Asio.h>
#include <airtaudio/api/Core.h>
#include <airtaudio/api/CoreIos.h>
#include <airtaudio/api/Ds.h>
#include <airtaudio/api/Dummy.h>
#include <airtaudio/api/Jack.h>

View File

@@ -6,14 +6,11 @@
* @license like MIT (see license file)
*/
-#if !defined(__AIRTAUDIO_API_CORE_H__) && (defined(__MACOSX_CORE__) || defined(__IOS_CORE__))
+#if !defined(__AIRTAUDIO_API_CORE_H__) && defined(__MACOSX_CORE__)
#define __AIRTAUDIO_API_CORE_H__
-#if defined(__IOS_CORE__)
-#include <CoreAudio/CoreAudioTypes.h>
-#else
-#include <CoreAudio/AudioHardware.h>
-#endif
+#include <CoreAudio/AudioHardware.h>
namespace airtaudio {
namespace api {
class Core: public airtaudio::Api {

airtaudio/api/CoreIos.h Normal file (55 lines)
View File

@@ -0,0 +1,55 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_CORE_IOS_H__) && defined(__IOS_CORE__)
#define __AIRTAUDIO_API_CORE_IOS_H__
namespace airtaudio {
namespace api {
class CoreIosPrivate;
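// (defined in CoreIos.mm; a pimpl that keeps the Objective-C state out of this C++ header)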
class CoreIos: public airtaudio::Api {
public:
static airtaudio::Api* Create(void);
public:
CoreIos(void);
~CoreIos(void);
airtaudio::api::type getCurrentApi(void) {
return airtaudio::api::IOS_CORE;
}
uint32_t getDeviceCount(void);
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
enum airtaudio::errorType closeStream(void);
enum airtaudio::errorType startStream(void);
enum airtaudio::errorType stopStream(void);
enum airtaudio::errorType abortStream(void);
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesirable results!
void callbackEvent(void);
private:
std::vector<airtaudio::DeviceInfo> m_devices;
void saveDeviceInfo(void);
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
private:
void callBackEvent(void* _data,
int32_t _frameRate);
static void androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData);
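// Note: these two callbacks were carried over from the Android backend; their bodies in CoreIos.mm are still commented out.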
CoreIosPrivate* m_private;
};
};
};
#endif

airtaudio/api/CoreIos.mm Normal file (508 lines)
View File

@@ -0,0 +1,508 @@
/**
* @author Edouard DUPIN
*
* @license like MIT (see license file)
*/
#ifdef __IOS_CORE__
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <airtaudio/Interface.h>
#include <airtaudio/debug.h>
#include <limits.h>
airtaudio::Api* airtaudio::api::CoreIos::Create(void) {
ATA_INFO("Create CoreIos device ... ");
return new airtaudio::api::CoreIos();
}
// ============================================================================================================
@interface IosAudioController : NSObject {
AudioComponentInstance audioUnit;
AudioBuffer tempBuffer; // this will hold the latest data from the microphone
}
@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;
- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;
@end
// ============================================================================================================
#define kOutputBus 0
#define kInputBus 1
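// On the RemoteIO unit, bus 0 is the output (speaker) element and bus 1 is the input (microphone) element.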
IosAudioController* iosAudio;
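// Helper: an OSStatus of 0 (noErr) means success; anything else is a Core Audio error code.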
void checkStatus(int status){
if (status) {
printf("Status not 0! %d\n", status);
// exit(1);
}
}
/**
This callback is called when new audio data from the microphone is
available.
*/
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Because of the way our audio format (set up below) is chosen:
// we only need 1 buffer, since it is mono.
// Samples are 16 bits = 2 bytes.
// 1 frame contains exactly 1 sample.
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Put buffer in a AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender([iosAudio audioUnit],
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
checkStatus(status);
// Now, we have the samples we just read sitting in buffers in bufferList
// Process the new data
[iosAudio processAudio:&bufferList];
// release the malloc'ed data in the buffer we created earlier
free(bufferList.mBuffers[0].mData);
return noErr;
}
/**
This callback is called when the audioUnit needs new data to play through the
speakers. If you don't have any, just don't write anything in the buffers
*/
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how
// much data is in the buffer.
for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
AudioBuffer buffer = ioData->mBuffers[i];
// NSLog(@" Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);
// copy temporary buffer data to output buffer
UInt32 size = std::min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits
memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer
// uncomment to hear random noise
/*
UInt16 *frameBuffer = buffer.mData;
for (int j = 0; j < inNumberFrames; j++) {
frameBuffer[j] = rand();
}
*/
}
return noErr;
}
@implementation IosAudioController
@synthesize audioUnit, tempBuffer;
/**
Initialize the audioUnit and allocate our own temporary buffer.
The temporary buffer will hold the latest data coming in from the microphone,
and will be copied to the output when this is requested.
*/
- (id) init {
self = [super init];
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
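// RemoteIO is the iOS audio unit that talks directly to the audio hardware, for both capture and playback.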
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
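// Mono 16-bit PCM: 1 sample per frame * 2 bytes per sample = 2 bytes per frame; with 1 frame per packet, 2 bytes per packet.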
// Apply format
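// The format is applied to the output scope of the input bus (mic data handed to us)
// and to the input scope of the output bus (data we hand to the speaker).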
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
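checkStatus(status); // assumed intended: every other AudioUnitSetProperty call above checks its status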
// Allocate our own buffers (1 channel, 16 bits per sample, thus 16 bits per frame, thus 2 bytes per frame).
// In practice the buffers contain 512 frames; if this ever changes, processAudio adjusts the size.
tempBuffer.mNumberChannels = 1;
tempBuffer.mDataByteSize = 512 * 2;
tempBuffer.mData = malloc( 512 * 2 );
// Initialise
status = AudioUnitInitialize(audioUnit);
checkStatus(status);
return self;
}
/**
Start the audioUnit. This means data will be provided from
the microphone, and requested for feeding to the speakers, by
use of the provided callbacks.
*/
- (void) start {
ATA_INFO("Might start the stream ...");
OSStatus status = AudioOutputUnitStart(audioUnit);
checkStatus(status);
}
/**
Stop the audioUnit
*/
- (void) stop {
ATA_INFO("Might stop the stream ...");
OSStatus status = AudioOutputUnitStop(audioUnit);
checkStatus(status);
}
/**
Change this function to decide what is done with incoming
audio data from the microphone.
Right now we copy it to our own temporary buffer.
*/
- (void) processAudio: (AudioBufferList*) bufferList{
AudioBuffer sourceBuffer = bufferList->mBuffers[0];
// fix tempBuffer size if it's the wrong size
if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
free(tempBuffer.mData);
tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
}
// copy incoming audio data to temporary buffer
memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}
/**
Clean up.
*/
- (void) dealloc {
AudioUnitUninitialize(audioUnit);
free(tempBuffer.mData);
// [super dealloc] must come last: after it runs the instance memory is gone.
[super dealloc];
}
@end
// ============================================================================================================
namespace airtaudio {
namespace api {
class CoreIosPrivate {
};
};
};
airtaudio::api::CoreIos::CoreIos(void) :
m_private(new airtaudio::api::CoreIosPrivate) {
ATA_INFO("new CoreIos");
int32_t deviceCount = 2;
ATA_INFO("Get count devices : " << deviceCount);
airtaudio::DeviceInfo tmp;
// Add default output format :
tmp.name = "out";
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 2;
tmp.inputChannels = 0;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = true;
tmp.isDefaultInput = false;
tmp.nativeFormats = SINT16;
m_devices.push_back(tmp);
// add default input format:
tmp.name = "in";
tmp.sampleRates.push_back(48000);
tmp.outputChannels = 0;
tmp.inputChannels = 2;
tmp.duplexChannels = 0;
tmp.isDefaultOutput = false;
tmp.isDefaultInput = true;
tmp.nativeFormats = SINT16;
m_devices.push_back(tmp);
ATA_INFO("Create CoreIOs interface (end)");
}
airtaudio::api::CoreIos::~CoreIos(void) {
ATA_INFO("Destroy CoreIOs interface");
delete m_private;
m_private = NULL;
}
uint32_t airtaudio::api::CoreIos::getDeviceCount(void) {
//ATA_INFO("Get device count:"<< m_devices.size());
return m_devices.size();
}
airtaudio::DeviceInfo airtaudio::api::CoreIos::getDeviceInfo(uint32_t _device) {
//ATA_INFO("Get device info ...");
return m_devices[_device];
}
enum airtaudio::errorType airtaudio::api::CoreIos::closeStream(void) {
ATA_INFO("Close Stream");
// Can not close the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
// Can not start the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
//ewol::Context& tmpContext = ewol::getContext();
//tmpContext.audioCloseDevice(0);
// Can not stop the stream now...
return airtaudio::errorNone;
}
enum airtaudio::errorType airtaudio::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
[iosAudio stop];
//ewol::Context& tmpContext = ewol::getContext();
//tmpContext.audioCloseDevice(0);
// Can not do more than stop the audio unit for now...
return airtaudio::errorNone;
}
void airtaudio::api::CoreIos::callBackEvent(void* _data,
int32_t _frameRate) {
/*
int32_t doStopStream = 0;
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
double streamTime = getStreamTime();
airtaudio::streamStatus status = 0;
if (m_stream.doConvertBuffer[OUTPUT] == true) {
doStopStream = callback(m_stream.userBuffer[OUTPUT],
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
} else {
doStopStream = callback(_data,
NULL,
_frameRate,
streamTime,
status,
m_stream.callbackInfo.userData);
}
if (doStopStream == 2) {
abortStream();
return;
}
airtaudio::Api::tickStreamTime();
*/
}
void airtaudio::api::CoreIos::androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData) {
/*
if (_userData == NULL) {
ATA_INFO("callback event ... NULL pointer");
return;
}
airtaudio::api::Android* myClass = static_cast<airtaudio::api::Android*>(_userData);
myClass->callBackEvent(_data, _frameRate/2);
*/
}
bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != OUTPUT) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
bool ret = true;
iosAudio = [[IosAudioController alloc] init];
[iosAudio start];
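// Note: in this first version the RemoteIO unit is created and started here, at probe time;
// startStream()/stopStream() are still stubs and only abortStream() actually stops it.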
/*
m_stream.userFormat = _format;
m_stream.nUserChannels[_mode] = _channels;
ewol::Context& tmpContext = ewol::getContext();
if (_format == SINT8) {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
} else {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
}
m_stream.bufferSize = 256;
m_stream.sampleRate = _sampleRate;
m_stream.doByteSwap[_mode] = false; // for endianness ...
// TODO : for now this is hard-coded ==> to be updated later ...
m_stream.deviceFormat[_mode] = SINT16;
m_stream.nDeviceChannels[_mode] = 2;
m_stream.deviceInterleaved[_mode] = true;
m_stream.doConvertBuffer[_mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
m_stream.doConvertBuffer[_mode] = true;
}
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
&& m_stream.nUserChannels[_mode] > 1) {
m_stream.doConvertBuffer[_mode] = true;
}
if (m_stream.doConvertBuffer[_mode] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[_mode] == NULL) {
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
*/
return ret;
}
#endif

View File

@@ -15,24 +15,27 @@ def create(target):
'airtaudio/base.cpp',
'airtaudio/Interface.cpp',
'airtaudio/Api.cpp',
'airtaudio/api/Alsa.cpp',
'airtaudio/api/Android.cpp',
'airtaudio/api/Asio.cpp',
'airtaudio/api/Core.cpp',
'airtaudio/api/Ds.cpp',
'airtaudio/api/Dummy.cpp',
'airtaudio/api/Jack.cpp',
'airtaudio/api/Oss.cpp',
'airtaudio/api/Pulse.cpp'
])
myModule.add_export_flag_CC(['-D__AIRTAUDIO_API_DUMMY_H__'])
if target.name=="Windows":
myModule.add_src_file([
'airtaudio/api/Asio.cpp',
'airtaudio/api/Ds.cpp',
])
# ASIO API on Windows
myModule.add_export_flag_CC(['-D__WINDOWS_ASIO__'])
# Windows DirectSound API
#myModule.add_export_flag_CC(['__WINDOWS_DS__'])
myModule.add_module_depend(['etk'])
elif target.name=="Linux":
myModule.add_src_file([
'airtaudio/api/Alsa.cpp',
'airtaudio/api/Jack.cpp',
'airtaudio/api/Pulse.cpp',
'airtaudio/api/Oss.cpp'
])
# Linux Alsa API
#myModule.add_export_flag_CC(['-D__LINUX_ALSA__'])
#myModule.add_export_flag_LD("-lasound")
@@ -43,28 +46,36 @@ def create(target):
myModule.add_export_flag_CC(['-D__LINUX_PULSE__'])
myModule.add_export_flag_LD("-lpulse-simple")
myModule.add_export_flag_LD("-lpulse")
myModule.add_export_flag_CC(['-D__LINUX_OSS__'])
# ...
myModule.add_module_depend(['etk'])
elif target.name=="MacOs":
myModule.add_src_file([
'airtaudio/api/Core.cpp',
'airtaudio/api/Oss.cpp'
])
# MacOsX core
myModule.add_export_flag_CC(['-D__MACOSX_CORE__'])
myModule.add_export_flag_LD("-framework CoreAudio")
myModule.add_module_depend(['etk'])
elif target.name=="IOs":
myModule.add_src_file('airtaudio/api/CoreIos.mm')
# IOs core
#myModule.add_export_flag_CC(['-D__IOS_CORE__'])
#myModule.add_export_flag_LD("-framework CoreAudio")
pass
myModule.add_export_flag_CC(['-D__IOS_CORE__'])
myModule.add_export_flag_LD("-framework CoreAudio")
myModule.add_export_flag_LD("-framework AudioToolbox")
myModule.add_module_depend(['etk'])
elif target.name=="Android":
myModule.add_src_file('airtaudio/api/Android.cpp')
# Android java
myModule.add_export_flag_CC(['-D__ANDROID_JAVA__'])
myModule.add_module_depend(['ewol'])
else:
debug.warning("unknow target for RTAudio : " + target.name);
debug.warning("unknow target for AIRTAudio : " + target.name);
myModule.add_export_path(tools.get_current_path(__file__))
myModule.add_path(tools.get_current_path(__file__)+"/rtaudio/")
myModule.add_path(tools.get_current_path(__file__)+"/rtaudio/include/")
# TODO : remove this when debug is unified ...
# name of the dependency
myModule.add_module_depend(['ewol'])
# add the current module at the
return myModule