[DEV] generator work

This commit is contained in:
Edouard DUPIN 2015-03-16 21:03:23 +01:00
parent f6e2b2f1a1
commit c27f60d3af
13 changed files with 502 additions and 310 deletions

View File

@ -10,6 +10,7 @@ find_package(catkin REQUIRED COMPONENTS
roscpp
rospy
std_msgs
audio_msg
message_generation
river
)

View File

@ -13,279 +13,9 @@
#include <sstream>
#include "debug.h"
#include <etk/stdTools.h>
#include <etk/tool.h>
ros::Time startTime;
/*
static const double delayTimeBeforRemoveOutput = 30.0; // 30 second delay before removing an output that does not create sound.
class IOPorperty {
public:
float frequencyIn;
float frequencyOut;
std::vector<uint8_t> inputChannelMap;
std::vector<uint8_t> outputChannelMap;
};
IOPorperty audioProperties;
class InterfacePorperty {
public:
int64_t uid;
std::string descriptiveString;
float frequency;
std::vector<uint8_t> channelMap;
std::vector<int16_t> data;
double playTime;
double requestTime;
InterfacePorperty():
uid(0),
frequency(0),
playTime(0),
requestTime(0) {
static int64_t suid = 25;
uid = suid++;
}
};
std::vector<InterfacePorperty> listFlow;
*/
std11::mutex mutex;
/*
// RT audio input callback
int inoutInput(void* _outputBuffer,
void* _inputBuffer,
unsigned int _nBufferFrames,
double _streamTime,
RtAudioStreamStatus _status,
void* _data) {
IOPorperty* audioProperty = (IOPorperty*)_data;
// Since the number of input and output channels is equal, we can do
// a simple buffer copy operation here.
if (_status) {
ROS_ERROR("Stream overflow detected.");
}
// check if someone subscribes to the output:
if (stream_microphone.getNumSubscribers() > 0) {
//ROS_INFO("Publish data ... %d frames time %lf", _nBufferFrames, _streamTime);
audio_msg::AudioBuffer msg;
// create the ROS timestamp
msg.header.stamp = startTime + ros::Duration(_streamTime, (_streamTime-(int32_t)_streamTime)*1000000000.0);
// set message frequency
msg.frequency = audioProperty->frequencyIn;
// set channel map properties
msg.channelMap = audioProperty->inputChannelMap;
// copy data:
msg.data.resize(_nBufferFrames*audioProperty->inputChannelMap.size());
memcpy(&msg.data[0], _inputBuffer, _nBufferFrames*audioProperty->inputChannelMap.size()*sizeof(int16_t));
// publish message
stream_microphone.publish(msg);
}
return 0;
}
// RT audio out callback
int inoutOutput(void* _outputBuffer,
void* _inputBuffer,
unsigned int _nBufferFrames,
double _streamTime,
RtAudioStreamStatus _status,
void* _data) {
IOPorperty* audioProperty = (IOPorperty*)_data;
// Since the number of input and output channels is equal, we can do
// a simple buffer copy operation here.
if (_status) {
ROS_ERROR("Stream underflow detected.");
}
std::vector<int32_t> output;
int32_t newsize = (int64_t)audioProperty->outputChannelMap.size()*(int64_t)_nBufferFrames;
//ROS_INFO("read %d*%d=%d ", audioProperty->outputChannelMap.size(), _nBufferFrames, newsize);
output.resize(newsize, 0);
mutex.lock();
std::vector<InterfacePorperty>::iterator it = listFlow.begin();
while (it != listFlow.end()) {
// copy data...
size_t availlable = it->data.size();
availlable = availlable<output.size()?availlable:output.size();
if (availlable > 0) {
for (size_t iii=0; iii<availlable; ++iii) {
output[iii] += it->data[iii];
}
//ROS_INFO("write %ld (better have : %ld)", availlable, output.size());
it->data.erase(it->data.begin(), it->data.begin()+availlable);
it->playTime = _streamTime;
} else {
ROS_INFO("[%d] underflow %d", (int)it->uid, (int)output.size());
if (it->playTime <= 0) {
double delay = it->playTime + _streamTime;
if (delay > delayTimeBeforRemoveOutput) {
ROS_ERROR("[%d] underflow ==> too much : Remove interface (after %lf s)", (int)it->uid, delay);
it = listFlow.erase(it);
continue;
}
} else {
it->playTime = -_streamTime;
}
}
++it;
}
mutex.unlock();
if (_outputBuffer == NULL) {
ROS_ERROR("NULL output");
return 0;
}
int16_t* outputPointer = (int16_t*)_outputBuffer;
for (size_t iii=0; iii<output.size(); ++iii) {
*outputPointer++ = output[iii]>32767?32767:(output[iii]<-32767?-32767:(int16_t)output[iii]);
}
// check if someone is connected
if (stream_speaker.getNumSubscribers() > 0) {
//ROS_INFO("Get data ... %d frames time %lf", _nBufferFrames, _streamTime);
audio_msg::AudioBuffer msg;
// Create the ROS message time
msg.header.stamp = startTime + ros::Duration(_streamTime, (_streamTime-(int32_t)_streamTime)*1000000000.0);
// set message frequency
msg.frequency = audioProperty->frequencyOut;
// set channel map properties
msg.channelMap = audioProperty->outputChannelMap;
// copy data:
msg.data.resize(_nBufferFrames*audioProperty->outputChannelMap.size());
memcpy(&msg.data[0], _outputBuffer, _nBufferFrames*audioProperty->outputChannelMap.size()*sizeof(int16_t));
// publish message
stream_speaker.publish(msg);
}
return 0;
}
*/
bool f_create(audio_core::create::Request &req,
audio_core::create::Response &res) {
/*
InterfacePorperty newInterface;
newInterface.descriptiveString = req.typeFlow;
newInterface.frequency = req.frequency;
newInterface.channelMap = req.channelMap;
double requestTime = (double)req.stamp.sec + (double)req.stamp.nsec*0.0000000001;
newInterface.requestTime = requestTime;
newInterface.playTime = 0.0;
res.handle = newInterface.uid;
mutex.lock();
listFlow.push_back(newInterface);
mutex.unlock();
ROS_INFO("create : [%d] type : %s", (int)res.handle, req.typeFlow.c_str());
*/
return true;
}
bool f_remove(audio_core::remove::Request &req,
audio_core::remove::Response &res) {
/*
mutex.lock();
std::vector<InterfacePorperty>::iterator it = listFlow.begin();
while (it != listFlow.end()) {
if (it->uid == req.handle) {
it = listFlow.erase(it);
} else {
++it;
}
}
mutex.unlock();
ROS_INFO("remove : [%d]", (int)req.handle);
*/
return true;
}
bool f_write(audio_core::write::Request &req,
audio_core::write::Response &res) {
//ROS_INFO("write : [%ld] nbSample=%d", req.handle, (int32_t)req.data.size());
/*
mutex.lock();
std::vector<InterfacePorperty>::iterator it = listFlow.begin();
while (it != listFlow.end()) {
if (it->uid == req.handle) {
std::vector<int16_t> data = convert(it->channelMap, it->frequency, req.data, audioProperties.outputChannelMap, audioProperties.frequencyOut);
size_t oldSize = it->data.size();
it->data.resize(oldSize + data.size());
memcpy(&it->data[oldSize], &data[0], data.size()*sizeof(int16_t));
res.waitTime = (float)(it->data.size() / audioProperties.outputChannelMap.size()) / (float)audioProperties.frequencyOut * 1000000.0f;
if (res.waitTime > 200000) {
res.waitTime -= 200000;
} else {
res.waitTime = 0;
}
break;
}
++it;
}
mutex.unlock();
*/
return true;
}
bool f_getBufferTime(audio_core::getBufferTime::Request &req,
audio_core::getBufferTime::Response &res) {
//ROS_INFO("get Time: [%ld]", req.handle, (int32_t)req.data.size());
/*
mutex.lock();
std::vector<InterfacePorperty>::iterator it = listFlow.begin();
while (it != listFlow.end()) {
if (it->uid == req.handle) {
res.time = (float)(it->data.size() / audioProperties.outputChannelMap.size()) / (float)audioProperties.frequencyOut * 1000000.0f;
break;
}
++it;
}
mutex.unlock();
*/
return true;
}
/*
output.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_LEFT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_RIGHT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_SURROUND_LEFT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_SURROUND_RIGHT);
output.push_back(audio_msg::AudioBuffer::CHANNEL_LFE);
output.push_back(audio_msg::AudioBuffer::CHANNEL_SUBWOOFER);
*/
class InterfaceOutput {
public:
std::vector<audio::channel> m_channelMap;
std11::shared_ptr<river::Manager> m_manager;
std11::shared_ptr<river::Interface> m_interface;
public:
InterfaceOutput(std11::shared_ptr<river::Manager> _manager) :
m_manager(_manager) {
//Set stereo output:
m_channelMap.push_back(audio::channel_frontLeft);
m_channelMap.push_back(audio::channel_frontRight);
m_interface = m_manager->createOutput(48000,
m_channelMap,
audio::format_int16,
"speaker");
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return;
}
m_interface->setReadwrite();
}
void start() {
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return;
}
m_interface->start();
}
void stop() {
//m_interface->write(&data[0], data.size()/m_channelMap.size());
m_interface->stop();
}
};
static std11::mutex mutex;
class InterfaceInput {
public:
@ -294,7 +24,7 @@ class InterfaceInput {
ros::Publisher m_stream;
std11::mutex mutex;
public:
InterfaceInput(std11::shared_ptr<river::Manager> _manager, const std::string& _input="microphone", const std::string& _publisher="microphone") :
InterfaceInput(std11::shared_ptr<river::Manager> _manager, const std::string& _input="microphone", const std::string& _publisher="microphone", bool _feedback=false) :
m_manager(_manager) {
ros::NodeHandle nodeHandlePrivate("~");
m_stream = nodeHandlePrivate.advertise<audio_msg::AudioBuffer>(_publisher, 100);
@ -302,10 +32,17 @@ class InterfaceInput {
std::vector<audio::channel> channelMap;
channelMap.push_back(audio::channel_frontLeft);
channelMap.push_back(audio::channel_frontRight);
if (_feedback == false) {
m_interface = m_manager->createInput(48000,
channelMap,
audio::format_int16,
_input);
} else {
m_interface = m_manager->createFeedback(48000,
channelMap,
audio::format_int16,
_input);
}
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return;
@ -340,34 +77,187 @@ class InterfaceInput {
}
// check if someone subscribes to the output:
if (m_stream.getNumSubscribers() > 0) {
//ROS_INFO("Publish data ... %d frames time %lf", _nBufferFrames, _streamTime);
audio_msg::AudioBuffer msg;
// create the ROS timestamp
msg.header.stamp = ros::Time::now();//_time;
// set message frequency
msg.frequency = _frequency;
// set channel mas properties
//msg.channelMap = audioProperty->inputChannelMap;
msg.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
msg.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
// set channel map properties
msg.channelMap = audio::convertChannel(_map);
// Set the format of the flow
msg.channelFormat = audio::convertFormat(_format);
// copy data:
msg.data.resize(_nbChunk*_map.size());
memcpy(&msg.data[0], _data, _nbChunk*_map.size()*sizeof(int16_t));
msg.data.resize(_nbChunk*_map.size()*audio::getFormatBytes(_format));
// copy data onto the ROS buffer interface
memcpy(&msg.data[0], _data, _nbChunk*_map.size()*audio::getFormatBytes(_format));
// publish message
m_stream.publish(msg);
}
}
void start() {
};
class InterfaceOutput {
public:
std11::shared_ptr<river::Manager> m_manager;
std11::shared_ptr<river::Interface> m_interface;
int64_t m_id;
public:
InterfaceOutput(std11::shared_ptr<river::Manager> _manager,
const std::string& _name,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map) :
m_manager(_manager) {
static int64_t uid = etk::tool::irand(0, 56000);
m_id = uid++;
uid += etk::tool::irand(0, 10);
m_interface = m_manager->createOutput(_frequency,
_map,
_format,
"speaker");
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
m_id = -1;
return;
}
m_interface->setReadwrite();
m_interface->start();
}
~InterfaceOutput() {
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return;
}
// wait 20 seconds before stopping the interface ...
usleep(20000000);
m_interface->stop();
m_interface.reset();
}
int64_t getId() {
return m_id;
}
uint64_t write(const audio_core::writeRequest_<std::allocator<void> >::_data_type& _data) {
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return 1000000;
}
m_interface->write(&_data[0], _data.size()/m_interface->getInterfaceFormat().getChunkSize());
// TODO : Do it better ...
return m_interface->getBufferFillSizeMicrosecond().count()*4/5;
}
uint64_t getTimeBuffer() {
if(m_interface == nullptr) {
APPL_ERROR("nullptr interface");
return 0;
}
return m_interface->getBufferFillSizeMicrosecond().count();
}
};
std11::shared_ptr<river::Manager> g_manager;
static std::vector<std11::shared_ptr<InterfaceOutput>> g_listInterafceOut;
bool f_create(audio_core::create::Request& _req,
audio_core::create::Response& _res) {
std11::shared_ptr<InterfaceOutput> newInterface;
newInterface = std11::make_shared<InterfaceOutput>(g_manager,
_req.flowName,
audio::convertFormat(_req.channelFormat),
_req.frequency,
audio::convertChannel(_req.channelMap));
if (newInterface == nullptr) {
_res.handle = -1;
return false;
}
_res.handle = newInterface->getId();
mutex.lock();
g_listInterafceOut.push_back(newInterface);
mutex.unlock();
APPL_INFO("create : [" << _res.handle << "] type: '" << _req.flowName << "'");
return true;
}
bool f_remove(audio_core::remove::Request& _req,
audio_core::remove::Response& _res) {
std11::shared_ptr<InterfaceOutput> interface;
mutex.lock();
for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
if (g_listInterafceOut[iii] == nullptr) {
continue;
}
if (g_listInterafceOut[iii]->getId() == _req.handle) {
interface = g_listInterafceOut[iii];
// remove handle in the list
g_listInterafceOut[iii].reset();
g_listInterafceOut.erase(g_listInterafceOut.begin() + iii);
break;
}
}
mutex.unlock();
if (interface == nullptr) {
APPL_ERROR("remove : [" << _req.handle << "] Can not remove this ==> already removed.");
return false;
}
// Remove interface :
APPL_INFO("remove : [" << _req.handle << "] (start)");
interface.reset();
APPL_INFO("remove : [" << _req.handle << "] (end)");
return true;
}
bool f_write(audio_core::write::Request& _req,
audio_core::write::Response& _res) {
std11::shared_ptr<InterfaceOutput> interface;
// reset output
_res.waitTime = 0;
mutex.lock();
// Find the handle:
for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
if (g_listInterafceOut[iii] == nullptr) {
continue;
}
if (g_listInterafceOut[iii]->getId() == _req.handle) {
interface = g_listInterafceOut[iii];
break;
}
}
mutex.unlock();
if (interface == nullptr) {
APPL_ERROR("write : [" << _req.handle << "] Can not write ==> handle does not exist...");
return false;
}
APPL_INFO("write : [" << _req.handle << "] (start)");
_res.waitTime = interface->write(_req.data);
APPL_INFO("write : [" << _req.handle << "] (end)");
return true;
}
bool f_getBufferTime(audio_core::getBufferTime::Request& _req,
audio_core::getBufferTime::Response& _res) {
std11::shared_ptr<InterfaceOutput> interface;
// reset output
_res.microseconds = 0;
mutex.lock();
// Find the handle:
for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
if (g_listInterafceOut[iii] == nullptr) {
continue;
}
if (g_listInterafceOut[iii]->getId() == _req.handle) {
interface = g_listInterafceOut[iii];
break;
}
}
mutex.unlock();
if (interface == nullptr) {
APPL_ERROR("getBufferTime : [" << _req.handle << "] Can not get time ==> handle does not exist...");
return false;
}
_res.microseconds = interface->getTimeBuffer();
return true;
}
/**
@ -400,9 +290,11 @@ int main(int _argc, char **_argv) {
ros::NodeHandle nodeHandlePrivate("~");
std11::shared_ptr<river::Manager> m_manager = river::Manager::create("ROS node");
std11::shared_ptr<InterfaceInput> m_input = std11::make_shared<InterfaceInput>(m_manager, "microphone", "mic");
g_manager = river::Manager::create("ROS node");
// start publishing of Microphone
std11::shared_ptr<InterfaceInput> m_input = std11::make_shared<InterfaceInput>(g_manager, "microphone", "microphone", false);
// start publishing of Speaker feedback
std11::shared_ptr<InterfaceInput> m_feedback = std11::make_shared<InterfaceInput>(g_manager, "speaker", "speaker", true);
/*
* A count of how many messages we have sent. This is used to create
@ -410,7 +302,8 @@ int main(int _argc, char **_argv) {
*/
ros::spin();
m_input.reset();
m_manager.reset();
m_feedback.reset();
g_manager.reset();
river::unInit();
return 0;

View File

@ -1,10 +1,14 @@
# descriptive flow type
string typeFlow
string flowName
# speaker timestamp playing request (0 = as soon as possible)
time stamp
# data frequency
int32 frequency
# channel order of the current buffer
uint8[] channelMap
# Channel format of the data
uint8 channelFormat
---
int64 handle
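
For clarity, a minimal client-side sketch of calling this service with the new fields (the helper name createStereoOutput is illustrative; the "create" service name and the channel/format constants are the ones used by the nodes in this commit):

#include "ros/ros.h"
#include "audio_core/create.h"
#include "audio_msg/AudioBuffer.h"

// Illustrative helper: request a stereo int16 output at 48 kHz and return its handle.
int64_t createStereoOutput(ros::NodeHandle& _n) {
	ros::ServiceClient client = _n.serviceClient<audio_core::create>("create");
	audio_core::create srv;
	srv.request.flowName = "speaker";    // descriptive flow name
	srv.request.stamp = ros::Time();     // 0 = play as soon as possible
	srv.request.frequency = 48000;       // sample rate in Hz
	srv.request.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
	srv.request.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
	srv.request.channelFormat = audio_msg::AudioBuffer::FORMAT_INT16;
	if (client.call(srv) == false) {
		return -1;                       // service call failed
	}
	return srv.response.handle;          // audio interface UID
}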

View File

@ -1,4 +1,4 @@
# audio interface UID
int64 handle
---
uint32 time
uint64 microseconds

View File

@ -1,3 +1,3 @@
int64 handle
---
int16[] data
uint8[] data

View File

@ -1,6 +1,6 @@
# audio interface UID
int64 handle
# interleaved data of the audio buffer
int16[] data
int8[] data
---
uint32 waitTime

View File

@ -1,19 +1,43 @@
#################################
# enum: channel order properties:
#################################
uint8 CHANNEL_UNKNOW=0
uint8 CHANNEL_FRONT_LEFT=1
uint8 CHANNEL_FRONT_CENTER=2
uint8 CHANNEL_FRONT_RIGHT=3
uint8 CHANNEL_REAR_LEFT=4
uint8 CHANNEL_REAR_CENTER=5
uint8 CHANNEL_REAR_RIGHT=6
uint8 CHANNEL_SURROUND_LEFT=7
uint8 CHANNEL_SURROUND_RIGHT=8
uint8 CHANNEL_SUBWOOFER=9
uint8 CHANNEL_LFE=10
#######################
# enum: Channel Format:
#######################
uint8 FORMAT_UNKNOW=0
uint8 FORMAT_INT8=1
uint8 FORMAT_INT8_ON_INT16=2
uint8 FORMAT_INT16=3
uint8 FORMAT_INT16_ON_INT32=4
uint8 FORMAT_INT24=5
uint8 FORMAT_INT32=6
uint8 FORMAT_INT32_ON_INT64=7
uint8 FORMAT_INT64=8
uint8 FORMAT_FLOAT=9
uint8 FORMAT_DOUBLE=10
##########
# Message:
##########
# timestamp of the audio buffer
Header header
# current frequency of the audio interface
uint16 frequency
# channel order properties :
uint8 CHANNEL_FRONT_LEFT=0
uint8 CHANNEL_FRONT_CENTER=1
uint8 CHANNEL_FRONT_RIGHT=2
uint8 CHANNEL_REAR_LEFT=3
uint8 CHANNEL_REAR_CENTER=4
uint8 CHANNEL_REAR_RIGHT=5
uint8 CHANNEL_SURROUND_LEFT=6
uint8 CHANNEL_SURROUND_RIGHT=7
uint8 CHANNEL_SUBWOOFER=8
uint8 CHANNEL_LFE=9
# channel order of the current buffer
uint8[] channelMap
# Channel format of the data
uint8 channelFormat
# interleaved data of the audio buffer
int16[] data
uint8[] data
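
Since the payload is now a raw byte array, a subscriber needs channelFormat to recover the sample type; below is a minimal sketch for the int16 case (the helper name decodeInt16 is illustrative; audio::convertFormat and audio::getFormatBytes are the conversion helpers already used by the nodes in this commit):

#include "audio_msg/AudioBuffer.h"
#include <audio/format.h>
#include <stdint.h>
#include <cstring>
#include <vector>

// Illustrative helper: copy the raw byte payload back into int16 samples.
bool decodeInt16(const audio_msg::AudioBuffer::ConstPtr& _msg, std::vector<int16_t>& _out) {
	audio::format format = audio::convertFormat(_msg->channelFormat);
	if (format != audio::format_int16) {
		return false;                                    // only the int16 case is handled here
	}
	int32_t sampleBytes = audio::getFormatBytes(format); // 2 bytes per int16 sample
	size_t nbSample = _msg->data.size() / sampleBytes;   // interleaved samples, all channels
	_out.resize(nbSample);
	memcpy(&_out[0], &_msg->data[0], nbSample * sizeof(int16_t));
	return true;
}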

View File

@ -0,0 +1,58 @@
cmake_minimum_required(VERSION 2.8.3)
project(test_audio_generator)
set(CMAKE_VERBOSE_MAKEFILE ON)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
roscpp
rospy
std_msgs
message_generation
)
###################################
## catkin specific configuration ##
###################################
catkin_package(
CATKIN_DEPENDS roscpp std_msgs
DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
${catkin_INCLUDE_DIRS}
)
## Declare a cpp executable
add_executable(${PROJECT_NAME}_node
src/main.cpp
)
target_link_libraries(${PROJECT_NAME}_node ${ALSA_LIBRARIES} pthread)
## Specify libraries to link a library or executable target against
target_link_libraries(${PROJECT_NAME}_node
${catkin_LIBRARIES}
)
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executables and/or libraries for installation
install(TARGETS ${PROJECT_NAME}_node
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

View File

@ -0,0 +1,22 @@
<?xml version="1.0"?>
<package>
<name>test_audio_generator</name>
<version>0.0.1</version>
<description>Generic sine generator</description>
<maintainer email="yui.heero@gmail.com">Edouard DUPIN</maintainer>
<license>Apache-2.0</license>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>message_generation</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>audio_core</build_depend>
<build_depend>audio_msg</build_depend>
<run_depend>message_runtime</run_depend>
<run_depend>roscpp</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>audio_core</run_depend>
<run_depend>audio_msg</run_depend>
</package>

View File

@ -0,0 +1,182 @@
#include "ros/ros.h"
#include "audio_msg/AudioBuffer.h"
#include "audio_core/create.h"
#include "audio_core/remove.h"
#include "audio_core/write.h"
#include "audio_core/getBufferTime.h"
#include <sstream>
FILE* filee = NULL;
/**
* Test generator: creates an audio output interface, streams a generated sine wave to it, then releases it.
*/
void audioCallback(const audio_msg::AudioBuffer::ConstPtr& msg) {
fwrite(&msg->data[0] , sizeof(int16_t), msg->data.size(), filee);
ROS_INFO("get message: freq=%d nbChannel=%u nbSample=%ld", (int32_t)msg->frequency, (int32_t)msg->channelMap.size(), msg->data.size());
}
int64_t audioInterface_create(ros::NodeHandle& _n, ros::Time _timeUs, int32_t _freq, const std::vector<uint8_t> _channelMap) {
ros::ServiceClient client = _n.serviceClient<audio_core::create>("create");
audio_core::create srv;
srv.request.stamp = _timeUs;
srv.request.frequency = _freq;
srv.request.channelMap = _channelMap;
srv.request.channelFormat = audio_msg::AudioBuffer::FORMAT_INT16;
if (client.call(srv)) {
ROS_INFO("uid: %ld", srv.response.handle);
return srv.response.handle;
} else {
ROS_ERROR("Failed to call service create");
return -1;
}
}
bool audioInterface_remove(ros::NodeHandle& _n, int64_t _uid) {
ros::ServiceClient client = _n.serviceClient<audio_core::remove>("remove");
audio_core::remove srv;
srv.request.handle = _uid;
if (client.call(srv)) {
ROS_INFO("remove uid: %ld", _uid);
return true;
} else {
ROS_ERROR("Failed to call service remove");
return false;
}
}
/**
* @brief return the time to wait (in microseconds) before sending the next buffer
*/
uint32_t audioInterface_write(ros::NodeHandle& _n, int64_t _uid, const std::vector<int16_t>& _value) {
ros::ServiceClient client = _n.serviceClient<audio_core::write>("write");
audio_core::write srv;
srv.request.handle = _uid;
srv.request.data.resize(_value.size()*2);
memcpy(&srv.request.data[0], &_value[0], srv.request.data.size());
if (client.call(srv)) {
ROS_INFO("write: need to wait %u us", srv.response.waitTime);
return srv.response.waitTime;
} else {
ROS_ERROR("Failed to call service write");
assert(0);
return 0;
}
}
uint32_t audioInterface_getBufferTime(ros::NodeHandle& _n, int64_t _uid) {
ros::ServiceClient client = _n.serviceClient<audio_core::getBufferTime>("getBufferTime");
audio_core::getBufferTime srv;
srv.request.handle = _uid;
if (client.call(srv)) {
//ROS_INFO("write need wait time : %d", srv.response.time);
return srv.response.microseconds;
}
ROS_ERROR("Failed to call service getBufferTime");
return 0;
}
void usage() {
ROS_INFO("test_audio_generator usage [channel] [sampleRate] [nbChannels] [frequency] [timeToPlay]:");
ROS_INFO(" [channel] output to write the data to");
ROS_INFO(" [sampleRate] number of samples per second");
ROS_INFO(" [nbChannels] number of channels");
ROS_INFO(" [frequency] frequency to generate (Hz)");
ROS_INFO(" [timeToPlay] duration of the generated signal (seconds)");
ROS_INFO("example:");
ROS_INFO(" rosrun test_audio_generator test_audio_generator_node /audio/ 48000 2 440 10");
exit (-1);
}
int main(int argc, char **argv) {
if (argc != 6 ) {
ROS_ERROR("not enough arguments: %d", argc);
usage();
}
std::string p_channelToPlay = argv[1];
int32_t p_sampleRate = atoi(argv[2]);
int32_t p_nbChannels = atoi(argv[3]);
int32_t p_frequency = atoi(argv[4]);
int32_t p_timeToPlay = atoi(argv[5]);
sscanf(argv[2], "%d", &p_sampleRate);
sscanf(argv[3], "%d", &p_nbChannels);
sscanf(argv[4], "%d", &p_frequency);
sscanf(argv[5], "%d", &p_timeToPlay);
ROS_INFO(" rosrun test_audio_generator test_audio_generator_node %s %d %d %d %d", p_channelToPlay.c_str(), p_sampleRate, p_nbChannels, p_frequency, p_timeToPlay);
ros::init(argc, argv, "test_audio_generator");
ros::NodeHandle n;
double phase = 0;
// send data:
ros::Time timee = ros::Time();
int32_t baseDataSize = 0;
if (p_sampleRate <= 32000) {
baseDataSize = 1024*2;
} else {
baseDataSize = 1024*8;
}
std::vector<uint8_t> channelMap;
if (p_nbChannels == 1) {
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_CENTER);
} else if (p_nbChannels == 2) {
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
} else if (p_nbChannels == 3) {
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_CENTER);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
} else if (p_nbChannels == 4) {
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_LEFT);
channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_RIGHT);
} else {
ROS_ERROR("unsupported number of channels: %d not in [1,2,3,4]", p_nbChannels);
exit(-1);
}
// connect:
int64_t uid = audioInterface_create(n, timee, p_sampleRate, channelMap);
std::vector<int16_t> data;
data.resize(baseDataSize*channelMap.size());
double baseCycle = 2.0*M_PI/(double)p_sampleRate * (double)p_frequency;
int32_t generateTime = (p_timeToPlay * p_sampleRate) / baseDataSize;
for (int32_t kkk=0; kkk<generateTime; ++kkk) {
for (int32_t iii=0; iii<data.size()/channelMap.size(); iii++) {
for (int32_t jjj=0; jjj<channelMap.size(); jjj++) {
data[channelMap.size()*iii+jjj] = cos(phase) * 30000;
}
phase += baseCycle;
if (phase >= 2*M_PI) {
phase -= 2*M_PI;
}
}
//ROS_INFO("send data");
int32_t needSleep = audioInterface_write(n, uid, data);
if (needSleep > 0) {
ROS_INFO("need sleep %d", needSleep);
usleep(needSleep);
} else {
ROS_INFO("no need to sleep");
usleep(needSleep);
}
}
// wait for the end of playing:
usleep(200000);
uint32_t waitTime = audioInterface_getBufferTime(n, uid);
while (waitTime>0) {
ROS_INFO("wait end of playing ... %u us", waitTime);
usleep(waitTime/2);
waitTime = audioInterface_getBufferTime(n, uid);
}
// close:
audioInterface_remove(n, uid);
return 0;
}

View File

@ -11,6 +11,7 @@ find_package(catkin REQUIRED COMPONENTS
rospy
std_msgs
message_generation
audio
)
################################################

View File

@ -12,11 +12,13 @@
<build_depend>roscpp</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>audio_msg</build_depend>
<build_depend>audio</build_depend>
<run_depend>message_runtime</run_depend>
<run_depend>roscpp</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>audio_core</run_depend>
<run_depend>audio_msg</run_depend>
<run_depend>audio</run_depend>
</package>

View File

@ -1,5 +1,7 @@
#include "ros/ros.h"
#include "audio_msg/AudioBuffer.h"
#include <audio/format.h>
#include <audio/channel.h>
#include <sstream>
@ -7,9 +9,12 @@ FILE* filee = NULL;
/**
* This tutorial demonstrates simple receipt of messages over the ROS system.
*/
void audioCallback(const audio_msg::AudioBuffer::ConstPtr& msg) {
fwrite(&msg->data[0] , sizeof(int16_t), msg->data.size(), filee);
ROS_INFO_STREAM("get message: freq=" << msg->frequency << " nbChannel=" << msg->channelMap.size() << " nbSample=" << msg->data.size()/msg->channelMap.size());
void audioCallback(const audio_msg::AudioBuffer::ConstPtr& _msg) {
audio::format format = audio::convertFormat(_msg->channelFormat);
int32_t nbByteSample = audio::getFormatBytes(format);
std::vector<enum audio::channel> channel = audio::convertChannel(_msg->channelMap);
fwrite(&_msg->data[0], nbByteSample, _msg->data.size()/nbByteSample, filee);
ROS_INFO_STREAM("get message: freq=" << _msg->frequency << " nbChannel=" << channel << " nbSample=" << _msg->data.size()/(_msg->channelMap.size()*nbByteSample) << " format=" << format);
}
void usage() {