diff --git a/audio_core/CMakeLists.txt b/audio_core/CMakeLists.txt
index 7637dee..2069667 100644
--- a/audio_core/CMakeLists.txt
+++ b/audio_core/CMakeLists.txt
@@ -10,6 +10,7 @@ find_package(catkin REQUIRED COMPONENTS
   roscpp
   rospy
   std_msgs
+  audio_msg
   message_generation
   river
 )
diff --git a/audio_core/src/main.cpp b/audio_core/src/main.cpp
index b5f6dcd..b4405dd 100644
--- a/audio_core/src/main.cpp
+++ b/audio_core/src/main.cpp
@@ -13,279 +13,9 @@
 #include
 #include "debug.h"
 #include
+#include
-ros::Time startTime;
-/*
-static const double delayTimeBeforRemoveOutput = 30.0; // 30 second delay befor removing output that not create sound.
-
-class IOPorperty {
-	public:
-		float frequencyIn;
-		float frequencyOut;
-		std::vector inputChannelMap;
-		std::vector outputChannelMap;
-};
-IOPorperty audioProperties;
-
-
-class InterfacePorperty {
-	public:
-		int64_t uid;
-		std::string descriptiveString;
-		float frequency;
-		std::vector channelMap;
-		std::vector data;
-		double playTime;
-		double requestTime;
-		InterfacePorperty():
-		  uid(0),
-		  frequency(0),
-		  playTime(0),
-		  requestTime(0) {
-			static int64_t suid = 25;
-			uid = suid++;
-		}
-};
-
-std::vector listFlow;
-*/
 std11::mutex mutex;
-/*
-// RT audio input callback
-int inoutInput(void* _outputBuffer,
-               void* _inputBuffer,
-               unsigned int _nBufferFrames,
-               double _streamTime,
-               RtAudioStreamStatus _status,
-               void* _data) {
-	IOPorperty* audioProperty = (IOPorperty*)_data;
-	// Since the number of input and output channels is equal, we can do
-	// a simple buffer copy operation here.
-	if (_status) {
-		ROS_ERROR("Stream overflow detected.");
-	}
-	// check if someone suscribe to the output:
-	if (stream_microphone.getNumSubscribers() > 0) {
-		//ROS_INFO("Publish data ... %d frames time %lf", _nBufferFrames, _streamTime);
-		audio_msg::AudioBuffer msg;
-		// create the Ros timestamp
-		msg.header.stamp = startTime + ros::Duration(_streamTime, (_streamTime-(int32_t)_streamTime)*1000000000.0);
-		// set message frequency
-		msg.frequency = audioProperty->frequencyIn;
-		// set channel map properties
-		msg.channelMap = audioProperty->inputChannelMap;
-		// copy data:
-		msg.data.resize(_nBufferFrames*audioProperty->inputChannelMap.size());
-		memcpy(&msg.data[0], _inputBuffer, _nBufferFrames*audioProperty->inputChannelMap.size()*sizeof(int16_t));
-		// publish message
-		stream_microphone.publish(msg);
-	}
-	return 0;
-}
-
-// RT audio out callback
-int inoutOutput(void* _outputBuffer,
-               void* _inputBuffer,
-               unsigned int _nBufferFrames,
-               double _streamTime,
-               RtAudioStreamStatus _status,
-               void* _data) {
-	IOPorperty* audioProperty = (IOPorperty*)_data;
-	// Since the number of input and output channels is equal, we can do
-	// a simple buffer copy operation here.
-	if (_status) {
-		ROS_ERROR("Stream underflow detected.");
-	}
-	std::vector output;
-	int32_t newsize = (int64_t)audioProperty->outputChannelMap.size()*(int64_t)_nBufferFrames;
-	//ROS_INFO("read %d*%d=%d ", audioProperty->outputChannelMap.size(), _nBufferFrames, newsize);
-	output.resize(newsize, 0);
-	mutex.lock();
-	std::vector::iterator it = listFlow.begin();
-	while (it != listFlow.end()) {
-		// copy data...
-		size_t availlable = it->data.size();
-		availlable = availlable<output.size() ? availlable : output.size();
-		if (availlable > 0) {
-			for (size_t iii=0; iii<availlable; ++iii) {
-				output[iii] += it->data[iii];
-			}
-			//ROS_INFO("write %ld (better have : %ld)", availlable, output.size());
-			it->data.erase(it->data.begin(), it->data.begin()+availlable);
-			it->playTime = _streamTime;
-		} else {
-			ROS_INFO("[%d] underflow %d", (int)it->uid, (int)output.size());
-			if (it->playTime <= 0) {
-				double delay = it->playTime + _streamTime;
-				if (delay > delayTimeBeforRemoveOutput) {
-					ROS_ERROR("[%d] underflow ==> too much : Remove interface (after %lf s)", (int)it->uid, delay);
-					it = listFlow.erase(it);
-					continue;
-				}
-			} else {
-				it->playTime = -_streamTime;
-			}
-		}
-		++it;
-	}
-	mutex.unlock();
-	if (_outputBuffer == NULL) {
-		ROS_ERROR("NULL output");
-		return 0;
-	}
-	int16_t* outputPointer = (int16_t*)_outputBuffer;
-	for (size_t iii=0; iii<output.size(); ++iii) {
-		outputPointer[iii] = output[iii]>32767?32767:(output[iii]<-32767?-32767:(int16_t)output[iii]);
-	}
-	// check if someone is connected
-	if (stream_speaker.getNumSubscribers() > 0) {
-		//ROS_INFO("Get data ... %d frames time %lf", _nBufferFrames, _streamTime);
-		audio_msg::AudioBuffer msg;
-		// Create the ROS message time
-		msg.header.stamp = startTime + ros::Duration(_streamTime, (_streamTime-(int32_t)_streamTime)*1000000000.0);
-		// set message frequency
-		msg.frequency = audioProperty->frequencyOut;
-		// set channel map properties
-		msg.channelMap = audioProperty->outputChannelMap;
-		// copy data:
-		msg.data.resize(_nBufferFrames*audioProperty->outputChannelMap.size());
-		memcpy(&msg.data[0], _outputBuffer, _nBufferFrames*audioProperty->outputChannelMap.size()*sizeof(int16_t));
-		// publish message
-		stream_speaker.publish(msg);
-	}
-	return 0;
-}
-*/
-
-bool f_create(audio_core::create::Request &req,
-              audio_core::create::Response &res) {
-	/*
-	InterfacePorperty newInterface;
-	newInterface.descriptiveString = req.typeFlow;
-	newInterface.frequency = req.frequency;
-	newInterface.channelMap = req.channelMap;
-	double requestTime = (double)req.stamp.sec + (double)req.stamp.nsec*0.0000000001;
-	newInterface.requestTime = requestTime;
-	newInterface.playTime = 0.0;
-	res.handle = newInterface.uid;
-	mutex.lock();
-	listFlow.push_back(newInterface);
-	mutex.unlock();
-	ROS_INFO("create : [%d] type : %s", (int)res.handle, req.typeFlow.c_str());
-	*/
-	return true;
-}
-
-bool f_remove(audio_core::remove::Request &req,
-              audio_core::remove::Response &res) {
-	/*
-	mutex.lock();
-	std::vector::iterator it = listFlow.begin();
-	while (it != listFlow.end()) {
-		if (it->uid == req.handle) {
-			it = listFlow.erase(it);
-		} else {
-			++it;
-		}
-	}
-	mutex.unlock();
-	ROS_INFO("remove : [%d]", (int)req.handle);
-	*/
-	return true;
-}
-
-
-bool f_write(audio_core::write::Request &req,
-             audio_core::write::Response &res) {
-	//ROS_INFO("write : [%ld] nbSample=%d", req.handle, (int32_t)req.data.size());
-	/*
-	mutex.lock();
-	std::vector::iterator it = listFlow.begin();
-	while (it != listFlow.end()) {
-		if (it->uid == req.handle) {
-			std::vector data = convert(it->channelMap, it->frequency, req.data, audioProperties.outputChannelMap, audioProperties.frequencyOut);
-			size_t oldSize = it->data.size();
-			it->data.resize(oldSize + data.size());
-			memcpy(&it->data[oldSize], &data[0], data.size()*sizeof(int16_t));
-			res.waitTime = (float)(it->data.size() / audioProperties.outputChannelMap.size()) / (float)audioProperties.frequencyOut * 1000000.0f;
-			if (res.waitTime > 200000) {
-				res.waitTime -= 200000;
-			} else {
-				res.waitTime = 0;
-			}
-			break;
-		}
-		++it;
-	}
-	mutex.unlock();
-	*/
-	return true;
-}
-
-bool f_getBufferTime(audio_core::getBufferTime::Request &req,
-                     audio_core::getBufferTime::Response &res) {
-	//ROS_INFO("get Time: [%ld]", req.handle, (int32_t)req.data.size());
-	/*
-	mutex.lock();
-	std::vector::iterator it = listFlow.begin();
-	while (it != listFlow.end()) {
-		if (it->uid == req.handle) {
-			res.time = (float)(it->data.size() / audioProperties.outputChannelMap.size()) / (float)audioProperties.frequencyOut * 1000000.0f;
-			break;
-		}
-		++it;
-	}
-	mutex.unlock();
-	*/
-	return true;
-}
-/*
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_LEFT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_RIGHT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_SURROUND_LEFT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_SURROUND_RIGHT);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_LFE);
-	output.push_back(audio_msg::AudioBuffer::CHANNEL_SUBWOOFER);
-*/
-
-
-
-class InterfaceOutput {
-	public:
-		std::vector m_channelMap;
-		std11::shared_ptr m_manager;
-		std11::shared_ptr m_interface;
-	public:
-		InterfaceOutput(std11::shared_ptr _manager) :
-		  m_manager(_manager) {
-			//Set stereo output:
-			m_channelMap.push_back(audio::channel_frontLeft);
-			m_channelMap.push_back(audio::channel_frontRight);
-			m_interface = m_manager->createOutput(48000,
-			                                      m_channelMap,
-			                                      audio::format_int16,
-			                                      "speaker");
-			if(m_interface == nullptr) {
-				APPL_ERROR("nullptr interface");
-				return;
-			}
-			m_interface->setReadwrite();
-		}
-		void start() {
-			if(m_interface == nullptr) {
-				APPL_ERROR("nullptr interface");
-				return;
-			}
-			m_interface->start();
-		}
-		void stop() {
-			//m_interface->write(&data[0], data.size()/m_channelMap.size());
-			m_interface->stop();
-		}
-};
-
+static std11::mutex mutex;
 
 class InterfaceInput {
 	public:
@@ -294,7 +24,7 @@ class InterfaceInput {
 		ros::Publisher m_stream;
 		std11::mutex mutex;
 	public:
-		InterfaceInput(std11::shared_ptr<river::Manager> _manager, const std::string& _input="microphone", const std::string& _publisher="microphone") :
+		InterfaceInput(std11::shared_ptr<river::Manager> _manager, const std::string& _input="microphone", const std::string& _publisher="microphone", bool _feedback=false) :
 		  m_manager(_manager) {
 			ros::NodeHandle nodeHandlePrivate("~");
 			m_stream = nodeHandlePrivate.advertise<audio_msg::AudioBuffer>(_publisher, 100);
@@ -302,10 +32,17 @@ class InterfaceInput {
 			std::vector<audio::channel> channelMap;
 			channelMap.push_back(audio::channel_frontLeft);
 			channelMap.push_back(audio::channel_frontRight);
-			m_interface = m_manager->createInput(48000,
-			                                     channelMap,
-			                                     audio::format_int16,
-			                                     _input);
+			if (_feedback == false) {
+				m_interface = m_manager->createInput(48000,
+				                                     channelMap,
+				                                     audio::format_int16,
+				                                     _input);
+			} else {
+				m_interface = m_manager->createFeedback(48000,
+				                                        channelMap,
+				                                        audio::format_int16,
+				                                        _input);
+			}
 			if(m_interface == nullptr) {
 				APPL_ERROR("nullptr interface");
 				return;
@@ -340,34 +77,187 @@ class InterfaceInput {
 			}
 			// check if someone suscribe to the output:
 			if (m_stream.getNumSubscribers() > 0) {
-				//ROS_INFO("Publish data ... %d frames time %lf", _nBufferFrames, _streamTime);
 				audio_msg::AudioBuffer msg;
 				// create the Ros timestamp
 				msg.header.stamp = ros::Time::now();//_time;
 				// set message frequency
 				msg.frequency = _frequency;
-				// set channel mas properties
-				//msg.channelMap = audioProperty->inputChannelMap;
-				msg.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
-				msg.channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
+				// set channel map properties
+				msg.channelMap = audio::convertChannel(_map);
+				// Set the format of flow
+				msg.channelFormat = audio::convertFormat(_format);
 				// copy data:
-				msg.data.resize(_nbChunk*_map.size());
-				memcpy(&msg.data[0], _data, _nbChunk*_map.size()*sizeof(int16_t));
+				msg.data.resize(_nbChunk*_map.size()*audio::getFormatBytes(_format));
+				// copy data on the ROS buffer interface
+				memcpy(&msg.data[0], _data, _nbChunk*_map.size()*audio::getFormatBytes(_format));
 				// publish message
 				m_stream.publish(msg);
 			}
 		}
-		void start() {
+};
+
+
+class InterfaceOutput {
+	public:
+		std11::shared_ptr<river::Manager> m_manager;
+		std11::shared_ptr<river::Interface> m_interface;
+		int64_t m_id;
+	public:
+		InterfaceOutput(std11::shared_ptr<river::Manager> _manager,
+		                const std::string& _name,
+		                enum audio::format _format,
+		                uint32_t _frequency,
+		                const std::vector<audio::channel>& _map) :
+		  m_manager(_manager) {
+			static int64_t uid = etk::tool::irand(0, 56000);
+			m_id = uid++;
+			uid += etk::tool::irand(0, 10);
+			m_interface = m_manager->createOutput(_frequency,
+			                                      _map,
+			                                      _format,
+			                                      "speaker");
+			if(m_interface == nullptr) {
+				APPL_ERROR("nullptr interface");
+				m_id = -1;
+				return;
+			}
+			m_interface->setReadwrite();
+			m_interface->start();
+		}
+		~InterfaceOutput() {
 			if(m_interface == nullptr) {
 				APPL_ERROR("nullptr interface");
 				return;
 			}
-			// wait 2 second ...
-			usleep(20000000);
 			m_interface->stop();
+			m_interface.reset();
+		}
+		int64_t getId() {
+			return m_id;
+		}
+		uint64_t write(const audio_core::writeRequest_<std::allocator<void> >::_data_type& _data) {
+			if(m_interface == nullptr) {
+				APPL_ERROR("nullptr interface");
+				return 1000000;
+			}
+			m_interface->write(&_data[0], _data.size()/m_interface->getInterfaceFormat().getChunkSize());
+			// TODO : Do it better ...
+			return m_interface->getBufferFillSizeMicrosecond().count()*4/5;
+		}
+		uint64_t getTimeBuffer() {
+			if(m_interface == nullptr) {
+				APPL_ERROR("nullptr interface");
+				return 0;
+			}
+			return m_interface->getBufferFillSizeMicrosecond().count();
 		}
 };
 
+std11::shared_ptr<river::Manager> g_manager;
+static std::vector<std11::shared_ptr<InterfaceOutput>> g_listInterafceOut;
+
+bool f_create(audio_core::create::Request& _req,
+              audio_core::create::Response& _res) {
+	std11::shared_ptr<InterfaceOutput> newInterface;
+	
+	newInterface = std11::make_shared<InterfaceOutput>(g_manager,
+	                                                   _req.flowName,
+	                                                   audio::convertFormat(_req.channelFormat),
+	                                                   _req.frequency,
+	                                                   audio::convertChannel(_req.channelMap));
+	if (newInterface == nullptr) {
+		_res.handle = -1;
+		return false;
+	}
+	_res.handle = newInterface->getId();
+	mutex.lock();
+	g_listInterafceOut.push_back(newInterface);
+	mutex.unlock();
+	APPL_INFO("create : [" << _res.handle << "] type: '" << _req.flowName << "'");
+	return true;
+}
+
+bool f_remove(audio_core::remove::Request& _req,
+              audio_core::remove::Response& _res) {
+	std11::shared_ptr<InterfaceOutput> interface;
+	mutex.lock();
+	for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
+		if (g_listInterafceOut[iii]->getId() == _req.handle) {
+			interface = g_listInterafceOut[iii];
+			// remove handle in the list
+			g_listInterafceOut[iii].reset();
+			g_listInterafceOut.erase(g_listInterafceOut.begin() + iii);
+			break;
+		}
+	}
+	mutex.unlock();
+	if (interface == nullptr) {
+		APPL_ERROR("remove : [" << _req.handle << "] Can not remove this ==> already removed.");
+		return false;
+	}
+	// Remove interface :
+	APPL_INFO("remove : [" << _req.handle << "] (start)");
+	interface.reset();
+	APPL_INFO("remove : [" << _req.handle << "] (end)");
+	return true;
+}
+
+
+bool f_write(audio_core::write::Request& _req,
+             audio_core::write::Response& _res) {
+	std11::shared_ptr<InterfaceOutput> interface;
+	// reset output
+	_res.waitTime = 0;
+	mutex.lock();
+	// Find the handle:
+	for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
+		if (g_listInterafceOut[iii]->getId() == _req.handle) {
+			interface = g_listInterafceOut[iii];
+			break;
+		}
+	}
+	mutex.unlock();
+	if (interface == nullptr) {
+		APPL_ERROR("write : [" << _req.handle << "] Can not write ==> handle does not exist...");
+		return false;
+	}
+	APPL_INFO("write : [" << _req.handle << "] (start)");
+	_res.waitTime = interface->write(_req.data);
+	APPL_INFO("write : [" << _req.handle << "] (end)");
+	return true;
+}
+
+bool f_getBufferTime(audio_core::getBufferTime::Request& _req,
+                     audio_core::getBufferTime::Response& _res) {
+	std11::shared_ptr<InterfaceOutput> interface;
+	// reset output
+	_res.microseconds = 0;
+	mutex.lock();
+	// Find the handle:
+	for(size_t iii=0; iii<g_listInterafceOut.size(); ++iii) {
+		if (g_listInterafceOut[iii]->getId() == _req.handle) {
+			interface = g_listInterafceOut[iii];
+			break;
+		}
+	}
+	mutex.unlock();
+	if (interface == nullptr) {
+		APPL_ERROR("getBufferTime : [" << _req.handle << "] Can not get time ==> handle does not exist...");
+		return false;
+	}
+	_res.microseconds = interface->getTimeBuffer();
+	return true;
+}
+
 /**
@@ -400,9 +290,11 @@ int main(int _argc, char **_argv) {
 	ros::NodeHandle nodeHandlePrivate("~");
 
-	std11::shared_ptr<river::Manager> m_manager = river::Manager::create("ROS node");
-
-	std11::shared_ptr<InterfaceInput> m_input = std11::make_shared<InterfaceInput>(m_manager, "microphone", "mic");
+	g_manager = river::Manager::create("ROS node");
+	// start publishing of Microphone
+	std11::shared_ptr<InterfaceInput> m_input = std11::make_shared<InterfaceInput>(g_manager, "microphone", "microphone", false);
+	// start publishing of Speaker feedback
+	std11::shared_ptr<InterfaceInput> m_feedback = std11::make_shared<InterfaceInput>(g_manager, "speaker", "speaker", true);
 	/*
 	 * A count of how many messages we have sent. This is used to create
@@ -410,7 +302,8 @@ int main(int _argc, char **_argv) {
 	 */
 	ros::spin();
 	m_input.reset();
-	m_manager.reset();
+	m_feedback.reset();
+	g_manager.reset();
 	river::unInit();
 	return 0;
diff --git a/audio_core/srv/create.srv b/audio_core/srv/create.srv
index 7cfdfe4..f36afea 100644
--- a/audio_core/srv/create.srv
+++ b/audio_core/srv/create.srv
@@ -1,10 +1,14 @@
+
+
 # decriptive flow type
-string typeFlow
+string flowName
 # speaker timestamp playing request (0 to As soon as possible)
 time stamp
 # data frequency
 int32 frequency
 # channel order of the current buffer
 uint8[] channelMap
+# Channel format of the data
+uint8 channelFormat
 ---
 int64 handle
\ No newline at end of file
diff --git a/audio_core/srv/getBufferTime.srv b/audio_core/srv/getBufferTime.srv
index 8b340fd..42c8a98 100644
--- a/audio_core/srv/getBufferTime.srv
+++ b/audio_core/srv/getBufferTime.srv
@@ -1,4 +1,4 @@
 # audio interface UID
 int64 handle
 ---
-uint32 time
\ No newline at end of file
+uint64 microseconds
\ No newline at end of file
diff --git a/audio_core/srv/read.srv b/audio_core/srv/read.srv
index 87e2768..5b3fe6b 100644
--- a/audio_core/srv/read.srv
+++ b/audio_core/srv/read.srv
@@ -1,3 +1,3 @@
 int64 handle
 ---
-int16[] data
\ No newline at end of file
+uint8[] data
\ No newline at end of file
diff --git a/audio_core/srv/write.srv b/audio_core/srv/write.srv
index b0010bc..8f3c009 100644
--- a/audio_core/srv/write.srv
+++ b/audio_core/srv/write.srv
@@ -1,6 +1,6 @@
 # audio interface UID
 int64 handle
 # interlaced data of the audio buffer
-int16[] data
+int8[] data
 ---
 uint32 waitTime
\ No newline at end of file
diff --git a/audio_msg/msg/AudioBuffer.msg b/audio_msg/msg/AudioBuffer.msg
index 65f49a8..110d2bb 100644
--- a/audio_msg/msg/AudioBuffer.msg
+++ b/audio_msg/msg/AudioBuffer.msg
@@ -1,19 +1,43 @@
+#################################
+# enum: channel order properties:
+#################################
+uint8 CHANNEL_UNKNOW=0
+uint8 CHANNEL_FRONT_LEFT=1
+uint8 CHANNEL_FRONT_CENTER=2
+uint8 CHANNEL_FRONT_RIGHT=3
+uint8 CHANNEL_REAR_LEFT=4
+uint8 CHANNEL_REAR_CENTER=5
+uint8 CHANNEL_REAR_RIGHT=6
+uint8 CHANNEL_SURROUND_LEFT=7
+uint8 CHANNEL_SURROUND_RIGHT=8
+uint8 CHANNEL_SUBWOOFER=9
+uint8 CHANNEL_LFE=10
+
+#######################
+# enum: Channel Format:
+#######################
+uint8 FORMAT_UNKNOW=0
+uint8 FORMAT_INT8=1
+uint8 FORMAT_INT8_ON_INT16=2
+uint8 FORMAT_INT16=3
+uint8 FORMAT_INT16_ON_INT32=4
+uint8 FORMAT_INT24=5
+uint8 FORMAT_INT32=6
+uint8 FORMAT_INT32_ON_INT64=7
+uint8 FORMAT_INT64=8
+uint8 FORMAT_FLOAT=9
+uint8 FORMAT_DOUBLE=10
+
+##########
+# Message:
+##########
 # timestanp the audio buffer
 Header header
 # current frequency of the audio interface
 uint16 frequency
-# channel order properties :
-uint8 CHANNEL_FRONT_LEFT=0
-uint8 CHANNEL_FRONT_CENTER=1
-uint8 CHANNEL_FRONT_RIGHT=2
-uint8 CHANNEL_REAR_LEFT=3
-uint8 CHANNEL_REAR_CENTER=4
-uint8 CHANNEL_REAR_RIGHT=5
-uint8 CHANNEL_SURROUND_LEFT=6
-uint8 CHANNEL_SURROUND_RIGHT=7
-uint8 CHANNEL_SUBWOOFER=8
-uint8 CHANNEL_LFE=9
 # channel order of the current buffer
 uint8[] channelMap
+# Channel format of the data
+uint8 channelFormat
 # interlaced data of the audio buffer
-int16[] data
\ No newline at end of file
+uint8[] data
\ No newline at end of file
diff --git a/test/test_audio_generator/CMakeLists.txt b/test/test_audio_generator/CMakeLists.txt
new file mode 100644
index 0000000..83d13fa
--- /dev/null
+++ b/test/test_audio_generator/CMakeLists.txt
@@ -0,0 +1,58 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(test_audio_generator)
+
+set(CMAKE_VERBOSE_MAKEFILE ON)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  roscpp
+  rospy
+  std_msgs
+  message_generation
+)
+
+###################################
+## catkin specific configuration ##
+###################################
+catkin_package(
+  CATKIN_DEPENDS roscpp std_msgs
+  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+  ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a cpp executable
+add_executable(${PROJECT_NAME}_node
+  src/main.cpp
+)
+
+target_link_libraries(${PROJECT_NAME}_node ${ALSA_LIBRARIES} pthread)
+
+## Specify libraries to link a library or executable target against
+target_link_libraries(${PROJECT_NAME}_node
+  ${catkin_LIBRARIES}
+)
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executables and/or libraries for installation
+install(TARGETS ${PROJECT_NAME}_node
+  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
diff --git a/test/test_audio_generator/package.xml b/test/test_audio_generator/package.xml
new file mode 100644
index 0000000..6c1b46a
--- /dev/null
+++ b/test/test_audio_generator/package.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<package>
+  <name>test_audio_generator</name>
+  <version>0.0.1</version>
+  <description>Generic sinus generator</description>
+  <maintainer>Edouard DUPIN</maintainer>
+  <license>Apache-2.0</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <build_depend>message_generation</build_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_depend>audio_core</build_depend>
+  <build_depend>audio_msg</build_depend>
+
+  <run_depend>message_runtime</run_depend>
+  <run_depend>roscpp</run_depend>
+  <run_depend>std_msgs</run_depend>
+  <run_depend>audio_core</run_depend>
+  <run_depend>audio_msg</run_depend>
+</package>
\ No newline at end of file
diff --git a/test/test_audio_generator/src/main.cpp b/test/test_audio_generator/src/main.cpp
new file mode 100644
index 0000000..d113dbb
--- /dev/null
+++ b/test/test_audio_generator/src/main.cpp
@@ -0,0 +1,182 @@
+#include "ros/ros.h"
+#include "audio_msg/AudioBuffer.h"
+#include "audio_core/create.h"
+#include "audio_core/remove.h"
+#include "audio_core/write.h"
+#include "audio_core/getBufferTime.h"
+
+#include
+
+FILE* filee = NULL;
+/**
+ * This tutorial demonstrates simple receipt of messages over the ROS system.
+ */
+void audioCallback(const audio_msg::AudioBuffer::ConstPtr& msg) {
+	fwrite(&msg->data[0], sizeof(int16_t), msg->data.size(), filee);
+	ROS_INFO("get message: freq=%d nbChannel=%u nbSample=%ld", (int32_t)msg->frequency, (int32_t)msg->channelMap.size(), msg->data.size());
+}
+
+int64_t audioInterface_create(ros::NodeHandle& _n, ros::Time _timeUs, int32_t _freq, const std::vector<uint8_t> _channelMap) {
+	ros::ServiceClient client = _n.serviceClient<audio_core::create>("create");
+	audio_core::create srv;
+	srv.request.stamp = _timeUs;
+	srv.request.frequency = _freq;
+	srv.request.channelMap = _channelMap;
+	srv.request.channelFormat = audio_msg::AudioBuffer::FORMAT_INT16;
+	if (client.call(srv)) {
+		ROS_INFO("uid: %ld", srv.response.handle);
+		return srv.response.handle;
+	} else {
+		ROS_ERROR("Failed to call service create");
+		return -1;
+	}
+}
+
+bool audioInterface_remove(ros::NodeHandle& _n, int64_t _uid) {
+	ros::ServiceClient client = _n.serviceClient<audio_core::remove>("remove");
+	audio_core::remove srv;
+	srv.request.handle = _uid;
+	if (client.call(srv)) {
+		ROS_INFO("remove uid: %ld", _uid);
+		return true;
+	} else {
+		ROS_ERROR("Failed to call service remove");
+		return false;
+	}
+}
+
+/**
+ * @brief return wait time
+ */
+uint32_t audioInterface_write(ros::NodeHandle& _n, int64_t _uid, const std::vector<int16_t>& _value) {
+	ros::ServiceClient client = _n.serviceClient<audio_core::write>("write");
+	audio_core::write srv;
+	srv.request.handle = _uid;
+	srv.request.data.resize(_value.size()*2);
+	memcpy(&srv.request.data[0], &_value[0], srv.request.data.size());
+	if (client.call(srv)) {
+		ROS_INFO("write need wait time : %d", srv.response.waitTime);
+		return srv.response.waitTime;
+	} else {
+		ROS_ERROR("Failed to call service write");
+		assert(0);
+	}
+}
+
+uint32_t audioInterface_getBufferTime(ros::NodeHandle& _n, int64_t _uid) {
+	ros::ServiceClient client = _n.serviceClient<audio_core::getBufferTime>("getBufferTime");
+	audio_core::getBufferTime srv;
+	srv.request.handle = _uid;
+	if (client.call(srv)) {
+		//ROS_INFO("write need wait time : %d", srv.response.time);
+		return srv.response.microseconds;
+	}
+	ROS_ERROR("Failed to call service getBufferTime");
+	return 0;
+}
+
+void usage() {
+	ROS_INFO("test_audio_generator usage [channel] [sampleRate] [nbChannels] [frequency] [timeToPlay]:");
+	ROS_INFO("    [channel] output to write data");
+	ROS_INFO("    [sampleRate] number of sample per second");
+	ROS_INFO("    [nbChannels] nb channel");
+	ROS_INFO("    [frequency] frequency to generate");
+	ROS_INFO("    [timeToPlay] time to generate signal");
+	ROS_INFO("ex");
+	ROS_INFO("    rosrun test_audio_generator test_audio_generator_node /audio/ 48000 2 440 10");
+	exit(-1);
+}
+
+int main(int argc, char **argv) {
+	if (argc != 6) {
+		ROS_ERROR("not enough arguments : %d", argc);
+		usage();
+	}
+	std::string p_channelToPlay = argv[1];
+	int32_t p_sampleRate = atoi(argv[2]);
+	int32_t p_nbChannels = atoi(argv[3]);
+	int32_t p_frequency = atoi(argv[4]);
+	int32_t p_timeToPlay = atoi(argv[5]);
+	
+	sscanf(argv[2], "%d", &p_sampleRate);
+	sscanf(argv[3], "%d", &p_nbChannels);
+	sscanf(argv[4], "%d", &p_frequency);
+	sscanf(argv[5], "%d", &p_timeToPlay);
+	
+	ROS_INFO(" rosrun test_audio_generator test_audio_generator_node %s %d %d %d %d", p_channelToPlay.c_str(), p_sampleRate, p_nbChannels, p_frequency, p_timeToPlay);
+	
+	ros::init(argc, argv, "test_audio_generator");
+	ros::NodeHandle n;
+	double phase = 0;
+	// send data:
+	ros::Time timee = ros::Time();
+	int32_t baseDataSize = 0;
+	if (p_sampleRate <= 32000) {
+		baseDataSize = 1024*2;
+	} else {
+		baseDataSize = 1024*8;
+	}
+	
+	std::vector<uint8_t> channelMap;
+	if (p_nbChannels == 1) {
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_CENTER);
+	} else if (p_nbChannels == 2) {
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
+	} else if (p_nbChannels == 3) {
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_CENTER);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
+	} else if (p_nbChannels == 4) {
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_LEFT);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_FRONT_RIGHT);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_LEFT);
+		channelMap.push_back(audio_msg::AudioBuffer::CHANNEL_REAR_RIGHT);
+	} else {
+		ROS_ERROR("nb channel supported error : %d not in [1,2,3,4]", p_nbChannels);
+		exit(-1);
+	}
+	// connect:
+	int64_t uid = audioInterface_create(n, timee, p_sampleRate, channelMap);
+	std::vector<int16_t> data;
+	data.resize(baseDataSize*channelMap.size());
+	
+	double baseCycle = 2.0*M_PI/(double)p_sampleRate * (double)p_frequency;
+	
+	int32_t generateTime = (p_timeToPlay * p_sampleRate) / baseDataSize;
+	
+	for (int32_t kkk=0; kkk<generateTime; ++kkk) {
+		// fill the interleaved buffer with one chunk of the sine wave
+		for (int32_t iii=0; iii<baseDataSize; ++iii) {
+			for (size_t jjj=0; jjj<channelMap.size(); ++jjj) {
+				data[iii*channelMap.size()+jjj] = (int16_t)(32000.0*sin(phase));
+			}
+			phase += baseCycle;
+			if (phase >= 2*M_PI) {
+				phase -= 2*M_PI;
+			}
+		}
+		//ROS_INFO("send data");
+		int32_t needSleep = audioInterface_write(n, uid, data);
+		if (needSleep > 0) {
+			ROS_INFO("need sleep %d", needSleep);
+			usleep(needSleep);
+		} else {
+			ROS_INFO("not sleep");
+			usleep(needSleep);
+		}
+	}
+	// wait end if playing :
+	usleep(200000);
+	uint32_t waitTime = audioInterface_getBufferTime(n, uid);
+	while (waitTime>0) {
+		ROS_INFO("wait end of playing ... %u us", waitTime);
+		usleep(waitTime/2);
+		waitTime = audioInterface_getBufferTime(n, uid);
+	}
+	// close:
+	audioInterface_remove(n, uid);
+	
+	return 0;
+}
+
diff --git a/test/test_audio_recorder/CMakeLists.txt b/test/test_audio_recorder/CMakeLists.txt
index 59df064..ce9d67f 100644
--- a/test/test_audio_recorder/CMakeLists.txt
+++ b/test/test_audio_recorder/CMakeLists.txt
@@ -11,6 +11,7 @@ find_package(catkin REQUIRED COMPONENTS
   rospy
   std_msgs
   message_generation
+  audio
 )
 
 ################################################
diff --git a/test/test_audio_recorder/package.xml b/test/test_audio_recorder/package.xml
index 9c0ce0f..98d9c7f 100644
--- a/test/test_audio_recorder/package.xml
+++ b/test/test_audio_recorder/package.xml
@@ -12,11 +12,13 @@
   <build_depend>roscpp</build_depend>
   <build_depend>std_msgs</build_depend>
   <build_depend>audio_msg</build_depend>
+  <build_depend>audio</build_depend>
 
   <run_depend>message_runtime</run_depend>
   <run_depend>roscpp</run_depend>
   <run_depend>std_msgs</run_depend>
   <run_depend>audio_core</run_depend>
   <run_depend>audio_msg</run_depend>
+  <run_depend>audio</run_depend>
 </package>
\ No newline at end of file
diff --git a/test/test_audio_recorder/src/main.cpp b/test/test_audio_recorder/src/main.cpp
index 9ae4864..43d36b4 100644
--- a/test/test_audio_recorder/src/main.cpp
+++ b/test/test_audio_recorder/src/main.cpp
@@ -1,5 +1,7 @@
 #include "ros/ros.h"
 #include "audio_msg/AudioBuffer.h"
+#include