[DEV] update audio time interface

Edouard DUPIN 2015-04-13 21:49:48 +02:00
parent 620fb8e54f
commit 41f55ab83d
20 changed files with 78 additions and 87 deletions
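This diff swaps every std11::chrono::system_clock::time_point for audio::Time and every std11::chrono::nanoseconds for audio::Duration. As a reading aid, here is a minimal sketch of the operations the new types are assumed to provide, inferred only from how they are used in the hunks below; the real definitions live in audio/Time.h and may differ.

#include <cstdint>
// Sketch only (not the real audio/Time.h): the subset of audio::Time /
// audio::Duration that the changed code below relies on.
namespace audio {
	class Duration {
		public:
			Duration();                                       // zero-length duration
			Duration(int64_t _nanoseconds);                   // from a nanosecond count
			Duration(int64_t _seconds, int64_t _nanoseconds); // seconds + nanoseconds
			int64_t count() const;                            // value in nanoseconds
	};
	class Time {
		public:
			Time();                                           // default / "unset" timestamp
			static Time now();                                // current system time
			Time operator+(const Duration& _delta) const;
			Duration operator-(const Time& _other) const;
			bool operator==(const Time& _other) const;
			bool operator<(const Time& _other) const;
	};
}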

View File

@@ -171,7 +171,7 @@ void audio::river::Interface::setWriteCallback(audio::drain::playbackFunctionWri
algo->setCallback(_function);
}
-void audio::river::Interface::start(const std11::chrono::system_clock::time_point& _time) {
+void audio::river::Interface::start(const audio::Time& _time) {
std11::unique_lock<std11::recursive_mutex> lock(m_mutex);
RIVER_DEBUG("start [BEGIN]");
m_process.updateInterAlgo();
@@ -407,11 +407,11 @@ void audio::river::Interface::clearInternalBuffer() {
}
-std11::chrono::system_clock::time_point audio::river::Interface::getCurrentTime() const {
+audio::Time audio::river::Interface::getCurrentTime() const {
std11::unique_lock<std11::recursive_mutex> lock(m_mutex);
// TODO :...
-return std11::chrono::system_clock::time_point();
-return std11::chrono::system_clock::now();
+return audio::Time();
+return audio::Time::now();
}
void audio::river::Interface::addVolumeGroup(const std::string& _name) {
@@ -439,13 +439,13 @@ void audio::river::Interface::addVolumeGroup(const std::string& _name) {
}
}
-void audio::river::Interface::systemNewInputData(std11::chrono::system_clock::time_point _time, const void* _data, size_t _nbChunk) {
+void audio::river::Interface::systemNewInputData(audio::Time _time, const void* _data, size_t _nbChunk) {
std11::unique_lock<std11::recursive_mutex> lockProcess(m_mutex);
void * tmpData = const_cast<void*>(_data);
m_process.push(_time, tmpData, _nbChunk);
}
-void audio::river::Interface::systemNeedOutputData(std11::chrono::system_clock::time_point _time, void* _data, size_t _nbChunk, size_t _chunkSize) {
+void audio::river::Interface::systemNeedOutputData(audio::Time _time, void* _data, size_t _nbChunk, size_t _chunkSize) {
std11::unique_lock<std11::recursive_mutex> lockProcess(m_mutex);
//RIVER_INFO("time : " << _time);
m_process.pull(_time, _data, _nbChunk, _chunkSize);
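Seen from calling code, only the time types in the signatures change. A hypothetical caller of the updated start()/getCurrentTime() pair could look like this (sketch only: the include path is assumed, and the default argument audio::Time() presumably keeps the behaviour of the old default-constructed time_point, i.e. "start as soon as possible"):

#include <audio/river/Interface.h> // assumed header location
static void startAndQuery(const std11::shared_ptr<audio::river::Interface>& _io) {
	// An explicit timestamp asks to play/read at that time (if possible);
	// omitting it falls back to the default audio::Time().
	_io->start(audio::Time::now());
	// Time of the next sample to write/read in the local buffer:
	audio::Time next = _io->getCurrentTime();
	audio::Duration ahead = next - audio::Time::now();
	(void)ahead; // could be logged or used for scheduling
}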

View File

@@ -21,6 +21,7 @@
#include <audio/drain/EndPointWrite.h>
#include <ejson/ejson.h>
#include <etk/os/FSNode.h>
+#include <audio/Time.h>
namespace audio {
namespace river {
@@ -178,7 +179,7 @@ namespace audio {
* @note _time to play buffer when output interface (if possible)
* @note _time to read buffer when inut interface (if possible)
*/
-virtual void start(const std11::chrono::system_clock::time_point& _time = std11::chrono::system_clock::time_point());
+virtual void start(const audio::Time& _time = audio::Time());
/**
* @brief Stop the current flow.
* @param[in] _fast The stream stop as fast as possible (not write all the buffer in speaker) but apply cross fade out.
@@ -275,7 +276,7 @@ namespace audio {
* @brief Write : Get the time of the next sample time to write in the local buffer
* @brief Read : Get the time of the next sample time to read in the local buffer
*/
-virtual std11::chrono::system_clock::time_point getCurrentTime() const;
+virtual audio::Time getCurrentTime() const;
private:
/**
* @brief Node Call interface : Input interface node has new data.
@@ -283,7 +284,7 @@ namespace audio {
* @param[in] _data Pointer on the new data.
* @param[in] _nbChunk Number of chunk in the buffer.
*/
-virtual void systemNewInputData(std11::chrono::system_clock::time_point _time, const void* _data, size_t _nbChunk);
+virtual void systemNewInputData(audio::Time _time, const void* _data, size_t _nbChunk);
/**
* @brief Node Call interface: Output interface node need new data.
* @param[in] _time Time where the data might be played
@@ -291,7 +292,7 @@ namespace audio {
* @param[in] _nbChunk Number of chunk that might be write
* @param[in] _chunkSize Chunk size.
*/
-virtual void systemNeedOutputData(std11::chrono::system_clock::time_point _time, void* _data, size_t _nbChunk, size_t _chunkSize);
+virtual void systemNeedOutputData(audio::Time _time, void* _data, size_t _nbChunk, size_t _chunkSize);
/**
* @brief Node Call interface: A volume has change.
*/

View File

@@ -191,7 +191,7 @@ void audio::river::io::Node::volumeChange() {
void audio::river::io::Node::newInput(const void* _inputBuffer,
uint32_t _nbChunk,
-const std11::chrono::system_clock::time_point& _time) {
+const audio::Time& _time) {
if (_inputBuffer == nullptr) {
return;
}
@@ -212,7 +212,7 @@ void audio::river::io::Node::newInput(const void* _inputBuffer,
void audio::river::io::Node::newOutput(void* _outputBuffer,
uint32_t _nbChunk,
-const std11::chrono::system_clock::time_point& _time) {
+const audio::Time& _time) {
if (_outputBuffer == nullptr) {
return;
}

View File

@@ -201,7 +201,7 @@ namespace audio {
*/
void newInput(const void* _inputBuffer,
uint32_t _nbChunk,
-const std11::chrono::system_clock::time_point& _time);
+const audio::Time& _time);
/**
* @brief Call by child classes to process data in all interface linked on the current Node. Have new output to get. this call the feedback too.
* @param[in,out] _outputBuffer Pointer on the buffer to write the data.
@@ -210,7 +210,7 @@ namespace audio {
*/
void newOutput(void* _outputBuffer,
uint32_t _nbChunk,
-const std11::chrono::system_clock::time_point& _time);
+const audio::Time& _time);
public:
/**
* @brief Generate the node dot file section

View File

@@ -61,7 +61,7 @@ audio::river::io::NodeAEC::NodeAEC(const std::string& _name, const std11::shared
m_P_latencyTime(100) {
audio::drain::IOFormatInterface interfaceFormat = getInterfaceFormat();
audio::drain::IOFormatInterface hardwareFormat = getHarwareFormat();
-m_sampleTime = std11::chrono::nanoseconds(1000000000/int64_t(hardwareFormat.getFrequency()));
+m_sampleTime = audio::Duration(1000000000/int64_t(hardwareFormat.getFrequency()));
/**
# connect in input mode
map-on-microphone:{
@@ -166,13 +166,13 @@ void audio::river::io::NodeAEC::stop() {
void audio::river::io::NodeAEC::onDataReceivedMicrophone(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map) {
RIVER_DEBUG("Microphone Time=" << _time << " _nbChunk=" << _nbChunk << " _map=" << _map << " _format=" << _format << " freq=" << _frequency);
-RIVER_DEBUG(" next=" << _time + std11::chrono::nanoseconds(_nbChunk*1000000000LL/int64_t(_frequency)) );
+RIVER_DEBUG(" next=" << _time + audio::Duration(0, _nbChunk*1000000000LL/int64_t(_frequency)) );
if (_format != audio::format_int16) {
RIVER_ERROR("call wrong type ... (need int16_t)");
}
@@ -184,13 +184,13 @@ void audio::river::io::NodeAEC::onDataReceivedMicrophone(const void* _data,
}
void audio::river::io::NodeAEC::onDataReceivedFeedBack(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map) {
RIVER_DEBUG("FeedBack Time=" << _time << " _nbChunk=" << _nbChunk << " _map=" << _map << " _format=" << _format << " freq=" << _frequency);
-RIVER_DEBUG(" next=" << _time + std11::chrono::nanoseconds(_nbChunk*1000000000LL/int64_t(_frequency)) );
+RIVER_DEBUG(" next=" << _time + audio::Duration(0, _nbChunk*1000000000LL/int64_t(_frequency)) );
if (_format != audio::format_int16) {
RIVER_ERROR("call wrong type ... (need int16_t)");
}
@@ -206,9 +206,9 @@ void audio::river::io::NodeAEC::process() {
|| m_bufferFeedBack.getSize() <= m_nbChunk) {
return;
}
-std11::chrono::system_clock::time_point MicTime = m_bufferMicrophone.getReadTimeStamp();
-std11::chrono::system_clock::time_point fbTime = m_bufferFeedBack.getReadTimeStamp();
-std11::chrono::nanoseconds delta;
+audio::Time MicTime = m_bufferMicrophone.getReadTimeStamp();
+audio::Time fbTime = m_bufferFeedBack.getReadTimeStamp();
+audio::Duration delta;
if (MicTime < fbTime) {
delta = fbTime - MicTime;
} else {
@@ -266,7 +266,7 @@ void audio::river::io::NodeAEC::process() {
}
-void audio::river::io::NodeAEC::processAEC(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const std11::chrono::system_clock::time_point& _time) {
+void audio::river::io::NodeAEC::processAEC(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const audio::Time& _time) {
audio::drain::IOFormatInterface hardwareFormat = getHarwareFormat();
// TODO : Set all these parameter in the parameter configuration section ...
int32_t attaqueTime = std::min(std::max(0,m_P_attaqueTime),1000);
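The process() hunk above keeps the synchronisation logic unchanged: the read timestamps of the microphone and feedback buffers are compared, and the audio::Duration between them, divided by the per-sample period held in m_sampleTime, says how many samples one stream leads the other. A standalone sketch of that arithmetic (hypothetical helper; it assumes Duration::count() is a nanosecond count, as the 1000000000/frequency expressions suggest):

#include <audio/Time.h>
#include <cstdint>
// Hypothetical helper mirroring the alignment arithmetic of NodeAEC::process():
// number of whole samples separating the two stream timestamps.
static int64_t sampleOffset(const audio::Time& _micTime,
                            const audio::Time& _fbTime,
                            uint32_t _frequency) {
	audio::Duration sampleTime(1000000000LL / int64_t(_frequency)); // one sample period
	audio::Duration delta = (_micTime < _fbTime) ? (_fbTime - _micTime)
	                                             : (_micTime - _fbTime);
	return delta.count() / sampleTime.count();
}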

View File

@@ -56,7 +56,7 @@ namespace audio {
* @todo : copy doc ..
*/
void onDataReceivedMicrophone(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -66,7 +66,7 @@ namespace audio {
* @todo : copy doc ..
*/
void onDataReceivedFeedBack(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -74,7 +74,7 @@ namespace audio {
protected:
audio::drain::CircularBuffer m_bufferMicrophone; //!< temporary buffer to synchronize data.
audio::drain::CircularBuffer m_bufferFeedBack; //!< temporary buffer to synchronize data.
-std11::chrono::nanoseconds m_sampleTime; //!< represent the sample time at the specify frequency.
+audio::Duration m_sampleTime; //!< represent the sample time at the specify frequency.
/**
* @brief Process synchronization on the 2 flow.
*/
@@ -87,7 +87,7 @@ namespace audio {
* @param[in] _time Time on the firsta sample that data has been captured.
* @return
*/
-void processAEC(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const std11::chrono::system_clock::time_point& _time);
+void processAEC(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const audio::Time& _time);
public:
virtual void generateDot(etk::FSNode& _node);
private:

View File

@@ -56,7 +56,7 @@ audio::river::io::NodeMuxer::NodeMuxer(const std::string& _name, const std11::sh
Node(_name, _config) {
audio::drain::IOFormatInterface interfaceFormat = getInterfaceFormat();
audio::drain::IOFormatInterface hardwareFormat = getHarwareFormat();
-m_sampleTime = std11::chrono::nanoseconds(1000000000/int64_t(hardwareFormat.getFrequency()));
+m_sampleTime = audio::Duration(1000000000/int64_t(hardwareFormat.getFrequency()));
/**
# connect in input mode
map-on-input-1:{
@@ -191,13 +191,13 @@ void audio::river::io::NodeMuxer::stop() {
void audio::river::io::NodeMuxer::onDataReceivedInput1(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map) {
RIVER_DEBUG("Microphone Time=" << _time << " _nbChunk=" << _nbChunk << " _map=" << _map << " _format=" << _format << " freq=" << _frequency);
-RIVER_DEBUG(" next=" << _time + std11::chrono::nanoseconds(_nbChunk*1000000000LL/int64_t(_frequency)) );
+RIVER_DEBUG(" next=" << _time + audio::Duration(0, _nbChunk*1000000000LL/int64_t(_frequency)) );
if (_format != audio::format_int16) {
RIVER_ERROR("call wrong type ... (need int16_t)");
}
@@ -209,13 +209,13 @@ void audio::river::io::NodeMuxer::onDataReceivedInput1(const void* _data,
}
void audio::river::io::NodeMuxer::onDataReceivedInput2(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map) {
RIVER_DEBUG("FeedBack Time=" << _time << " _nbChunk=" << _nbChunk << " _map=" << _map << " _format=" << _format << " freq=" << _frequency);
-RIVER_DEBUG(" next=" << _time + std11::chrono::nanoseconds(_nbChunk*1000000000LL/int64_t(_frequency)) );
+RIVER_DEBUG(" next=" << _time + audio::Duration(0, _nbChunk*1000000000LL/int64_t(_frequency)) );
if (_format != audio::format_int16) {
RIVER_ERROR("call wrong type ... (need int16_t)");
}
@@ -233,9 +233,9 @@ void audio::river::io::NodeMuxer::process() {
if (m_bufferInput2.getSize() <= 256) {
return;
}
-std11::chrono::system_clock::time_point in1Time = m_bufferInput1.getReadTimeStamp();
-std11::chrono::system_clock::time_point in2Time = m_bufferInput2.getReadTimeStamp();
-std11::chrono::nanoseconds delta;
+audio::Time in1Time = m_bufferInput1.getReadTimeStamp();
+audio::Time in2Time = m_bufferInput2.getReadTimeStamp();
+audio::Duration delta;
if (in1Time < in2Time) {
delta = in2Time - in1Time;
} else {
@@ -423,7 +423,7 @@ void audio::river::io::NodeMuxer::reorder(void* _output, uint32_t _nbChunk, void
}
}
-void audio::river::io::NodeMuxer::processMuxer(void* _dataIn1, void* _dataIn2, uint32_t _nbChunk, const std11::chrono::system_clock::time_point& _time) {
+void audio::river::io::NodeMuxer::processMuxer(void* _dataIn1, void* _dataIn2, uint32_t _nbChunk, const audio::Time& _time) {
//RIVER_INFO("must Mux data : " << m_mapInput1 << " + " << m_mapInput2 << " ==> " << getInterfaceFormat().getMap());
memset(&m_data[0], 0, m_data.size());
reorder(&m_data[0], _nbChunk, _dataIn1, m_mapInput1);
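For a sense of the values m_sampleTime now carries: at an assumed 48 kHz hardware format the constructor above stores 1000000000/48000 ≈ 20833 ns per sample, so the 256-chunk threshold that process() waits for spans roughly 5.3 ms of audio. A tiny self-contained check of that arithmetic (plain integers, no audio classes involved):

#include <cstdint>
#include <cstdio>
int main() {
	int64_t frequency = 48000;                       // assumed hardware rate
	int64_t sampleTimeNs = 1000000000LL / frequency; // 20833 ns per sample
	int64_t blockNs = 256 * sampleTimeNs;            // 5333248 ns, about 5.3 ms
	std::printf("sample period = %lld ns, 256 chunks = %lld ns\n",
	            (long long)sampleTimeNs, (long long)blockNs);
	return 0;
}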

View File

@@ -38,13 +38,13 @@ namespace audio {
const std::string& _streamName,
const std::string& _name);
void onDataReceivedInput1(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
const std::vector<audio::channel>& _map);
void onDataReceivedInput2(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -53,9 +53,9 @@ namespace audio {
std::vector<audio::channel> m_mapInput2;
audio::drain::CircularBuffer m_bufferInput1;
audio::drain::CircularBuffer m_bufferInput2;
-std11::chrono::nanoseconds m_sampleTime; //!< represent the sample time at the specify frequency.
+audio::Duration m_sampleTime; //!< represent the sample time at the specify frequency.
void process();
-void processMuxer(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const std11::chrono::system_clock::time_point& _time);
+void processMuxer(void* _dataMic, void* _dataFB, uint32_t _nbChunk, const audio::Time& _time);
std::vector<uint8_t> m_data;
public:
virtual void generateDot(etk::FSNode& _node);

View File

@@ -13,18 +13,8 @@
#undef __class__
#define __class__ "io::NodeOrchestra"
-static std::string asString(const std11::chrono::system_clock::time_point& tp) {
-// convert to system time:
-std::time_t t = std11::chrono::system_clock::to_time_t(tp);
-// convert in human string
-std::string ts = std::ctime(&t);
-// remove \n
-ts.resize(ts.size()-1);
-return ts;
-}
int32_t audio::river::io::NodeOrchestra::recordCallback(const void* _inputBuffer,
-const std11::chrono::system_clock::time_point& _timeInput,
+const audio::Time& _timeInput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status) {
std11::unique_lock<std11::mutex> lock(m_mutex);
@@ -35,7 +25,7 @@ int32_t audio::river::io::NodeOrchestra::recordCallback(const void* _inputBuffer
}
int32_t audio::river::io::NodeOrchestra::playbackCallback(void* _outputBuffer,
-const std11::chrono::system_clock::time_point& _timeOutput,
+const audio::Time& _timeOutput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status) {
std11::unique_lock<std11::mutex> lock(m_mutex);

View File

@@ -50,7 +50,7 @@ namespace audio {
* @return DEPRECATED soon
*/
int32_t recordCallback(const void* _inputBuffer,
-const std11::chrono::system_clock::time_point& _timeInput,
+const audio::Time& _timeInput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status);
/**
@@ -62,7 +62,7 @@ namespace audio {
* @return DEPRECATED soon
*/
int32_t playbackCallback(void* _outputBuffer,
-const std11::chrono::system_clock::time_point& _timeOutput,
+const audio::Time& _timeOutput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status);
protected:

View File

@@ -13,7 +13,7 @@
#undef __class__
#define __class__ "io::NodePortAudio"
-static std::string asString(const std11::chrono::system_clock::time_point& tp) {
+static std::string asString(const audio::Time& tp) {
// convert to system time:
std::time_t t = std11::chrono::system_clock::to_time_t(tp);
// convert in human string
@@ -32,10 +32,10 @@ static int portAudioStreamCallback(const void *_input,
audio::river::io::NodePortAudio* myClass = reinterpret_cast<audio::river::io::NodePortAudio*>(_userData);
int64_t sec = int64_t(_timeInfo->inputBufferAdcTime);
int64_t nsec = (_timeInfo->inputBufferAdcTime-double(sec))*1000000000LL;
-std11::chrono::system_clock::time_point timeInput = std11::chrono::system_clock::from_time_t(sec) + std11::chrono::nanoseconds(nsec);
+audio::Time timeInput = std11::chrono::system_clock::from_time_t(sec) + audio::Duration(nsec);
sec = int64_t(_timeInfo->outputBufferDacTime);
nsec = (_timeInfo->outputBufferDacTime-double(sec))*1000000000LL;
-std11::chrono::system_clock::time_point timeOutput = std11::chrono::system_clock::from_time_t(sec) + std11::chrono::nanoseconds(nsec);
+audio::Time timeOutput = std11::chrono::system_clock::from_time_t(sec) + audio::Duration(nsec);
return myClass->duplexCallback(_input,
timeInput,
_output,
@@ -45,9 +45,9 @@ static int portAudioStreamCallback(const void *_input,
}
int32_t audio::river::io::NodePortAudio::duplexCallback(const void* _inputBuffer,
-const std11::chrono::system_clock::time_point& _timeInput,
+const audio::Time& _timeInput,
void* _outputBuffer,
-const std11::chrono::system_clock::time_point& _timeOutput,
+const audio::Time& _timeOutput,
uint32_t _nbChunk,
PaStreamCallbackFlags _status) {
std11::unique_lock<std11::mutex> lock(m_mutex);
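PortAudio reports its stream times as double seconds; the callback above splits them into whole seconds plus a nanosecond remainder and rebuilds an audio::Time. The same conversion factored into a hypothetical helper (it leans on the same headers and the same time_point-plus-Duration conversion that the diff itself uses):

// Hypothetical helper; identical conversion to the inline code in portAudioStreamCallback.
static audio::Time convertPortAudioTime(double _paTime) {
	int64_t sec = int64_t(_paTime);
	int64_t nsec = int64_t((_paTime - double(sec)) * 1000000000LL);
	return std11::chrono::system_clock::from_time_t(sec) + audio::Duration(nsec);
}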

View File

@@ -37,9 +37,9 @@ namespace audio {
PaStream* m_stream;
public:
int32_t duplexCallback(const void* _inputBuffer,
-const std11::chrono::system_clock::time_point& _timeInput,
+const audio::Time& _timeInput,
void* _outputBuffer,
-const std11::chrono::system_clock::time_point& _timeOutput,
+const audio::Time& _timeOutput,
uint32_t _nbChunk,
PaStreamCallbackFlags _status);
protected:

View File

@@ -73,7 +73,7 @@ namespace river_test_aec {
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -84,7 +84,7 @@ namespace river_test_aec {
m_buffer.read(_data, _nbChunk);
}
void onDataReceived(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -23,8 +23,8 @@ namespace river_test_echo_delay {
double m_freq;
int32_t m_nextSampleCount;
std11::chrono::milliseconds m_delayBetweenEvent;
-std11::chrono::system_clock::time_point m_nextTick;
-std11::chrono::system_clock::time_point m_currentTick;
+audio::Time m_nextTick;
+audio::Time m_currentTick;
int32_t m_stateFB;
int32_t m_stateMic;
std::vector<uint64_t> m_delayListMic;
@@ -104,7 +104,7 @@ namespace river_test_echo_delay {
m_manager->generateDotAll("activeProcess.dot");
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -122,7 +122,7 @@ namespace river_test_echo_delay {
}
}
} else {
-if (_time == std11::chrono::system_clock::time_point()) {
+if (_time == audio::Time()) {
for (int32_t iii=0; iii<_nbChunk; iii++) {
for (int32_t jjj=0; jjj<_map.size(); jjj++) {
data[_map.size()*iii+jjj] = 0;
@@ -130,7 +130,7 @@ namespace river_test_echo_delay {
}
return;
}
-if (m_nextTick == std11::chrono::system_clock::time_point()) {
+if (m_nextTick == audio::Time()) {
m_nextTick = _time + m_delayBetweenEvent;
m_nextSampleCount = m_delayBetweenEvent.count()*int64_t(_frequency)/1000;
m_phase = -1;
@@ -175,19 +175,19 @@ namespace river_test_echo_delay {
}
}
}
-std11::chrono::system_clock::time_point getInterpolateTime(std11::chrono::system_clock::time_point _time, int32_t _pos, int16_t _val1, int16_t _val2, uint32_t _frequency) {
+audio::Time getInterpolateTime(audio::Time _time, int32_t _pos, int16_t _val1, int16_t _val2, uint32_t _frequency) {
if (_val1 == 0) {
-return _time + std11::chrono::nanoseconds(int64_t(_pos)*1000000000LL/int64_t(_frequency));
+return _time + audio::Duration(0, int64_t(_pos)*1000000000LL/int64_t(_frequency));
} else if (_val2 == 0) {
-return _time + std11::chrono::nanoseconds(int64_t(_pos+1)*1000000000LL/int64_t(_frequency));
+return _time + audio::Duration(0, int64_t(_pos+1)*1000000000LL/int64_t(_frequency));
}
double xxx = double(-_val1) / double(_val2 - _val1);
APPL_VERBOSE("deltaPos:" << xxx);
-return _time + std11::chrono::nanoseconds(int64_t((double(_pos)+xxx)*1000000000.0)/int64_t(_frequency));
+return _time + audio::Duration(0, int64_t((double(_pos)+xxx)*1000000000.0)/int64_t(_frequency));
}
void onDataReceivedFeedBack(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -217,7 +217,7 @@ namespace river_test_echo_delay {
if (data[iii*_map.size() + jjj] <= 0) {
// detect inversion of signe ...
m_stateFB = 3;
-std11::chrono::system_clock::time_point time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
+audio::Time time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
APPL_VERBOSE("FB: 1 position -1: " << iii-1 << " " << data[(iii-1)*_map.size() + jjj]);
APPL_VERBOSE("FB: 1 position 0: " << iii << " " << data[iii*_map.size() + jjj]);
@@ -228,7 +228,7 @@ namespace river_test_echo_delay {
if (data[iii*_map.size() + jjj] >= 0) {
// detect inversion of signe ...
m_stateFB = 3;
-std11::chrono::system_clock::time_point time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
+audio::Time time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
APPL_VERBOSE("FB: 2 position -1: " << iii-1 << " " << data[(iii-1)*_map.size() + jjj]);
APPL_VERBOSE("FB: 2 position 0: " << iii << " " << data[iii*_map.size() + jjj]);
APPL_WARNING("FB: 2 time detected: " << time << " delay = " << float((time-m_currentTick).count())/1000.0f << "µs");
@@ -242,7 +242,7 @@ namespace river_test_echo_delay {
}
}
void onDataReceived(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -319,10 +319,10 @@ namespace river_test_echo_delay {
if (data[iii*_map.size() + jjj] <= 0) {
// detect inversion of signe ...
m_stateMic = 3;
-std11::chrono::system_clock::time_point time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
+audio::Time time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
APPL_VERBOSE("MIC: 1 position -1: " << iii-1 << " " << data[(iii-1)*_map.size() + jjj]);
APPL_VERBOSE("MIC: 1 position 0: " << iii << " " << data[iii*_map.size() + jjj]);
-std11::chrono::nanoseconds delay = time-m_currentTick;
+audio::Duration delay = time-m_currentTick;
int32_t sampleDalay = (delay.count()*_frequency)/1000000000LL;
APPL_WARNING("MIC: 1 time detected: " << time << " delay = " << float(delay.count())/1000.0f << "µs samples=" << sampleDalay);
m_delayListMic.push_back(delay.count());
@@ -332,10 +332,10 @@ namespace river_test_echo_delay {
if (data[iii*_map.size() + jjj] >= 0) {
// detect inversion of signe ...
m_stateMic = 3;
-std11::chrono::system_clock::time_point time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
+audio::Time time = getInterpolateTime(_time, iii-1, data[(iii-1)*_map.size() + jjj], data[iii*_map.size() + jjj], _frequency);
APPL_VERBOSE("MIC: 2 position -1: " << iii-1 << " " << data[(iii-1)*_map.size() + jjj]);
APPL_VERBOSE("MIC: 2 position 0: " << iii << " " << data[iii*_map.size() + jjj]);
-std11::chrono::nanoseconds delay = time-m_currentTick;
+audio::Duration delay = time-m_currentTick;
int32_t sampleDalay = (delay.count()*_frequency)/1000000000LL;
APPL_WARNING("MIC: 2 time detected: " << time << " delay = " << float(delay.count())/1000.0f << "µs samples=" << sampleDalay);
m_delayListMic.push_back(delay.count());
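getInterpolateTime() above estimates the sub-sample instant of a zero crossing by linear interpolation: with adjacent samples _val1 and _val2 of opposite sign, the crossing lies a fraction x = -_val1 / (_val2 - _val1) of one sample period after position _pos. A small self-contained check of that formula with made-up sample values (plain arithmetic, independent of the audio classes):

#include <cstdint>
#include <cstdio>
int main() {
	int16_t val1 = -300, val2 = 500;  // adjacent samples around a zero crossing
	int32_t pos = 10;                 // index of val1 within the chunk
	int64_t frequency = 48000;        // assumed sample rate
	double xxx = double(-val1) / double(val2 - val1); // 0.375 of a sample period
	int64_t offsetNs = int64_t((double(pos) + xxx) * 1000000000.0) / frequency;
	std::printf("crossing %.3f samples after pos, %lld ns after the chunk timestamp\n",
	            xxx, (long long)offsetNs); // about 216145 ns
	return 0;
}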

View File

@@ -81,7 +81,7 @@ namespace river_test_format {
std11::placeholders::_6));
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -68,7 +68,7 @@ namespace river_test_muxer {
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,
@@ -86,7 +86,7 @@ namespace river_test_muxer {
}
}
void onDataReceived(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -44,7 +44,7 @@ namespace river_test_playback_callback {
std11::placeholders::_6));
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -132,7 +132,7 @@ namespace river_test_playback_write {
std11::placeholders::_4,
std11::placeholders::_5));
}
-void onDataNeeded(const std11::chrono::system_clock::time_point& _time,
+void onDataNeeded(const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -56,7 +56,7 @@ namespace river_test_record_callback {
std11::placeholders::_6));
}
void onDataReceived(const void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,

View File

@@ -61,7 +61,7 @@ namespace river_test_volume {
m_interface->addVolumeGroup("FLOW");
}
void onDataNeeded(void* _data,
-const std11::chrono::system_clock::time_point& _time,
+const audio::Time& _time,
size_t _nbChunk,
enum audio::format _format,
uint32_t _frequency,