Added openFABMAP code, contributed by Arren Glover. Fixed several warnings in the new versions of the retina filters.
File diff suppressed because it is too large
modules/contrib/src/bowmsctrainer.cpp (new file, 139 lines)
@@ -0,0 +1,139 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
// This file originates from the openFABMAP project:
// [http://code.google.com/p/openfabmap/]
//
// For published work which uses all or part of OpenFABMAP, please cite:
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
//
// Original Algorithm by Mark Cummins and Paul Newman:
// [http://ijr.sagepub.com/content/27/6/647.short]
// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
// [http://ijr.sagepub.com/content/30/9/1100.abstract]
//
//                           License Agreement
//
// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
//                    Will Maddern [w.maddern@qut.edu.au], all rights reserved.
//
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/contrib/openfabmap.hpp"

namespace cv {

namespace of2 {

BOWMSCTrainer::BOWMSCTrainer(double _clusterSize) :
    clusterSize(_clusterSize) {
}

BOWMSCTrainer::~BOWMSCTrainer() {
}

Mat BOWMSCTrainer::cluster() const {
    CV_Assert(!descriptors.empty());
    int descCount = 0;
    for(size_t i = 0; i < descriptors.size(); i++)
        descCount += descriptors[i].rows;

    Mat mergedDescriptors(descCount, descriptors[0].cols,
        descriptors[0].type());
    for(size_t i = 0, start = 0; i < descriptors.size(); i++)
    {
        Mat submut = mergedDescriptors.rowRange((int)start,
            (int)(start + descriptors[i].rows));
        descriptors[i].copyTo(submut);
        start += descriptors[i].rows;
    }
    return cluster(mergedDescriptors);
}

Mat BOWMSCTrainer::cluster(const Mat& descriptors) const {

    CV_Assert(!descriptors.empty());

    // TODO: sort the descriptors before clustering.

    Mat icovar = Mat::eye(descriptors.cols,descriptors.cols,descriptors.type());

    vector<Mat> initialCentres;
    initialCentres.push_back(descriptors.row(0));
    for (int i = 1; i < descriptors.rows; i++) {
        double minDist = DBL_MAX;
        for (size_t j = 0; j < initialCentres.size(); j++) {
            minDist = std::min(minDist,
                cv::Mahalanobis(descriptors.row(i),initialCentres[j],
                icovar));
        }
        if (minDist > clusterSize)
            initialCentres.push_back(descriptors.row(i));
    }

    std::vector<std::list<cv::Mat> > clusters;
    clusters.resize(initialCentres.size());
    for (int i = 0; i < descriptors.rows; i++) {
        int index = 0; double dist = 0, minDist = DBL_MAX;
        for (size_t j = 0; j < initialCentres.size(); j++) {
            dist = cv::Mahalanobis(descriptors.row(i),initialCentres[j],icovar);
            if (dist < minDist) {
                minDist = dist;
                index = (int)j;
            }
        }
        clusters[index].push_back(descriptors.row(i));
    }

    // TODO: throw away small clusters.

    Mat vocabulary;
    Mat centre = Mat::zeros(1,descriptors.cols,descriptors.type());
    for (size_t i = 0; i < clusters.size(); i++) {
        centre.setTo(0);
        for (std::list<cv::Mat>::iterator Ci = clusters[i].begin(); Ci != clusters[i].end(); Ci++) {
            centre += *Ci;
        }
        centre /= (double)clusters[i].size();
        vocabulary.push_back(centre);
    }

    return vocabulary;
}

}

}
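BOWMSCTrainer implements the modified sequential clustering step of FAB-MAP: it seeds a new cluster centre whenever a descriptor lies further than clusterSize (Mahalanobis distance under an identity covariance) from every existing centre, assigns all descriptors to their nearest centre, and returns the cluster means as the vocabulary. The class follows the cv::BOWTrainer interface, so a vocabulary can be built as in the following minimal sketch (not part of the patch; the random descriptors and the 0.6 radius are illustrative stand-ins for real CV_32F feature descriptors such as SURF):

#include "opencv2/core/core.hpp"
#include "opencv2/contrib/openfabmap.hpp"

int main()
{
    // clusterSize is the Mahalanobis radius used when seeding new centres.
    cv::of2::BOWMSCTrainer trainer(0.6);

    // Stand-in for real CV_32F descriptors (one row per feature).
    cv::Mat descriptors(1000, 64, CV_32F);
    cv::randu(descriptors, cv::Scalar(0), cv::Scalar(1));

    trainer.add(descriptors);               // inherited from cv::BOWTrainer
    cv::Mat vocabulary = trainer.cluster(); // one row per visual word
    return 0;
}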
modules/contrib/src/chowliutree.cpp (new file, 290 lines)
@@ -0,0 +1,290 @@
/* (Same OpenFABMAP/BSD license header as in bowmsctrainer.cpp above.) */
#include "precomp.hpp"
|
||||
#include "opencv2/contrib/openfabmap.hpp"
|
||||
|
||||
namespace cv {
|
||||
|
||||
namespace of2 {
|
||||
|
||||
ChowLiuTree::ChowLiuTree() {
|
||||
}
|
||||
|
||||
ChowLiuTree::~ChowLiuTree() {
|
||||
}
|
||||
|
||||
void ChowLiuTree::add(const Mat& imgDescriptor) {
|
||||
CV_Assert(!imgDescriptor.empty());
|
||||
if (!imgDescriptors.empty()) {
|
||||
CV_Assert(imgDescriptors[0].cols == imgDescriptor.cols);
|
||||
CV_Assert(imgDescriptors[0].type() == imgDescriptor.type());
|
||||
}
|
||||
|
||||
imgDescriptors.push_back(imgDescriptor);
|
||||
|
||||
}
|
||||
|
||||
void ChowLiuTree::add(const vector<Mat>& _imgDescriptors) {
|
||||
for (size_t i = 0; i < _imgDescriptors.size(); i++) {
|
||||
add(_imgDescriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
const std::vector<cv::Mat>& ChowLiuTree::getImgDescriptors() const {
|
||||
return imgDescriptors;
|
||||
}
|
||||
|
||||
Mat ChowLiuTree::make(double infoThreshold) {
|
||||
CV_Assert(!imgDescriptors.empty());
|
||||
|
||||
unsigned int descCount = 0;
|
||||
for (size_t i = 0; i < imgDescriptors.size(); i++)
|
||||
descCount += imgDescriptors[i].rows;
|
||||
|
||||
mergedImgDescriptors = cv::Mat(descCount, imgDescriptors[0].cols,
|
||||
imgDescriptors[0].type());
|
||||
for (size_t i = 0, start = 0; i < imgDescriptors.size(); i++)
|
||||
{
|
||||
Mat submut = mergedImgDescriptors.rowRange((int)start,
|
||||
(int)(start + imgDescriptors[i].rows));
|
||||
imgDescriptors[i].copyTo(submut);
|
||||
start += imgDescriptors[i].rows;
|
||||
}
|
||||
|
||||
std::list<info> edges;
|
||||
createBaseEdges(edges, infoThreshold);
|
||||
|
||||
// TODO: if this CV_Assert fails, the caller gets no indication of why the spanning tree could not be built.
|
||||
|
||||
CV_Assert(reduceEdgesToMinSpan(edges));
|
||||
|
||||
return buildTree(edges.front().word1, edges);
|
||||
}
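make() merges the added bag-of-words image descriptors, scores every word pair by mutual information, reduces the surviving edges to a maximum spanning tree, and returns a 4 x nWords CV_64F matrix (parent word, P(zq=1), P(zq=1|zpq=1), P(zq=1|zpq=0)) that the FabMap classes consume. A hedged sketch of the intended call sequence, assuming the per-image CV_32F histograms have already been computed elsewhere (for example with cv::BOWImgDescriptorExtractor):

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/openfabmap.hpp"

// bowRows: one 1 x nWords CV_32F row per training image (assumed precomputed).
cv::Mat buildChowLiuTree(const std::vector<cv::Mat>& bowRows)
{
    cv::of2::ChowLiuTree treeBuilder;
    treeBuilder.add(bowRows);       // add() also accepts a single Mat per call
    return treeBuilder.make(0.0);   // raise the threshold to prune weak edges
}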
|
||||
|
||||
double ChowLiuTree::P(int a, bool za) {
|
||||
|
||||
if(za) {
|
||||
return (0.98 * cv::countNonZero(mergedImgDescriptors.col(a)) /
|
||||
mergedImgDescriptors.rows) + 0.01;
|
||||
} else {
|
||||
return 1 - ((0.98 * cv::countNonZero(mergedImgDescriptors.col(a)) /
|
||||
mergedImgDescriptors.rows) + 0.01);
|
||||
}
|
||||
|
||||
}
|
||||
double ChowLiuTree::JP(int a, bool za, int b, bool zb) {
|
||||
|
||||
double count = 0;
|
||||
for(int i = 0; i < mergedImgDescriptors.rows; i++) {
|
||||
if((mergedImgDescriptors.at<float>(i,a) > 0) == za &&
|
||||
(mergedImgDescriptors.at<float>(i,b) > 0) == zb) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count / mergedImgDescriptors.rows;
|
||||
|
||||
}
|
||||
double ChowLiuTree::CP(int a, bool za, int b, bool zb){
|
||||
|
||||
int count = 0, total = 0;
|
||||
for(int i = 0; i < mergedImgDescriptors.rows; i++) {
|
||||
if((mergedImgDescriptors.at<float>(i,b) > 0) == zb) {
|
||||
total++;
|
||||
if((mergedImgDescriptors.at<float>(i,a) > 0) == za) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
if(total) {
|
||||
return (double)(0.98 * count)/total + 0.01;
|
||||
} else {
|
||||
return (za) ? 0.01 : 0.99;
|
||||
}
|
||||
}
|
||||
|
||||
cv::Mat ChowLiuTree::buildTree(int root_word, std::list<info> &edges) {
|
||||
|
||||
int q = root_word;
|
||||
cv::Mat cltree(4, (int)edges.size()+1, CV_64F);
|
||||
|
||||
cltree.at<double>(0, q) = q;
|
||||
cltree.at<double>(1, q) = P(q, true);
|
||||
cltree.at<double>(2, q) = P(q, true);
|
||||
cltree.at<double>(3, q) = P(q, true);
|
||||
//setting P(zq|zpq) to P(zq) gives the root node of the chow-liu
|
||||
//independence from a parent node.
|
||||
|
||||
//find all children and do the same
|
||||
vector<int> nextqs = extractChildren(edges, q);
|
||||
|
||||
int pq = q;
|
||||
vector<int>::iterator nextq;
|
||||
for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
|
||||
recAddToTree(cltree, *nextq, pq, edges);
|
||||
}
|
||||
|
||||
return cltree;
|
||||
|
||||
|
||||
}
|
||||
|
||||
void ChowLiuTree::recAddToTree(cv::Mat &cltree, int q, int pq,
|
||||
std::list<info>& remaining_edges) {
|
||||
|
||||
cltree.at<double>(0, q) = pq;
|
||||
cltree.at<double>(1, q) = P(q, true);
|
||||
cltree.at<double>(2, q) = CP(q, true, pq, true);
|
||||
cltree.at<double>(3, q) = CP(q, true, pq, false);
|
||||
|
||||
//find all children and do the same
|
||||
vector<int> nextqs = extractChildren(remaining_edges, q);
|
||||
|
||||
pq = q;
|
||||
vector<int>::iterator nextq;
|
||||
for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
|
||||
recAddToTree(cltree, *nextq, pq, remaining_edges);
|
||||
}
|
||||
}
|
||||
|
||||
vector<int> ChowLiuTree::extractChildren(std::list<info> &remaining_edges, int q) {
|
||||
|
||||
std::vector<int> children;
|
||||
std::list<info>::iterator edge = remaining_edges.begin();
|
||||
|
||||
while(edge != remaining_edges.end()) {
|
||||
if(edge->word1 == q) {
|
||||
children.push_back(edge->word2);
|
||||
edge = remaining_edges.erase(edge);
|
||||
continue;
|
||||
}
|
||||
if(edge->word2 == q) {
|
||||
children.push_back(edge->word1);
|
||||
edge = remaining_edges.erase(edge);
|
||||
continue;
|
||||
}
|
||||
edge++;
|
||||
}
|
||||
|
||||
return children;
|
||||
}
|
||||
|
||||
bool ChowLiuTree::sortInfoScores(const info& first, const info& second) {
|
||||
return first.score > second.score;
|
||||
}
|
||||
|
||||
double ChowLiuTree::calcMutInfo(int word1, int word2) {
|
||||
double accumulation = 0;
|
||||
|
||||
double P00 = JP(word1, false, word2, false);
|
||||
if(P00) accumulation += P00 * log(P00 / (P(word1, false)*P(word2, false)));
|
||||
|
||||
double P01 = JP(word1, false, word2, true);
|
||||
if(P01) accumulation += P01 * log(P01 / (P(word1, false)*P(word2, true)));
|
||||
|
||||
double P10 = JP(word1, true, word2, false);
|
||||
if(P10) accumulation += P10 * log(P10 / (P(word1, true)*P(word2, false)));
|
||||
|
||||
double P11 = JP(word1, true, word2, true);
|
||||
if(P11) accumulation += P11 * log(P11 / (P(word1, true)*P(word2, true)));
|
||||
|
||||
return accumulation;
|
||||
}
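calcMutInfo() accumulates the mutual information of the two binary word-occurrence variables over the four joint outcomes, skipping terms whose joint probability is zero (the if(Pxy) guards):

I(z_{w_1}, z_{w_2}) = \sum_{a\in\{0,1\}} \sum_{b\in\{0,1\}} P(z_{w_1}{=}a, z_{w_2}{=}b)\,\log\frac{P(z_{w_1}{=}a, z_{w_2}{=}b)}{P(z_{w_1}{=}a)\,P(z_{w_2}{=}b)}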
|
||||
|
||||
void ChowLiuTree::createBaseEdges(std::list<info>& edges, double infoThreshold) {
|
||||
|
||||
int nWords = imgDescriptors[0].cols;
|
||||
info mutInfo;
|
||||
|
||||
for(int word1 = 0; word1 < nWords; word1++) {
|
||||
for(int word2 = word1 + 1; word2 < nWords; word2++) {
|
||||
mutInfo.word1 = (short)word1;
|
||||
mutInfo.word2 = (short)word2;
|
||||
mutInfo.score = (float)calcMutInfo(word1, word2);
|
||||
if(mutInfo.score >= infoThreshold)
|
||||
edges.push_back(mutInfo);
|
||||
}
|
||||
}
|
||||
edges.sort(sortInfoScores);
|
||||
}
|
||||
|
||||
bool ChowLiuTree::reduceEdgesToMinSpan(std::list<info>& edges) {
|
||||
|
||||
std::map<int, int> groups;
|
||||
std::map<int, int>::iterator groupIt;
|
||||
for(int i = 0; i < imgDescriptors[0].cols; i++) groups[i] = i;
|
||||
int group1, group2;
|
||||
|
||||
std::list<info>::iterator edge = edges.begin();
|
||||
while(edge != edges.end()) {
|
||||
if(groups[edge->word1] != groups[edge->word2]) {
|
||||
group1 = groups[edge->word1];
|
||||
group2 = groups[edge->word2];
|
||||
for(groupIt = groups.begin(); groupIt != groups.end(); groupIt++)
|
||||
if(groupIt->second == group2) groupIt->second = group1;
|
||||
edge++;
|
||||
} else {
|
||||
edge = edges.erase(edge);
|
||||
}
|
||||
}
|
||||
|
||||
if(edges.size() != (unsigned int)imgDescriptors[0].cols - 1) {
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -100,143 +100,143 @@
|
||||
namespace cv
|
||||
{
|
||||
|
||||
class MagnoRetinaFilter: public BasicRetinaFilter
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* constructor parameters are only linked to image input size
|
||||
* @param NBrows: number of rows of the input image
|
||||
* @param NBcolumns: number of columns of the input image
|
||||
*/
|
||||
MagnoRetinaFilter(const unsigned int NBrows, const unsigned int NBcolumns);
|
||||
|
||||
|
||||
/**
|
||||
* destructor
|
||||
*/
|
||||
virtual ~MagnoRetinaFilter();
|
||||
|
||||
/**
|
||||
* function that clears all buffers of the object
|
||||
*/
|
||||
void clearAllBuffers();
|
||||
|
||||
/**
|
||||
* resize retina magno filter object (resize all allocated buffers)
|
||||
* @param NBrows: the new height size
|
||||
* @param NBcolumns: the new width size
|
||||
*/
|
||||
void resize(const unsigned int NBrows, const unsigned int NBcolumns);
|
||||
|
||||
/**
|
||||
* set parameters values
|
||||
* @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
|
||||
* @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response)
|
||||
* @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
|
||||
* @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass filter of the magnocellular way (motion information channel), unit is frames, typical value is 5
|
||||
* @param localAdaptIntegration_tau: specifies the temporal constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
* @param localAdaptIntegration_k: specifies the spatial constant of the low pass filter involved in the computation of the local "motion mean" for the local adaptation computation
|
||||
*/
|
||||
void setCoefficientsTable(const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float localAdaptIntegration_tau, const float localAdaptIntegration_k);
|
||||
|
||||
/**
|
||||
* launch filter that runs all the IPL magno filter (model of the magnocellular channel of the Inner Plexiform Layer of the retina)
|
||||
* @param OPL_ON: the output of the bipolar ON cells of the retina (available from the ParvoRetinaFilter class (getBipolarCellsON() function)
|
||||
* @param OPL_OFF: the output of the bipolar OFF cells of the retina (available from the ParvoRetinaFilter class (getBipolarCellsOFF() function)
|
||||
* @return the processed result without post-processing
|
||||
*/
|
||||
const std::valarray<float> &runFilter(const std::valarray<float> &OPL_ON, const std::valarray<float> &OPL_OFF);
|
||||
|
||||
/**
|
||||
* @return the Magnocellular ON channel filtering output
|
||||
*/
|
||||
inline const std::valarray<float> &getMagnoON() const {return _magnoXOutputON;};
|
||||
|
||||
/**
|
||||
* @return the Magnocellular OFF channel filtering output
|
||||
*/
|
||||
inline const std::valarray<float> &getMagnoOFF() const {return _magnoXOutputOFF;};
|
||||
|
||||
/**
|
||||
* @return the Magnocellular Y (sum of the ON and OFF magno channels) filtering output
|
||||
*/
|
||||
inline const std::valarray<float> &getMagnoYsaturated() const {return *_magnoYsaturated;};
|
||||
|
||||
/**
|
||||
* applies an image normalization which saturates the high output values by the use of an asymmetric sigmoid
|
||||
*/
|
||||
inline void normalizeGrayOutputNearZeroCentreredSigmoide(){_filterOutput.normalizeGrayOutputNearZeroCentreredSigmoide(&(*_magnoYOutput)[0], &(*_magnoYsaturated)[0]);};
|
||||
|
||||
/**
|
||||
* @return the horizontal cells' temporal constant
|
||||
*/
|
||||
inline float getTemporalConstant(){return this->_filteringCoeficientsTable[2];};
|
||||
|
||||
private:
|
||||
|
||||
// related pointers to these buffers
|
||||
std::valarray<float> _previousInput_ON;
|
||||
std::valarray<float> _previousInput_OFF;
|
||||
std::valarray<float> _amacrinCellsTempOutput_ON;
|
||||
std::valarray<float> _amacrinCellsTempOutput_OFF;
|
||||
std::valarray<float> _magnoXOutputON;
|
||||
std::valarray<float> _magnoXOutputOFF;
|
||||
std::valarray<float> _localProcessBufferON;
|
||||
std::valarray<float> _localProcessBufferOFF;
|
||||
// reference to parent buffers and allow better readability
|
||||
TemplateBuffer<float> *_magnoYOutput;
|
||||
std::valarray<float> *_magnoYsaturated;
|
||||
|
||||
// variables
|
||||
float _temporalCoefficient;
|
||||
|
||||
// amacrine cells filter : high pass temporal filter
|
||||
void _amacrineCellsComputing(const float *ONinput, const float *OFFinput);
|
||||
#ifdef MAKE_PARALLEL
|
||||
/******************************************************
|
||||
** IF some parallelizing thread methods are available, then, main loops are parallelized using these functors
|
||||
** ==> main idea: parallelise the main filter loops; only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
|
||||
** ==> functors names = Parallel_$$$ where $$$= the name of the serial method that is parallelised
|
||||
** ==> functors constructors can differ from the parameters used with their related serial functions
|
||||
*/
|
||||
    class Parallel_amacrineCellsComputing: public cv::ParallelLoopBody
    {
    private:
        const float *OPL_ON, *OPL_OFF;
        float *previousInput_ON, *previousInput_OFF, *amacrinCellsTempOutput_ON, *amacrinCellsTempOutput_OFF;
        float temporalCoefficient;
    public:
        Parallel_amacrineCellsComputing(const float *OPL_ON_PTR, const float *OPL_OFF_PTR, float *previousInput_ON_PTR, float *previousInput_OFF_PTR, float *amacrinCellsTempOutput_ON_PTR, float *amacrinCellsTempOutput_OFF_PTR, float temporalCoefficientVal)
            :OPL_ON(OPL_ON_PTR), OPL_OFF(OPL_OFF_PTR), previousInput_ON(previousInput_ON_PTR), previousInput_OFF(previousInput_OFF_PTR), amacrinCellsTempOutput_ON(amacrinCellsTempOutput_ON_PTR), amacrinCellsTempOutput_OFF(amacrinCellsTempOutput_OFF_PTR), temporalCoefficient(temporalCoefficientVal) {}

        virtual void operator()( const Range& r ) const {
            register const float *OPL_ON_PTR=OPL_ON+r.start;
            register const float *OPL_OFF_PTR=OPL_OFF+r.start;
            register float *previousInput_ON_PTR= previousInput_ON+r.start;
            register float *previousInput_OFF_PTR= previousInput_OFF+r.start;
            register float *amacrinCellsTempOutput_ON_PTR= amacrinCellsTempOutput_ON+r.start;
            register float *amacrinCellsTempOutput_OFF_PTR= amacrinCellsTempOutput_OFF+r.start;

            for (int IDpixel=r.start ; IDpixel!=r.end; ++IDpixel)
            {
                /* Compute ON and OFF amacrin cells high pass temporal filter */
                float magnoXonPixelResult = temporalCoefficient*(*amacrinCellsTempOutput_ON_PTR+ *OPL_ON_PTR-*previousInput_ON_PTR);
                *(amacrinCellsTempOutput_ON_PTR++)=((float)(magnoXonPixelResult>0))*magnoXonPixelResult;

                float magnoXoffPixelResult = temporalCoefficient*(*amacrinCellsTempOutput_OFF_PTR+ *OPL_OFF_PTR-*previousInput_OFF_PTR);
                *(amacrinCellsTempOutput_OFF_PTR++)=((float)(magnoXoffPixelResult>0))*magnoXoffPixelResult;

                /* prepare next loop */
                *(previousInput_ON_PTR++)=*(OPL_ON_PTR++);
                *(previousInput_OFF_PTR++)=*(OPL_OFF_PTR++);
            }
        }
    };
#endif
};

}
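This functor is intended to be dispatched over the pixel buffers with cv::parallel_for_; a hedged sketch of that call as it would appear inside MagnoRetinaFilter::_amacrineCellsComputing(), where the buffer names are the class members declared above and NBpixels stands for the number of pixels in the frame:

cv::parallel_for_(cv::Range(0, NBpixels),
    Parallel_amacrineCellsComputing(ONinput, OFFinput,
        &_previousInput_ON[0], &_previousInput_OFF[0],
        &_amacrinCellsTempOutput_ON[0], &_amacrinCellsTempOutput_OFF[0],
        _temporalCoefficient));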
modules/contrib/src/openfabmap.cpp (new file, 779 lines)
@@ -0,0 +1,779 @@
/* (Same OpenFABMAP/BSD license header as in bowmsctrainer.cpp above.) */
#include "precomp.hpp"
|
||||
#include "opencv2/contrib/openfabmap.hpp"
|
||||
|
||||
|
||||
/*
|
||||
Calculate the sum of two log likelihoods
|
||||
*/
|
||||
namespace cv {
|
||||
|
||||
namespace of2 {
|
||||
|
||||
static double logsumexp(double a, double b) {
|
||||
return a > b ? log(1 + exp(b - a)) + a : log(1 + exp(a - b)) + b;
|
||||
}
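logsumexp() returns the log of a sum of two likelihoods given only their logarithms, anchored at the larger argument so that the exponential cannot overflow:

\log\!\left(e^{a}+e^{b}\right) = \max(a,b) + \log\!\left(1+e^{-|a-b|}\right)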
|
||||
|
||||
FabMap::FabMap(const Mat& _clTree, double _PzGe,
|
||||
double _PzGNe, int _flags, int _numSamples) :
|
||||
clTree(_clTree), PzGe(_PzGe), PzGNe(_PzGNe), flags(
|
||||
_flags), numSamples(_numSamples) {
|
||||
|
||||
CV_Assert(flags & MEAN_FIELD || flags & SAMPLED);
|
||||
CV_Assert(flags & NAIVE_BAYES || flags & CHOW_LIU);
|
||||
if (flags & NAIVE_BAYES) {
|
||||
PzGL = &FabMap::PzqGL;
|
||||
} else {
|
||||
PzGL = &FabMap::PzqGzpqL;
|
||||
}
|
||||
|
||||
//check for a valid Chow-Liu tree
|
||||
CV_Assert(clTree.type() == CV_64FC1);
|
||||
cv::checkRange(clTree.row(0), false, NULL, 0, clTree.cols);
|
||||
cv::checkRange(clTree.row(1), false, NULL, DBL_MIN, 1);
|
||||
cv::checkRange(clTree.row(2), false, NULL, DBL_MIN, 1);
|
||||
cv::checkRange(clTree.row(3), false, NULL, DBL_MIN, 1);
|
||||
|
||||
// TODO: Add default values for member variables
|
||||
Pnew = 0.9;
|
||||
sFactor = 0.99;
|
||||
mBias = 0.5;
|
||||
}
|
||||
|
||||
FabMap::~FabMap() {
|
||||
}
|
||||
|
||||
const std::vector<cv::Mat>& FabMap::getTrainingImgDescriptors() const {
|
||||
return trainingImgDescriptors;
|
||||
}
|
||||
|
||||
const std::vector<cv::Mat>& FabMap::getTestImgDescriptors() const {
|
||||
return testImgDescriptors;
|
||||
}
|
||||
|
||||
void FabMap::addTraining(const Mat& queryImgDescriptor) {
|
||||
CV_Assert(!queryImgDescriptor.empty());
|
||||
vector<Mat> queryImgDescriptors;
|
||||
for (int i = 0; i < queryImgDescriptor.rows; i++) {
|
||||
queryImgDescriptors.push_back(queryImgDescriptor.row(i));
|
||||
}
|
||||
addTraining(queryImgDescriptors);
|
||||
}
|
||||
|
||||
void FabMap::addTraining(const vector<Mat>& queryImgDescriptors) {
|
||||
for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
|
||||
CV_Assert(!queryImgDescriptors[i].empty());
|
||||
CV_Assert(queryImgDescriptors[i].rows == 1);
|
||||
CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
|
||||
CV_Assert(queryImgDescriptors[i].type() == CV_32F);
|
||||
trainingImgDescriptors.push_back(queryImgDescriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void FabMap::add(const cv::Mat& queryImgDescriptor) {
|
||||
CV_Assert(!queryImgDescriptor.empty());
|
||||
vector<Mat> queryImgDescriptors;
|
||||
for (int i = 0; i < queryImgDescriptor.rows; i++) {
|
||||
queryImgDescriptors.push_back(queryImgDescriptor.row(i));
|
||||
}
|
||||
add(queryImgDescriptors);
|
||||
}
|
||||
|
||||
void FabMap::add(const std::vector<cv::Mat>& queryImgDescriptors) {
|
||||
for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
|
||||
CV_Assert(!queryImgDescriptors[i].empty());
|
||||
CV_Assert(queryImgDescriptors[i].rows == 1);
|
||||
CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
|
||||
CV_Assert(queryImgDescriptors[i].type() == CV_32F);
|
||||
testImgDescriptors.push_back(queryImgDescriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void FabMap::compare(const Mat& queryImgDescriptor,
|
||||
vector<IMatch>& matches, bool addQuery,
|
||||
const Mat& mask) {
|
||||
CV_Assert(!queryImgDescriptor.empty());
|
||||
vector<Mat> queryImgDescriptors;
|
||||
for (int i = 0; i < queryImgDescriptor.rows; i++) {
|
||||
queryImgDescriptors.push_back(queryImgDescriptor.row(i));
|
||||
}
|
||||
compare(queryImgDescriptors,matches,addQuery,mask);
|
||||
}
|
||||
|
||||
void FabMap::compare(const Mat& queryImgDescriptor,
|
||||
const Mat& testImgDescriptor, vector<IMatch>& matches,
|
||||
const Mat& mask) {
|
||||
CV_Assert(!queryImgDescriptor.empty());
|
||||
vector<Mat> queryImgDescriptors;
|
||||
for (int i = 0; i < queryImgDescriptor.rows; i++) {
|
||||
queryImgDescriptors.push_back(queryImgDescriptor.row(i));
|
||||
}
|
||||
|
||||
CV_Assert(!testImgDescriptor.empty());
|
||||
vector<Mat> _testImgDescriptors;
|
||||
for (int i = 0; i < testImgDescriptor.rows; i++) {
|
||||
_testImgDescriptors.push_back(testImgDescriptor.row(i));
|
||||
}
|
||||
compare(queryImgDescriptors,_testImgDescriptors,matches,mask);
|
||||
|
||||
}
|
||||
|
||||
void FabMap::compare(const Mat& queryImgDescriptor,
|
||||
const vector<Mat>& _testImgDescriptors,
|
||||
vector<IMatch>& matches, const Mat& mask) {
|
||||
CV_Assert(!queryImgDescriptor.empty());
|
||||
vector<Mat> queryImgDescriptors;
|
||||
for (int i = 0; i < queryImgDescriptor.rows; i++) {
|
||||
queryImgDescriptors.push_back(queryImgDescriptor.row(i));
|
||||
}
|
||||
compare(queryImgDescriptors,_testImgDescriptors,matches,mask);
|
||||
}
|
||||
|
||||
void FabMap::compare(const vector<Mat>& queryImgDescriptors,
|
||||
vector<IMatch>& matches, bool addQuery, const Mat& /*mask*/) {
|
||||
|
||||
// TODO: add first query if empty (is this necessary)
|
||||
|
||||
for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
|
||||
CV_Assert(!queryImgDescriptors[i].empty());
|
||||
CV_Assert(queryImgDescriptors[i].rows == 1);
|
||||
CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
|
||||
CV_Assert(queryImgDescriptors[i].type() == CV_32F);
|
||||
|
||||
// TODO: add mask
|
||||
|
||||
compareImgDescriptor(queryImgDescriptors[i],
|
||||
(int)i, testImgDescriptors, matches);
|
||||
if (addQuery)
|
||||
add(queryImgDescriptors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void FabMap::compare(const vector<Mat>& queryImgDescriptors,
|
||||
const vector<Mat>& _testImgDescriptors,
|
||||
vector<IMatch>& matches, const Mat& /*mask*/) {
|
||||
if (_testImgDescriptors[0].data != this->testImgDescriptors[0].data) {
|
||||
CV_Assert(!(flags & MOTION_MODEL));
|
||||
for (size_t i = 0; i < _testImgDescriptors.size(); i++) {
|
||||
CV_Assert(!_testImgDescriptors[i].empty());
|
||||
CV_Assert(_testImgDescriptors[i].rows == 1);
|
||||
CV_Assert(_testImgDescriptors[i].cols == clTree.cols);
|
||||
CV_Assert(_testImgDescriptors[i].type() == CV_32F);
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
|
||||
CV_Assert(!queryImgDescriptors[i].empty());
|
||||
CV_Assert(queryImgDescriptors[i].rows == 1);
|
||||
CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
|
||||
CV_Assert(queryImgDescriptors[i].type() == CV_32F);
|
||||
|
||||
// TODO: add mask
|
||||
|
||||
compareImgDescriptor(queryImgDescriptors[i],
|
||||
(int)i, _testImgDescriptors, matches);
|
||||
}
|
||||
}
|
||||
|
||||
void FabMap::compareImgDescriptor(const Mat& queryImgDescriptor,
|
||||
int queryIndex, const vector<Mat>& _testImgDescriptors,
|
||||
vector<IMatch>& matches) {
|
||||
|
||||
vector<IMatch> queryMatches;
|
||||
queryMatches.push_back(IMatch(queryIndex,-1,
|
||||
getNewPlaceLikelihood(queryImgDescriptor),0));
|
||||
getLikelihoods(queryImgDescriptor,_testImgDescriptors,queryMatches);
|
||||
normaliseDistribution(queryMatches);
|
||||
for (size_t j = 1; j < queryMatches.size(); j++) {
|
||||
queryMatches[j].queryIdx = queryIndex;
|
||||
}
|
||||
matches.insert(matches.end(), queryMatches.begin(), queryMatches.end());
|
||||
}
|
||||
|
||||
void FabMap::getLikelihoods(const Mat& /*queryImgDescriptor*/,
|
||||
const vector<Mat>& /*testImgDescriptors*/, vector<IMatch>& /*matches*/) {
|
||||
|
||||
}
|
||||
|
||||
double FabMap::getNewPlaceLikelihood(const Mat& queryImgDescriptor) {
|
||||
if (flags & MEAN_FIELD) {
|
||||
double logP = 0;
|
||||
bool zq, zpq;
|
||||
if(flags & NAIVE_BAYES) {
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
zq = queryImgDescriptor.at<float>(0,q) > 0;
|
||||
|
||||
logP += log(Pzq(q, false) * PzqGeq(zq, false) +
|
||||
Pzq(q, true) * PzqGeq(zq, true));
|
||||
}
|
||||
} else {
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
zq = queryImgDescriptor.at<float>(0,q) > 0;
|
||||
zpq = queryImgDescriptor.at<float>(0,pq(q)) > 0;
|
||||
|
||||
double alpha, beta, p;
|
||||
alpha = Pzq(q, zq) * PzqGeq(!zq, false) * PzqGzpq(q, !zq, zpq);
|
||||
beta = Pzq(q, !zq) * PzqGeq(zq, false) * PzqGzpq(q, zq, zpq);
|
||||
p = Pzq(q, false) * beta / (alpha + beta);
|
||||
|
||||
alpha = Pzq(q, zq) * PzqGeq(!zq, true) * PzqGzpq(q, !zq, zpq);
|
||||
beta = Pzq(q, !zq) * PzqGeq(zq, true) * PzqGzpq(q, zq, zpq);
|
||||
p += Pzq(q, true) * beta / (alpha + beta);
|
||||
|
||||
logP += log(p);
|
||||
}
|
||||
}
|
||||
return logP;
|
||||
}
|
||||
|
||||
if (flags & SAMPLED) {
|
||||
CV_Assert(!trainingImgDescriptors.empty());
|
||||
CV_Assert(numSamples > 0);
|
||||
|
||||
vector<Mat> sampledImgDescriptors;
|
||||
|
||||
// TODO: this method can result in the same sample being added
|
||||
// multiple times. Is this desired?
|
||||
|
||||
for (int i = 0; i < numSamples; i++) {
|
||||
int index = rand() % trainingImgDescriptors.size();
|
||||
sampledImgDescriptors.push_back(trainingImgDescriptors[index]);
|
||||
}
|
||||
|
||||
vector<IMatch> matches;
|
||||
getLikelihoods(queryImgDescriptor,sampledImgDescriptors,matches);
|
||||
|
||||
double averageLogLikelihood = -DBL_MAX + matches.front().likelihood + 1;
|
||||
for (int i = 0; i < numSamples; i++) {
|
||||
averageLogLikelihood =
|
||||
logsumexp(matches[i].likelihood, averageLogLikelihood);
|
||||
}
|
||||
|
||||
return averageLogLikelihood - log((double)numSamples);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void FabMap::normaliseDistribution(vector<IMatch>& matches) {
|
||||
CV_Assert(!matches.empty());
|
||||
|
||||
if (flags & MOTION_MODEL) {
|
||||
|
||||
matches[0].match = matches[0].likelihood + log(Pnew);
|
||||
|
||||
if (priorMatches.size() > 2) {
|
||||
matches[1].match = matches[1].likelihood;
|
||||
matches[1].match += log(
|
||||
(2 * (1-mBias) * priorMatches[1].match +
|
||||
priorMatches[1].match +
|
||||
2 * mBias * priorMatches[2].match) / 3);
|
||||
for (size_t i = 2; i < priorMatches.size()-1; i++) {
|
||||
matches[i].match = matches[i].likelihood;
|
||||
matches[i].match += log(
|
||||
(2 * (1-mBias) * priorMatches[i-1].match +
|
||||
priorMatches[i].match +
|
||||
2 * mBias * priorMatches[i+1].match)/3);
|
||||
}
|
||||
matches[priorMatches.size()-1].match =
|
||||
matches[priorMatches.size()-1].likelihood;
|
||||
matches[priorMatches.size()-1].match += log(
|
||||
(2 * (1-mBias) * priorMatches[priorMatches.size()-2].match +
|
||||
priorMatches[priorMatches.size()-1].match +
|
||||
2 * mBias * priorMatches[priorMatches.size()-1].match)/3);
|
||||
|
||||
for(size_t i = priorMatches.size(); i < matches.size(); i++) {
|
||||
matches[i].match = matches[i].likelihood;
|
||||
}
|
||||
} else {
|
||||
for(size_t i = 1; i < matches.size(); i++) {
|
||||
matches[i].match = matches[i].likelihood;
|
||||
}
|
||||
}
|
||||
|
||||
double logsum = -DBL_MAX + matches.front().match + 1;
|
||||
|
||||
//calculate the normalising constant
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
logsum = logsumexp(logsum, matches[i].match);
|
||||
}
|
||||
|
||||
//normalise
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
matches[i].match = exp(matches[i].match - logsum);
|
||||
}
|
||||
|
||||
//smooth final probabilities
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
matches[i].match = sFactor*matches[i].match +
|
||||
(1 - sFactor)/matches.size();
|
||||
}
|
||||
|
||||
//update our location priors
|
||||
priorMatches = matches;
|
||||
|
||||
} else {
|
||||
|
||||
double logsum = -DBL_MAX + matches.front().likelihood + 1;
|
||||
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
logsum = logsumexp(logsum, matches[i].likelihood);
|
||||
}
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
matches[i].match = exp(matches[i].likelihood - logsum);
|
||||
}
|
||||
for (size_t i = 0; i < matches.size(); i++) {
|
||||
matches[i].match = sFactor*matches[i].match +
|
||||
(1 - sFactor)/matches.size();
|
||||
}
|
||||
}
|
||||
}
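The normalisation above is a softmax over the accumulated log-scores followed by mixing in a small uniform component, with \ell_i the match (or likelihood) log-score, \sigma the smoothing factor sFactor and N the number of candidate places:

p_i = \frac{e^{\ell_i}}{\sum_j e^{\ell_j}}, \qquad p_i \leftarrow \sigma\, p_i + \frac{1-\sigma}{N}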
|
||||
|
||||
int FabMap::pq(int q) {
|
||||
return (int)clTree.at<double>(0,q);
|
||||
}
|
||||
|
||||
double FabMap::Pzq(int q, bool zq) {
|
||||
return (zq) ? clTree.at<double>(1,q) : 1 - clTree.at<double>(1,q);
|
||||
}
|
||||
|
||||
double FabMap::PzqGzpq(int q, bool zq, bool zpq) {
|
||||
if (zpq) {
|
||||
return (zq) ? clTree.at<double>(2,q) : 1 - clTree.at<double>(2,q);
|
||||
} else {
|
||||
return (zq) ? clTree.at<double>(3,q) : 1 - clTree.at<double>(3,q);
|
||||
}
|
||||
}
|
||||
|
||||
double FabMap::PzqGeq(bool zq, bool eq) {
|
||||
if (eq) {
|
||||
return (zq) ? PzGe : 1 - PzGe;
|
||||
} else {
|
||||
return (zq) ? PzGNe : 1 - PzGNe;
|
||||
}
|
||||
}
|
||||
|
||||
double FabMap::PeqGL(int q, bool Lzq, bool eq) {
|
||||
double alpha, beta;
|
||||
alpha = PzqGeq(Lzq, true) * Pzq(q, true);
|
||||
beta = PzqGeq(Lzq, false) * Pzq(q, false);
|
||||
|
||||
if (eq) {
|
||||
return alpha / (alpha + beta);
|
||||
} else {
|
||||
return 1 - (alpha / (alpha + beta));
|
||||
}
|
||||
}
|
||||
|
||||
double FabMap::PzqGL(int q, bool zq, bool /*zpq*/, bool Lzq) {
|
||||
return PeqGL(q, Lzq, false) * PzqGeq(zq, false) +
|
||||
PeqGL(q, Lzq, true) * PzqGeq(zq, true);
|
||||
}
|
||||
|
||||
|
||||
double FabMap::PzqGzpqL(int q, bool zq, bool zpq, bool Lzq) {
|
||||
double p;
|
||||
double alpha, beta;
|
||||
|
||||
alpha = Pzq(q, zq) * PzqGeq(!zq, false) * PzqGzpq(q, !zq, zpq);
|
||||
beta = Pzq(q, !zq) * PzqGeq( zq, false) * PzqGzpq(q, zq, zpq);
|
||||
p = PeqGL(q, Lzq, false) * beta / (alpha + beta);
|
||||
|
||||
alpha = Pzq(q, zq) * PzqGeq(!zq, true) * PzqGzpq(q, !zq, zpq);
|
||||
beta = Pzq(q, !zq) * PzqGeq( zq, true) * PzqGzpq(q, zq, zpq);
|
||||
p += PeqGL(q, Lzq, true) * beta / (alpha + beta);
|
||||
|
||||
return p;
|
||||
}
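PzqGzpqL() marginalises the observation model over the hidden word-generating variable e_q, combining the detector terms PzqGeq() with the Chow-Liu conditional PzqGzpq(); the alpha/beta ratios above are how the code evaluates the per-e_q term:

p(z_q \mid z_{p_q}, \mathcal{L}) = \sum_{e_q\in\{0,1\}} p(z_q \mid e_q, z_{p_q})\, p(e_q \mid \mathcal{L})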
|
||||
|
||||
|
||||
FabMap1::FabMap1(const Mat& _clTree, double _PzGe, double _PzGNe, int _flags,
|
||||
int _numSamples) : FabMap(_clTree, _PzGe, _PzGNe, _flags,
|
||||
_numSamples) {
|
||||
}
|
||||
|
||||
FabMap1::~FabMap1() {
|
||||
}
|
||||
|
||||
void FabMap1::getLikelihoods(const Mat& queryImgDescriptor,
|
||||
const vector<Mat>& testImgDescriptors, vector<IMatch>& matches) {
|
||||
|
||||
for (size_t i = 0; i < testImgDescriptors.size(); i++) {
|
||||
bool zq, zpq, Lzq;
|
||||
double logP = 0;
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
|
||||
zq = queryImgDescriptor.at<float>(0,q) > 0;
|
||||
zpq = queryImgDescriptor.at<float>(0,pq(q)) > 0;
|
||||
Lzq = testImgDescriptors[i].at<float>(0,q) > 0;
|
||||
|
||||
logP += log((this->*PzGL)(q, zq, zpq, Lzq));
|
||||
|
||||
}
|
||||
matches.push_back(IMatch(0,(int)i,logP,0));
|
||||
}
|
||||
}
|
||||
|
||||
FabMapLUT::FabMapLUT(const Mat& _clTree, double _PzGe, double _PzGNe,
|
||||
int _flags, int _numSamples, int _precision) :
|
||||
FabMap(_clTree, _PzGe, _PzGNe, _flags, _numSamples), precision(_precision) {
|
||||
|
||||
int nWords = clTree.cols;
|
||||
double precFactor = (double)pow(10.0, precision);
|
||||
|
||||
table = new int[nWords][8];
|
||||
|
||||
for (int q = 0; q < nWords; q++) {
|
||||
for (unsigned char i = 0; i < 8; i++) {
|
||||
|
||||
bool Lzq = (bool) ((i >> 2) & 0x01);
|
||||
bool zq = (bool) ((i >> 1) & 0x01);
|
||||
bool zpq = (bool) (i & 1);
|
||||
|
||||
table[q][i] = -(int)(log((this->*PzGL)(q, zq, zpq, Lzq))
|
||||
* precFactor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
FabMapLUT::~FabMapLUT() {
|
||||
delete[] table;
|
||||
}
|
||||
|
||||
void FabMapLUT::getLikelihoods(const Mat& queryImgDescriptor,
|
||||
const vector<Mat>& testImgDescriptors, vector<IMatch>& matches) {
|
||||
|
||||
double precFactor = (double)pow(10.0, -precision);
|
||||
|
||||
for (size_t i = 0; i < testImgDescriptors.size(); i++) {
|
||||
unsigned long long int logP = 0;
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
logP += table[q][(queryImgDescriptor.at<float>(0,pq(q)) > 0) +
|
||||
((queryImgDescriptor.at<float>(0, q) > 0) << 1) +
|
||||
((testImgDescriptors[i].at<float>(0,q) > 0) << 2)];
|
||||
}
|
||||
matches.push_back(IMatch(0,(int)i,-precFactor*(double)logP,0));
|
||||
}
|
||||
}
|
||||
|
||||
FabMapFBO::FabMapFBO(const Mat& _clTree, double _PzGe, double _PzGNe,
|
||||
int _flags, int _numSamples, double _rejectionThreshold,
|
||||
double _PsGd, int _bisectionStart, int _bisectionIts) :
|
||||
FabMap(_clTree, _PzGe, _PzGNe, _flags, _numSamples), PsGd(_PsGd),
|
||||
rejectionThreshold(_rejectionThreshold), bisectionStart(_bisectionStart),
|
||||
bisectionIts(_bisectionIts) {
|
||||
}
|
||||
|
||||
|
||||
FabMapFBO::~FabMapFBO() {
|
||||
}
|
||||
|
||||
void FabMapFBO::getLikelihoods(const Mat& queryImgDescriptor,
|
||||
const vector<Mat>& testImgDescriptors, vector<IMatch>& matches) {
|
||||
|
||||
std::multiset<WordStats> wordData;
|
||||
setWordStatistics(queryImgDescriptor, wordData);
|
||||
|
||||
vector<int> matchIndices;
|
||||
vector<IMatch> queryMatches;
|
||||
|
||||
for (size_t i = 0; i < testImgDescriptors.size(); i++) {
|
||||
queryMatches.push_back(IMatch(0,(int)i,0,0));
|
||||
matchIndices.push_back((int)i);
|
||||
}
|
||||
|
||||
double currBest = -DBL_MAX;
|
||||
double bailedOut = DBL_MAX;
|
||||
|
||||
for (std::multiset<WordStats>::iterator wordIter = wordData.begin();
|
||||
wordIter != wordData.end(); wordIter++) {
|
||||
bool zq = queryImgDescriptor.at<float>(0,wordIter->q) > 0;
|
||||
bool zpq = queryImgDescriptor.at<float>(0,pq(wordIter->q)) > 0;
|
||||
|
||||
currBest = -DBL_MAX;
|
||||
|
||||
for (size_t i = 0; i < matchIndices.size(); i++) {
|
||||
bool Lzq =
|
||||
testImgDescriptors[matchIndices[i]].at<float>(0,wordIter->q) > 0;
|
||||
queryMatches[matchIndices[i]].likelihood +=
|
||||
log((this->*PzGL)(wordIter->q,zq,zpq,Lzq));
|
||||
currBest =
|
||||
std::max(queryMatches[matchIndices[i]].likelihood, currBest);
|
||||
}
|
||||
|
||||
if (matchIndices.size() == 1)
|
||||
continue;
|
||||
|
||||
double delta = std::max(limitbisection(wordIter->V, wordIter->M),
|
||||
-log(rejectionThreshold));
|
||||
|
||||
vector<int>::iterator matchIter = matchIndices.begin();
|
||||
while (matchIter != matchIndices.end()) {
|
||||
if (currBest - queryMatches[*matchIter].likelihood > delta) {
|
||||
queryMatches[*matchIter].likelihood = bailedOut;
|
||||
matchIter = matchIndices.erase(matchIter);
|
||||
} else {
|
||||
matchIter++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < queryMatches.size(); i++) {
|
||||
if (queryMatches[i].likelihood == bailedOut) {
|
||||
queryMatches[i].likelihood = currBest + log(rejectionThreshold);
|
||||
}
|
||||
}
|
||||
matches.insert(matches.end(), queryMatches.begin(), queryMatches.end());
|
||||
|
||||
}
|
||||
|
||||
void FabMapFBO::setWordStatistics(const Mat& queryImgDescriptor,
|
||||
std::multiset<WordStats>& wordData) {
|
||||
//words are sorted according to information = -ln(P(zq|zpq))
|
||||
//in non-log format this is lowest probability first
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
wordData.insert(WordStats(q,PzqGzpq(q,
|
||||
queryImgDescriptor.at<float>(0,q) > 0,
|
||||
queryImgDescriptor.at<float>(0,pq(q)) > 0)));
|
||||
}
|
||||
|
||||
double d = 0, V = 0, M = 0;
|
||||
bool zq, zpq;
|
||||
|
||||
for (std::multiset<WordStats>::reverse_iterator wordIter =
|
||||
wordData.rbegin();
|
||||
wordIter != wordData.rend(); wordIter++) {
|
||||
|
||||
zq = queryImgDescriptor.at<float>(0,wordIter->q) > 0;
|
||||
zpq = queryImgDescriptor.at<float>(0,pq(wordIter->q)) > 0;
|
||||
|
||||
d = log((this->*PzGL)(wordIter->q, zq, zpq, true)) -
|
||||
log((this->*PzGL)(wordIter->q, zq, zpq, false));
|
||||
|
||||
V += pow(d, 2.0) * 2 *
|
||||
(Pzq(wordIter->q, true) - pow(Pzq(wordIter->q, true), 2.0));
|
||||
M = std::max(M, fabs(d));
|
||||
|
||||
wordIter->V = V;
|
||||
wordIter->M = M;
|
||||
}
|
||||
}
|
||||
|
||||
double FabMapFBO::limitbisection(double v, double m) {
|
||||
double midpoint, left_val, mid_val;
|
||||
double left = 0, right = bisectionStart;
|
||||
|
||||
left_val = bennettInequality(v, m, left) - PsGd;
|
||||
|
||||
for(int i = 0; i < bisectionIts; i++) {
|
||||
|
||||
midpoint = (left + right)*0.5;
|
||||
mid_val = bennettInequality(v, m, midpoint)- PsGd;
|
||||
|
||||
if(left_val * mid_val > 0) {
|
||||
left = midpoint;
|
||||
left_val = mid_val;
|
||||
} else {
|
||||
right = midpoint;
|
||||
}
|
||||
}
|
||||
|
||||
return (right + left) * 0.5;
|
||||
}
|
||||
|
||||
double FabMapFBO::bennettInequality(double v, double m, double delta) {
|
||||
double DMonV = delta * m / v;
|
||||
double f_delta = log(DMonV + sqrt(pow(DMonV, 2.0) + 1));
|
||||
return exp((v / pow(m, 2.0))*(cosh(f_delta) - 1 - DMonV * f_delta));
|
||||
}
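bennettInequality() evaluates the Bennett concentration bound used by the bail-out test: with accumulated score variance v, maximum per-word deviation m and score gap \delta it returns

\exp\!\left(\frac{v}{m^{2}}\left(\cosh(f(\delta)) - 1 - \frac{\delta m}{v}\, f(\delta)\right)\right), \qquad f(\delta) = \operatorname{arcsinh}\!\left(\frac{\delta m}{v}\right),

and limitbisection() bisects on \delta until this bound equals the target probability PsGd.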
|
||||
|
||||
bool FabMapFBO::compInfo(const WordStats& first, const WordStats& second) {
|
||||
return first.info < second.info;
|
||||
}
|
||||
|
||||
FabMap2::FabMap2(const Mat& _clTree, double _PzGe, double _PzGNe,
|
||||
int _flags) :
|
||||
FabMap(_clTree, _PzGe, _PzGNe, _flags) {
|
||||
CV_Assert(flags & SAMPLED);
|
||||
|
||||
children.resize(clTree.cols);
|
||||
|
||||
for (int q = 0; q < clTree.cols; q++) {
|
||||
d1.push_back(log((this->*PzGL)(q, false, false, true) /
|
||||
(this->*PzGL)(q, false, false, false)));
|
||||
d2.push_back(log((this->*PzGL)(q, false, true, true) /
|
||||
(this->*PzGL)(q, false, true, false)) - d1[q]);
|
||||
d3.push_back(log((this->*PzGL)(q, true, false, true) /
|
||||
(this->*PzGL)(q, true, false, false))- d1[q]);
|
||||
d4.push_back(log((this->*PzGL)(q, true, true, true) /
|
||||
(this->*PzGL)(q, true, true, false))- d1[q]);
|
||||
children[pq(q)].push_back(q);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
FabMap2::~FabMap2() {
|
||||
}

void FabMap2::addTraining(const vector<Mat>& queryImgDescriptors) {
    for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
        CV_Assert(!queryImgDescriptors[i].empty());
        CV_Assert(queryImgDescriptors[i].rows == 1);
        CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
        CV_Assert(queryImgDescriptors[i].type() == CV_32F);
        trainingImgDescriptors.push_back(queryImgDescriptors[i]);
        addToIndex(queryImgDescriptors[i], trainingDefaults, trainingInvertedMap);
    }
}

void FabMap2::add(const vector<Mat>& queryImgDescriptors) {
    for (size_t i = 0; i < queryImgDescriptors.size(); i++) {
        CV_Assert(!queryImgDescriptors[i].empty());
        CV_Assert(queryImgDescriptors[i].rows == 1);
        CV_Assert(queryImgDescriptors[i].cols == clTree.cols);
        CV_Assert(queryImgDescriptors[i].type() == CV_32F);
        testImgDescriptors.push_back(queryImgDescriptors[i]);
        addToIndex(queryImgDescriptors[i], testDefaults, testInvertedMap);
    }
}

void FabMap2::getLikelihoods(const Mat& queryImgDescriptor,
        const vector<Mat>& testImgDescriptors, vector<IMatch>& matches) {

    if (&testImgDescriptors == &(this->testImgDescriptors)) {
        getIndexLikelihoods(queryImgDescriptor, testDefaults, testInvertedMap,
                matches);
    } else {
        CV_Assert(!(flags & MOTION_MODEL));
        vector<double> defaults;
        std::map<int, vector<int> > invertedMap;
        for (size_t i = 0; i < testImgDescriptors.size(); i++) {
            addToIndex(testImgDescriptors[i], defaults, invertedMap);
        }
        getIndexLikelihoods(queryImgDescriptor, defaults, invertedMap, matches);
    }
}

double FabMap2::getNewPlaceLikelihood(const Mat& queryImgDescriptor) {

    CV_Assert(!trainingImgDescriptors.empty());

    vector<IMatch> matches;
    getIndexLikelihoods(queryImgDescriptor, trainingDefaults,
            trainingInvertedMap, matches);

    double averageLogLikelihood = -DBL_MAX + matches.front().likelihood + 1;
    for (size_t i = 0; i < matches.size(); i++) {
        averageLogLikelihood =
                logsumexp(matches[i].likelihood, averageLogLikelihood);
    }

    return averageLogLikelihood - log((double)trainingDefaults.size());

}
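
/* Note on the loop above (a reading of the code, not an API guarantee): the
 * running logsumexp() accumulates log( sum_i exp(L_i) ) over the sampled
 * training places, seeded with a value far below the first likelihood so the
 * seed contributes nothing; subtracting log(N) then yields the log of the mean
 * likelihood, which is returned as the new-place likelihood. */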

void FabMap2::addToIndex(const Mat& queryImgDescriptor,
        vector<double>& defaults,
        std::map<int, vector<int> >& invertedMap) {
    defaults.push_back(0);
    for (int q = 0; q < clTree.cols; q++) {
        if (queryImgDescriptor.at<float>(0,q) > 0) {
            defaults.back() += d1[q];
            invertedMap[q].push_back((int)defaults.size()-1);
        }
    }
}

void FabMap2::getIndexLikelihoods(const Mat& queryImgDescriptor,
        std::vector<double>& defaults,
        std::map<int, vector<int> >& invertedMap,
        std::vector<IMatch>& matches) {

    vector<int>::iterator LwithI, child;

    std::vector<double> likelihoods = defaults;

    for (int q = 0; q < clTree.cols; q++) {
        if (queryImgDescriptor.at<float>(0,q) > 0) {
            for (LwithI = invertedMap[q].begin();
                    LwithI != invertedMap[q].end(); LwithI++) {

                if (queryImgDescriptor.at<float>(0,pq(q)) > 0) {
                    likelihoods[*LwithI] += d4[q];
                } else {
                    likelihoods[*LwithI] += d3[q];
                }
            }
            for (child = children[q].begin(); child != children[q].end();
                    child++) {

                if (queryImgDescriptor.at<float>(0,*child) == 0) {
                    for (LwithI = invertedMap[*child].begin();
                            LwithI != invertedMap[*child].end(); LwithI++) {

                        likelihoods[*LwithI] += d2[*child];
                    }
                }
            }
        }
    }

    for (size_t i = 0; i < likelihoods.size(); i++) {
        matches.push_back(IMatch(0,(int)i,likelihoods[i],0));
    }
}

}

}
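
/* Illustrative usage sketch (not part of this commit): it assumes the of2::FabMap
 * flag constants and the compare() interface declared in
 * opencv2/contrib/openfabmap.hpp, a Chow-Liu tree `clTree` (e.g. built with
 * of2::ChowLiuTree), and 1 x clTree.cols CV_32F bag-of-words descriptors.
 *
 *   cv::of2::FabMap2 fabmap(clTree, 0.39, 0,
 *           cv::of2::FabMap::SAMPLED | cv::of2::FabMap::CHOW_LIU);
 *   fabmap.addTraining(trainingDescriptors);            // sampled new-place model
 *
 *   std::vector<cv::of2::IMatch> matches;
 *   fabmap.compare(testDescriptors[i], matches, true);  // query, then add the query
 *   // each IMatch carries the query/image indices with likelihood and match
 *   // probability, which is what a loop-closure detector thresholds on.
 */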
@@ -86,255 +86,255 @@
namespace cv
{

class RetinaColor: public BasicRetinaFilter
{
public:
    /**
    * @typedef which allows to select the type of photoreceptors color sampling
    */

    /**
    * constructor of the retina color processing model
    * @param NBrows: number of rows of the input image
    * @param NBcolumns: number of columns of the input image
    * @param samplingMethod: the chosen color sampling method
    */
    RetinaColor(const unsigned int NBrows, const unsigned int NBcolumns, const RETINA_COLORSAMPLINGMETHOD samplingMethod=RETINA_COLOR_DIAGONAL);

    /**
    * standard destructor
    */
    virtual ~RetinaColor();

    /**
    * function that clears all buffers of the object
    */
    void clearAllBuffers();

    /**
    * resize retina color filter object (resize all allocated buffers)
    * @param NBrows: the new height size
    * @param NBcolumns: the new width size
    */
    void resize(const unsigned int NBrows, const unsigned int NBcolumns);

    /**
    * color multiplexing function: a demultiplexed RGB frame of size M*N*3 is transformed into a multiplexed M*N*1 pixels frame where each pixel is either Red, Green or Blue
    * @param inputRGBFrame: the input RGB frame to be processed
    * @return nothing, but the multiplexed frame is available through the getMultiplexedFrame() function
    */
    inline void runColorMultiplexing(const std::valarray<float> &inputRGBFrame){runColorMultiplexing(inputRGBFrame, *_multiplexedFrame);};

    /**
    * color multiplexing function: a demultiplexed RGB frame of size M*N*3 is transformed into a multiplexed M*N*1 pixels frame where each pixel is either Red, Green or Blue if using RGB images
    * @param demultiplexedInputFrame: the demultiplexed input frame to be processed, of size M*N*3
    * @param multiplexedFrame: the resulting multiplexed frame
    */
    void runColorMultiplexing(const std::valarray<float> &demultiplexedInputFrame, std::valarray<float> &multiplexedFrame);

    /**
    * color demultiplexing function: a multiplexed frame of size M*N*1 pixels is transformed into a RGB demultiplexed M*N*3 pixels frame
    * @param multiplexedColorFrame: the input multiplexed frame to be processed
    * @param adaptiveFiltering: specifies if an adaptive filtering has to be performed rather than standard filtering (adaptive filtering allows a better rendering)
    * @param maxInputValue: the maximum input data value (should be 255 for 8-bit images but it can change in the case of High Dynamic Range Images (HDRI))
    * @return nothing, but the output demultiplexed frame is available through the getDemultiplexedColorFrame() function; also use getLuminance() and getChrominance() in order to retrieve either luminance or chrominance
    */
    void runColorDemultiplexing(const std::valarray<float> &multiplexedColorFrame, const bool adaptiveFiltering=false, const float maxInputValue=255.0);
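
    /* Illustrative round trip (a sketch, not part of this commit; it assumes an
     * M*N*3 float valarray `rgbFrame` laid out as the comments above describe,
     * and the RETINA_COLORSAMPLINGMETHOD constants from the retina module headers):
     *
     *   cv::RetinaColor colorEngine(rows, cols, cv::RETINA_COLOR_DIAGONAL);
     *   colorEngine.runColorMultiplexing(rgbFrame);        // RGB -> M*N*1 mosaic
     *   colorEngine.runColorDemultiplexing(
     *           colorEngine.getMultiplexedFrame(), true);  // mosaic -> RGB again
     *   const std::valarray<float> &rgbOut = colorEngine.getDemultiplexedColorFrame();
     */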

    /**
    * activate color saturation as the final step of the color demultiplexing process
    * -> this saturation is a sigmoid function applied to each channel of the demultiplexed image.
    * @param saturateColors: boolean that activates color saturation (if true) or deactivates it (if false)
    * @param colorSaturationValue: the saturation factor
    */
    void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0){_saturateColors=saturateColors; _colorSaturationValue=colorSaturationValue;};

    /**
    * set parameters of the low pass spatio-temporal filter used to retrieve the low chrominance
    * @param beta: gain of the filter (generally set to zero)
    * @param tau: time constant of the filter (unit is frame for video processing), typically 0 when considering static processing, 1 or more if a temporal smoothing effect is required
    * @param k: spatial constant of the filter (unit is pixels), typical value is 2.5
    */
    void setChrominanceLPfilterParameters(const float beta, const float tau, const float k){setLPfilterParameters(beta, tau, k);};

    /**
    * apply to the retina color output the Krauskopf transformation which leads to an opponent color system: the output colorspace is Acr1cr2 if the input of the retina was the LMS color space
    * @param result: the input buffer to fill with the transformed colorspace retina output
    * @return true if the process ended successfully
    */
    bool applyKrauskopfLMS2Acr1cr2Transform(std::valarray<float> &result);

    /**
    * apply to the retina color output the CIE Lab color transformation
    * @param result: the input buffer to fill with the transformed colorspace retina output
    * @return true if the process ended successfully
    */
    bool applyLMS2LabTransform(std::valarray<float> &result);

    /**
    * @return the multiplexed frame result (use this after function runColorMultiplexing)
    */
    inline const std::valarray<float> &getMultiplexedFrame() const {return *_multiplexedFrame;};

    /**
    * @return the demultiplexed frame result (use this after function runColorDemultiplexing)
    */
    inline const std::valarray<float> &getDemultiplexedColorFrame() const {return _demultiplexedColorFrame;};

    /**
    * @return the luminance of the processed frame (use this after function runColorDemultiplexing)
    */
    inline const std::valarray<float> &getLuminance() const {return *_luminance;};

    /**
    * @return the chrominance of the processed frame (use this after function runColorDemultiplexing)
    */
    inline const std::valarray<float> &getChrominance() const {return _chrominance;};

    /**
    * standard 0 to 255 image clipping function applied to RGB images (of size M*N*3 pixels)
    * @param inputOutputBuffer: the image to be normalized (rewrites the input); if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param maxOutputValue: the maximum value allowed at the output (values above it will be clipped)
    */
    void clipRGBOutput_0_maxInputValue(float *inputOutputBuffer, const float maxOutputValue=255.0);

    /**
    * standard 0 to 255 image normalization function applied to RGB images (of size M*N*3 pixels)
    * @param maxOutputValue: the maximum value allowed at the output (values above it will be clipped)
    */
    void normalizeRGBOutput_0_maxOutputValue(const float maxOutputValue=255.0);

    /**
    * return the color sampling map: a Nrows*Mcolumns image in which each pixel value is the offset address which gives the address of the sampled pixel on an Nrows*Mcolumns*3 color image ordered by layers: layer1, layer2, layer3
    */
    inline const std::valarray<unsigned int> &getSamplingMap() const {return _colorSampling;};

    /**
    * function used (to bypass processing) to manually set the color output
    * @param demultiplexedImage: the color image (luminance+chrominance) which has to be written in the object buffer
    */
    inline void setDemultiplexedColorFrame(const std::valarray<float> &demultiplexedImage){_demultiplexedColorFrame=demultiplexedImage;};

protected:

    // private functions
    RETINA_COLORSAMPLINGMETHOD _samplingMethod;
    bool _saturateColors;
    float _colorSaturationValue;
    // links to parent buffers (more convenient names)
    TemplateBuffer<float> *_luminance;
    std::valarray<float> *_multiplexedFrame;
    // instance buffers
    std::valarray<unsigned int> _colorSampling; // table (of size _nbRows*_nbColumns) which specifies the color of each pixel
    std::valarray<float> _RGBmosaic;
    std::valarray<float> _tempMultiplexedFrame;
    std::valarray<float> _demultiplexedTempBuffer;
    std::valarray<float> _demultiplexedColorFrame;
    std::valarray<float> _chrominance;
    std::valarray<float> _colorLocalDensity;// buffer which contains the local density of the R, G and B photoreceptors for normalization use
    std::valarray<float> _imageGradient;

    // variables
    float _pR, _pG, _pB; // probabilities of color R, G and B
    bool _objectInit;

    // protected functions
    void _initColorSampling();
    void _interpolateImageDemultiplexedImage(float *inputOutputBuffer);
    void _interpolateSingleChannelImage111(float *inputOutputBuffer);
    void _interpolateBayerRGBchannels(float *inputOutputBuffer);
    void _applyRIFfilter(const float *sourceBuffer, float *destinationBuffer);
    void _getNormalizedContoursImage(const float *inputFrame, float *outputFrame);
    // -> special adaptive filters dedicated to low pass filtering on the chrominance (skips filtering on the edges)
    void _adaptiveSpatialLPfilter(const float *inputFrame, float *outputFrame);
    void _adaptiveHorizontalCausalFilter_addInput(const float *inputFrame, float *outputFrame, const unsigned int IDrowStart, const unsigned int IDrowEnd); // TBB parallelized
    void _adaptiveVerticalAnticausalFilter_multGain(float *outputFrame, const unsigned int IDcolumnStart, const unsigned int IDcolumnEnd);
    void _computeGradient(const float *luminance);
    void _normalizeOutputs_0_maxOutputValue(void);

    // color space transform
    void _applyImageColorSpaceConversion(const std::valarray<float> &inputFrame, std::valarray<float> &outputFrame, const float *transformTable);

#ifdef MAKE_PARALLEL
    /******************************************************
    ** IF some parallelizing thread methods are available, then the main loops are parallelized using these functors
    ** ==> main idea: parallelise the main filter loops; only the most used methods are parallelized... TODO : increase the number of parallelised methods as necessary
    ** ==> functor names = Parallel_$$$ where $$$ = the name of the serial method that is parallelised
    ** ==> functor constructors can differ from the parameters used with their related serial functions
    */

    /* Template :
    class Parallel_ : public cv::ParallelLoopBody
    {
    private:

    public:
        Parallel_()
            : {}

        virtual void operator()( const cv::Range& r ) const {

        }
    };
    */
    class Parallel_adaptiveHorizontalCausalFilter_addInput: public cv::ParallelLoopBody
    {
    private:
        float *outputFrame;
        const float *inputFrame, *imageGradient;
        unsigned int nbColumns;
    public:
        Parallel_adaptiveHorizontalCausalFilter_addInput(const float *inputImg, float *bufferToProcess, const float *imageGrad, const unsigned int nbCols)
            :outputFrame(bufferToProcess), inputFrame(inputImg), imageGradient(imageGrad), nbColumns(nbCols) {};

        virtual void operator()( const Range& r ) const {
            register float* outputPTR=outputFrame+r.start*nbColumns;
            register const float* inputPTR=inputFrame+r.start*nbColumns;
            register const float *imageGradientPTR= imageGradient+r.start*nbColumns;
            for (int IDrow=r.start; IDrow!=r.end; ++IDrow)
            {
                register float result=0;
                for (unsigned int index=0; index<nbColumns; ++index)
                {
                    result = *(inputPTR++) + (*imageGradientPTR++)* result;
                    *(outputPTR++) = result;
                }
            }
        }
    };

    class Parallel_adaptiveVerticalAnticausalFilter_multGain: public cv::ParallelLoopBody
    {
    private:
        float *outputFrame;
        const float *imageGradient;
        unsigned int nbRows, nbColumns;
        float filterParam_gain;
    public:
        Parallel_adaptiveVerticalAnticausalFilter_multGain(float *bufferToProcess, const float *imageGrad, const unsigned int nbRws, const unsigned int nbCols, const float gain)
            :outputFrame(bufferToProcess), imageGradient(imageGrad), nbRows(nbRws), nbColumns(nbCols), filterParam_gain(gain){}

        virtual void operator()( const Range& r ) const {
            float* offset=outputFrame+nbColumns*nbRows-nbColumns;
            const float* gradOffset= imageGradient+nbColumns*nbRows-nbColumns;
            for (int IDcolumn=r.start; IDcolumn!=r.end; ++IDcolumn)
            {
                register float result=0;
                register float *outputPTR=offset+IDcolumn;
                register const float *imageGradientPTR=gradOffset+IDcolumn;
                for (unsigned int index=0; index<nbRows; ++index)
                {
                    result = *(outputPTR) + *(imageGradientPTR) * result;
                    *(outputPTR) = filterParam_gain*result;
                    outputPTR-=nbColumns;
                    imageGradientPTR-=nbColumns;
                }
            }
        }
    };
#endif
};
}

#endif /*RETINACOLOR_HPP_*/
@@ -82,21 +82,21 @@ class Parallel_clipBufferValues: public cv::ParallelLoopBody
{
private:
    type *bufferToClip;
    type minValue, maxValue;

public:
    Parallel_clipBufferValues(type* bufferToProcess, const type min, const type max)
        : bufferToClip(bufferToProcess), minValue(min), maxValue(max){}

    virtual void operator()( const cv::Range &r ) const {
        register type *inputOutputBufferPTR=bufferToClip+r.start;
        for (register int jf = r.start; jf != r.end; ++jf, ++inputOutputBufferPTR)
        {
            if (*inputOutputBufferPTR>maxValue)
                *inputOutputBufferPTR=maxValue;
            else if (*inputOutputBufferPTR<minValue)
                *inputOutputBufferPTR=minValue;
        }
    }
};
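
/* Usage sketch (illustrative, not part of this commit): this functor is what
 * TemplateBuffer::clipHistogram() dispatches to when MAKE_PARALLEL is defined;
 * the same pattern can be reproduced directly with cv::parallel_for_:
 *
 *   float *buffer = ...;          // nbPixels contiguous float values
 *   cv::parallel_for_(cv::Range(0, (int)nbPixels),
 *           Parallel_clipBufferValues<float>(buffer, 0.0f, 255.0f));
 *   // each worker clips its [r.start, r.end) slice into [0, 255]
 */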
#endif
@@ -105,448 +105,448 @@ public:

namespace cv
{
/**
 * @class TemplateBuffer
 * @brief this class is a simple template memory buffer which contains basic functions to get information on or normalize the buffer content
 * note that thanks to the parent STL template class "valarray", it is possible to easily perform operations on the full array such as addition, product etc.
 * @author Alexandre BENOIT (benoit.alexandre.vision@gmail.com), helped by Gelu IONESCU (gelu.ionescu@lis.inpg.fr)
 * creation date: september 2007
 */
template <class type> class TemplateBuffer : public std::valarray<type>
{
public:

    /**
    * constructor for monodimensional array
    * @param dim: the size of the vector
    */
    TemplateBuffer(const size_t dim=0)
        : std::valarray<type>((type)0, dim)
    {
        _NBrows=1;
        _NBcolumns=dim;
        _NBdepths=1;
        _NBpixels=dim;
        _doubleNBpixels=2*dim;
    }

    /**
    * constructor by copy for monodimensional array
    * @param pVal: the pointer to a buffer to copy
    * @param dim: the size of the vector
    */
    TemplateBuffer(const type* pVal, const size_t dim)
        : std::valarray<type>(pVal, dim)
    {
        _NBrows=1;
        _NBcolumns=dim;
        _NBdepths=1;
        _NBpixels=dim;
        _doubleNBpixels=2*dim;
    }

    /**
    * constructor for bidimensional array
    * @param dimRows: the size of the vector
    * @param dimColumns: the size of the vector
    * @param depth: the number of layers of the buffer in its third dimension (3 for color images, 1 for gray images)
    */
    TemplateBuffer(const size_t dimRows, const size_t dimColumns, const size_t depth=1)
        : std::valarray<type>((type)0, dimRows*dimColumns*depth)
    {
#ifdef TEMPLATEBUFFERDEBUG
        std::cout<<"TemplateBuffer::TemplateBuffer: new buffer, size="<<dimRows<<", "<<dimColumns<<", "<<depth<<"valarraySize="<<this->size()<<std::endl;
#endif
        _NBrows=dimRows;
        _NBcolumns=dimColumns;
        _NBdepths=depth;
        _NBpixels=dimRows*dimColumns;
        _doubleNBpixels=2*dimRows*dimColumns;
        //_createTableIndex();
#ifdef TEMPLATEBUFFERDEBUG
        std::cout<<"TemplateBuffer::TemplateBuffer: construction successful"<<std::endl;
#endif

    }

    /**
    * copy constructor
    * @param toCopy
    * @return the constructed instance
    *emplateBuffer(const TemplateBuffer &toCopy)
        :_NBrows(toCopy.getNBrows()),_NBcolumns(toCopy.getNBcolumns()),_NBdepths(toCopy.getNBdephs()), _NBpixels(toCopy.getNBpixels()), _doubleNBpixels(toCopy.getNBpixels()*2)
        //std::valarray<type>(toCopy)
    {
        memcpy(Buffer(), toCopy.Buffer(), this->size());
    }*/
    /**
    * destructor
    */
    virtual ~TemplateBuffer()
    {
#ifdef TEMPLATEBUFFERDEBUG
        std::cout<<"~TemplateBuffer"<<std::endl;
#endif
    }

    /**
    * delete the buffer content (set zeros)
    */
    inline void setZero(){std::valarray<type>::operator=(0);};//memset(Buffer(), 0, sizeof(type)*_NBpixels);};

    /**
    * @return the number of rows (height) of the images used by the object
    */
    inline unsigned int getNBrows(){return (unsigned int)_NBrows;};

    /**
    * @return the number of columns (width) of the images used by the object
    */
    inline unsigned int getNBcolumns(){return (unsigned int)_NBcolumns;};

    /**
    * @return the number of pixels (width*height) of the images used by the object
    */
    inline unsigned int getNBpixels(){return (unsigned int)_NBpixels;};

    /**
    * @return twice the number of pixels (width*height) of the images used by the object
    */
    inline unsigned int getDoubleNBpixels(){return (unsigned int)_doubleNBpixels;};

    /**
    * @return the number of depths (3rd dimension: 1 for gray images, 3 for rgb images) of the images used by the object
    */
    inline unsigned int getDepthSize(){return (unsigned int)_NBdepths;};

    /**
    * resize the buffer and recompute table index etc.
    */
    void resizeBuffer(const size_t dimRows, const size_t dimColumns, const size_t depth=1)
    {
        this->resize(dimRows*dimColumns*depth);
        _NBrows=dimRows;
        _NBcolumns=dimColumns;
        _NBdepths=depth;
        _NBpixels=dimRows*dimColumns;
        _doubleNBpixels=2*dimRows*dimColumns;
    }

    inline TemplateBuffer<type> & operator=(const std::valarray<type> &b)
    {
        //std::cout<<"TemplateBuffer<type> & operator= affect vector: "<<std::endl;
        std::valarray<type>::operator=(b);
        return *this;
    }

    inline TemplateBuffer<type> & operator=(const type &b)
    {
        //std::cout<<"TemplateBuffer<type> & operator= affect value: "<<b<<std::endl;
        std::valarray<type>::operator=(b);
        return *this;
    }

    /*  inline const type &operator[](const unsigned int &b)
    {
        return (*this)[b];
    }
    */
    /**
    * @return the buffer address in non const mode
    */
    inline type* Buffer() { return &(*this)[0]; }

    ///////////////////////////////////////////////////////
    // Standard Image manipulation functions

    /**
    * standard 0 to 255 image normalization function
    * @param inputOutputBuffer: the image to be normalized (rewrites the input); if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param nbPixels: specifies the number of pixels on which the normalization should be performed; if 0, then all pixels specified in the constructor are processed
    * @param maxOutputValue: the maximum output value
    */
    static void normalizeGrayOutput_0_maxOutputValue(type *inputOutputBuffer, const size_t nbPixels, const type maxOutputValue=(type)255.0);

    /**
    * standard 0 to 255 image normalization function applied to the current buffer
    * @param maxOutputValue: the maximum output value
    */
    void normalizeGrayOutput_0_maxOutputValue(const type maxOutputValue=(type)255.0){normalizeGrayOutput_0_maxOutputValue(this->Buffer(), this->size(), maxOutputValue);};

    /**
    * sigmoid image normalization function (saturates min and max values)
    * @param meanValue: specifies the mean value of the pixels to be processed
    * @param sensitivity: strength of the sigmoid
    * @param inputPicture: the image to be normalized; if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param outputBuffer: the output buffer to which the result is written; if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param maxOutputValue: the maximum output value
    */
    static void normalizeGrayOutputCentredSigmoide(const type meanValue, const type sensitivity, const type maxOutputValue, type *inputPicture, type *outputBuffer, const unsigned int nbPixels);

    /**
    * sigmoid image normalization function on the current buffer (saturates min and max values)
    * @param meanValue: specifies the mean value of the pixels to be processed
    * @param sensitivity: strength of the sigmoid
    * @param maxOutputValue: the maximum output value
    */
    inline void normalizeGrayOutputCentredSigmoide(const type meanValue=(type)0.0, const type sensitivity=(type)2.0, const type maxOutputValue=(type)255.0){ (void)maxOutputValue; normalizeGrayOutputCentredSigmoide(meanValue, sensitivity, 255.0, this->Buffer(), this->Buffer(), this->getNBpixels());};

    /**
    * sigmoid image normalization function (saturates min and max values); in this function the sigmoid is centered on low values (high saturation of the medium and high values)
    * @param inputPicture: the image to be normalized; if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param outputBuffer: the output buffer to which the result is written; if no parameter, then the built-in buffer reachable by the getOutput() function is normalized
    * @param sensitivity: strength of the sigmoid
    * @param maxOutputValue: the maximum output value
    */
    void normalizeGrayOutputNearZeroCentreredSigmoide(type *inputPicture=(type*)NULL, type *outputBuffer=(type*)NULL, const type sensitivity=(type)40, const type maxOutputValue=(type)255.0);

    /**
    * center and reduce the image: (image-mean)/std
    * @param inputOutputBuffer: the image to be normalized; if no parameter, the result is rewritten on it
    */
    void centerReductImageLuminance(type *inputOutputBuffer=(type*)NULL);

    /**
    * @return standard deviation of the buffer
    */
    double getStandardDeviation()
    {
        double standardDeviation=0;
        double meanValue=getMean();

        type *bufferPTR=Buffer();
        for (unsigned int i=0;i<this->size();++i)
        {
            double diff=(*(bufferPTR++)-meanValue);
            standardDeviation+=diff*diff;
        }
        return sqrt(standardDeviation/this->size());
    };

    /**
    * Clip buffer histogram
    * @param minRatio: the minimum ratio of the lower pixel values, range=[0,1] and lower than maxRatio
    * @param maxRatio: the maximum ratio of the higher pixel values, range=[0,1] and higher than minRatio
    */
    void clipHistogram(double minRatio, double maxRatio, double maxOutputValue)
    {

        if (minRatio>=maxRatio)
        {
            std::cerr<<"TemplateBuffer::clipHistogram: minRatio must be inferior to maxRatio, buffer unchanged"<<std::endl;
            return;
        }

        /*  minRatio=min(max(minRatio, 1.0),0.0);
            maxRatio=max(max(maxRatio, 0.0),1.0);
        */

        // find the pixel value just above the threshold
        const double maxThreshold=this->max()*maxRatio;
        const double minThreshold=(this->max()-this->min())*minRatio+this->min();

        type *bufferPTR=this->Buffer();

        double deltaH=maxThreshold;
        double deltaL=maxThreshold;

        double updatedHighValue=maxThreshold;
        double updatedLowValue=maxThreshold;

        for (unsigned int i=0;i<this->size();++i)
        {
            double curentValue=(double)*(bufferPTR++);

            // updating "closest to the high threshold" pixel value
            double highValueTest=maxThreshold-curentValue;
            if (highValueTest>0)
            {
                if (deltaH>highValueTest)
                {
                    deltaH=highValueTest;
                    updatedHighValue=curentValue;
                }
            }

            // updating "closest to the low threshold" pixel value
            double lowValueTest=curentValue-minThreshold;
            if (lowValueTest>0)
            {
                if (deltaL>lowValueTest)
                {
                    deltaL=lowValueTest;
                    updatedLowValue=curentValue;
                }
            }
        }

        std::cout<<"Tdebug"<<std::endl;
        std::cout<<"deltaL="<<deltaL<<", deltaH="<<deltaH<<std::endl;
        std::cout<<"this->max()"<<this->max()<<"maxThreshold="<<maxThreshold<<"updatedHighValue="<<updatedHighValue<<std::endl;
        std::cout<<"this->min()"<<this->min()<<"minThreshold="<<minThreshold<<"updatedLowValue="<<updatedLowValue<<std::endl;
        // clipping values outside the updated thresholds
        bufferPTR=this->Buffer();
#ifdef MAKE_PARALLEL // call the TemplateBuffer multithreaded clipping method
        parallel_for_(cv::Range(0,this->size()), Parallel_clipBufferValues<type>(bufferPTR, updatedLowValue, updatedHighValue));
#else

        for (unsigned int i=0;i<this->size();++i, ++bufferPTR)
        {
            if (*bufferPTR<updatedLowValue)
                *bufferPTR=updatedLowValue;
            else if (*bufferPTR>updatedHighValue)
                *bufferPTR=updatedHighValue;
        }
#endif
        normalizeGrayOutput_0_maxOutputValue(this->Buffer(), this->size(), maxOutputValue);

    }

    /**
    * @return the mean value of the vector
    */
    inline double getMean(){return this->sum()/this->size();};

protected:
    size_t _NBrows;
    size_t _NBcolumns;
    size_t _NBdepths;
    size_t _NBpixels;
    size_t _doubleNBpixels;
    // utilities
    static type _abs(const type x);

};
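
/* Usage sketch (illustrative only, not part of this commit): TemplateBuffer
 * behaves like a std::valarray with image bookkeeping, so a typical sequence is:
 *
 *   cv::TemplateBuffer<float> frame(480, 640);         // 480 rows, 640 columns, 1 layer
 *   // ... fill frame.Buffer() with 480*640 float values ...
 *   frame.centerReductImageLuminance();                // (x - mean) / std
 *   frame.clipHistogram(0.01, 0.99, 255.0);            // clip values outside
 *                                                      // [min + 0.01*(max-min), 0.99*max],
 *                                                      // then rescale to [0, 255]
 *   frame.normalizeGrayOutput_0_maxOutputValue(255.0); // plain min/max rescaling
 */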

///////////////////////////////////////////////////////////////////////
/// normalize output between 0 and 255; can be applied to images of a different size than the declared size if the nbPixels parameter is set
template <class type>
void TemplateBuffer<type>::normalizeGrayOutput_0_maxOutputValue(type *inputOutputBuffer, const size_t processedPixels, const type maxOutputValue)
{
    type maxValue=inputOutputBuffer[0], minValue=inputOutputBuffer[0];

    // get the min and max value
    register type *inputOutputBufferPTR=inputOutputBuffer;
    for (register size_t j = 0; j<processedPixels; ++j)
    {
        type pixValue = *(inputOutputBufferPTR++);
        if (maxValue < pixValue)
            maxValue = pixValue;
        else if (minValue > pixValue)
            minValue = pixValue;
    }
    // change the range of the data to 0->255

    type factor = maxOutputValue/(maxValue-minValue);
    type offset = (type)(-minValue*factor);

    inputOutputBufferPTR=inputOutputBuffer;
    for (register size_t j = 0; j < processedPixels; ++j, ++inputOutputBufferPTR)
        *inputOutputBufferPTR=*(inputOutputBufferPTR)*factor+offset;

}
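
/* Worked example (illustrative): for a buffer {2, 4, 6} and maxOutputValue=255,
 * factor = 255/(6-2) = 63.75 and offset = -2*63.75 = -127.5, so the rescaled
 * values become {0, 127.5, 255}. */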

// normalize data with a sigmoid close to 0 (saturates values for those superior to 0)
template <class type>
void TemplateBuffer<type>::normalizeGrayOutputNearZeroCentreredSigmoide(type *inputBuffer, type *outputBuffer, const type sensitivity, const type maxOutputValue)
{
    if (inputBuffer==NULL)
        inputBuffer=Buffer();
    if (outputBuffer==NULL)
        outputBuffer=Buffer();

    type X0cube=sensitivity*sensitivity*sensitivity;

    register type *inputBufferPTR=inputBuffer;
    register type *outputBufferPTR=outputBuffer;

    for (register size_t j = 0; j < _NBpixels; ++j, ++inputBufferPTR)
    {

        type currentCubeLuminance=*inputBufferPTR**inputBufferPTR**inputBufferPTR;
        *(outputBufferPTR++)=maxOutputValue*currentCubeLuminance/(currentCubeLuminance+X0cube);
    }
}
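
/* Reading of the loop above (for non-negative inputs): the mapping is
 * y = maxOutputValue * x^3 / (x^3 + sensitivity^3), so an input equal to
 * `sensitivity` maps to exactly half of maxOutputValue and larger inputs
 * saturate towards maxOutputValue. */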
|
||||
|
||||
// normalize and adjust luminance with a centered to 128 sigmode
|
||||
template <class type>
|
||||
void TemplateBuffer<type>::normalizeGrayOutputCentredSigmoide(const type meanValue, const type sensitivity, const type maxOutputValue, type *inputBuffer, type *outputBuffer, const unsigned int nbPixels)
|
||||
{
|
||||
|
||||
if (minRatio>=maxRatio)
|
||||
if (sensitivity==1.0)
|
||||
{
|
||||
std::cerr<<"TemplateBuffer::clipHistogram: minRatio must be inferior to maxRatio, buffer unchanged"<<std::endl;
|
||||
std::cerr<<"TemplateBuffer::TemplateBuffer<type>::normalizeGrayOutputCentredSigmoide error: 2nd parameter (sensitivity) must not equal 0, copying original data..."<<std::endl;
|
||||
memcpy(outputBuffer, inputBuffer, sizeof(type)*nbPixels);
|
||||
return;
|
||||
}
|
||||
|
||||
/* minRatio=min(max(minRatio, 1.0),0.0);
|
||||
maxRatio=max(max(maxRatio, 0.0),1.0);
|
||||
*/
|
||||
type X0=maxOutputValue/(sensitivity-(type)1.0);
|
||||
|
||||
// find the pixel value just above the threshold
|
||||
const double maxThreshold=this->max()*maxRatio;
|
||||
const double minThreshold=(this->max()-this->min())*minRatio+this->min();
|
||||
register type *inputBufferPTR=inputBuffer;
|
||||
register type *outputBufferPTR=outputBuffer;
|
||||
|
||||
type *bufferPTR=this->Buffer();
|
||||
for (register size_t j = 0; j < nbPixels; ++j, ++inputBufferPTR)
|
||||
*(outputBufferPTR++)=(meanValue+(meanValue+X0)*(*(inputBufferPTR)-meanValue)/(_abs(*(inputBufferPTR)-meanValue)+X0));
|
||||
|
||||
double deltaH=maxThreshold;
|
||||
double deltaL=maxThreshold;
|
||||
}

// clip the histogram between low/high ratios of the current range, then rescale to 0..maxOutputValue
template <class type>
void TemplateBuffer<type>::clipHistogram(double minRatio, double maxRatio, double maxOutputValue)
{
    if (minRatio>=maxRatio)
    {
        std::cerr<<"TemplateBuffer::clipHistogram: minRatio must be inferior to maxRatio, buffer unchanged"<<std::endl;
        return;
    }

    /* minRatio=min(max(minRatio, 1.0),0.0);
       maxRatio=max(max(maxRatio, 0.0),1.0);
    */

    // find the pixel value just above the threshold
    const double maxThreshold=this->max()*maxRatio;
    const double minThreshold=(this->max()-this->min())*minRatio+this->min();

    type *bufferPTR=this->Buffer();

    double deltaH=maxThreshold;
    double deltaL=maxThreshold;

    double updatedHighValue=maxThreshold;
    double updatedLowValue=maxThreshold;

    for (unsigned int i=0;i<this->size();++i)
    {
        double curentValue=(double)*(bufferPTR++);

        // updating "closest to the high threshold" pixel value
        double highValueTest=maxThreshold-curentValue;
        if (highValueTest>0)
        {
            if (deltaH>highValueTest)
            {
                deltaH=highValueTest;
                updatedHighValue=curentValue;
            }
        }

        // updating "closest to the low threshold" pixel value
        double lowValueTest=curentValue-minThreshold;
        if (lowValueTest>0)
        {
            if (deltaL>lowValueTest)
            {
                deltaL=lowValueTest;
                updatedLowValue=curentValue;
            }
        }
    }

    std::cout<<"Tdebug"<<std::endl;
    std::cout<<"deltaL="<<deltaL<<", deltaH="<<deltaH<<std::endl;
    std::cout<<"this->max()"<<this->max()<<"maxThreshold="<<maxThreshold<<"updatedHighValue="<<updatedHighValue<<std::endl;
    std::cout<<"this->min()"<<this->min()<<"minThreshold="<<minThreshold<<"updatedLowValue="<<updatedLowValue<<std::endl;

    // clipping values outside the updated thresholds
    bufferPTR=this->Buffer();
#ifdef MAKE_PARALLEL // call the TemplateBuffer multithreaded clipping method
    parallel_for_(cv::Range(0,this->size()), Parallel_clipBufferValues<type>(bufferPTR, updatedLowValue, updatedHighValue));
#else
    for (unsigned int i=0;i<this->size();++i, ++bufferPTR)
    {
        if (*bufferPTR<updatedLowValue)
            *bufferPTR=updatedLowValue;
        else if (*bufferPTR>updatedHighValue)
            *bufferPTR=updatedHighValue;
    }
#endif
    normalizeGrayOutput_0_maxOutputValue(this->Buffer(), this->size(), maxOutputValue);
}
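
// Standalone sketch of the clipping step above, shown on a plain array (hypothetical helper):
// clipHistogram first derives updatedLowValue/updatedHighValue from minRatio and maxRatio,
// saturates the buffer to that range exactly as below, then rescales the result with
// normalizeGrayOutput_0_maxOutputValue.
static inline void clipToRange_sketch(float *data, size_t n, float lowValue, float highValue)
{
    for (size_t i = 0; i < n; ++i)
    {
        if (data[i] < lowValue)
            data[i] = lowValue;           // saturate the low tail
        else if (data[i] > highValue)
            data[i] = highValue;          // saturate the high tail
    }
}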

// center and reduce the image: (image - mean) / std
template <class type>
void TemplateBuffer<type>::centerReductImageLuminance(type *inputOutputBuffer)
{
    // if inputOutputBuffer is unassigned, process the internal buffer instead
    if (inputOutputBuffer==NULL)
        inputOutputBuffer=Buffer();

    type meanValue=0, stdValue=0;

    // compute mean value
    for (register size_t j = 0; j < _NBpixels; ++j)
        meanValue+=inputOutputBuffer[j];
    meanValue/=((type)_NBpixels);

    // compute std value
    register type *inputOutputBufferPTR=inputOutputBuffer;
    for (size_t index=0;index<_NBpixels;++index)
    {
        type inputMinusMean=*(inputOutputBufferPTR++)-meanValue;
        stdValue+=inputMinusMean*inputMinusMean;
    }
    stdValue=sqrt(stdValue/((type)_NBpixels));

    // adjust luminance with respect to the mean and std value
    inputOutputBufferPTR=inputOutputBuffer;
    for (size_t index=0;index<_NBpixels;++index, ++inputOutputBufferPTR)
        *inputOutputBufferPTR=(*(inputOutputBufferPTR)-meanValue)/stdValue;
}
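
// Standalone sketch of the same standardisation, v -> (v - mean) / std, on a plain array
// (hypothetical helper; std::sqrt is assumed available, as the method above already calls sqrt).
// After the call the data has zero mean and unit variance, provided n > 0 and the input is
// not constant (the standard deviation would be zero in that case).
static inline void centerReduce_sketch(float *data, size_t n)
{
    float mean = 0.f, var = 0.f;
    for (size_t i = 0; i < n; ++i)                       // accumulate the mean
        mean += data[i];
    mean /= (float)n;
    for (size_t i = 0; i < n; ++i)                       // accumulate the squared deviations
    {
        const float d = data[i] - mean;
        var += d * d;
    }
    const float stdValue = std::sqrt(var / (float)n);    // population standard deviation
    for (size_t i = 0; i < n; ++i)
        data[i] = (data[i] - mean) / stdValue;           // assumes stdValue != 0
}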

    /**
    * @return the mean value of the vector
    */
    inline double getMean(){return this->sum()/this->size();};

protected:
    size_t _NBrows;
    size_t _NBcolumns;
    size_t _NBdepths;
    size_t _NBpixels;
    size_t _doubleNBpixels;

    // utilities
    static type _abs(const type x);

};

///////////////////////////////////////////////////////////////////////
// generic absolute value used by the sigmoid normalisation above
template <class type>
type TemplateBuffer<type>::_abs(const type x)
{
    if (x>0)
        return x;
    else
        return -x;
}

// specialization for int: use std::abs
template < >
inline int TemplateBuffer<int>::_abs(const int x)
{
    return std::abs(x);
}

// specialization for double: use std::fabs
template < >
inline double TemplateBuffer<double>::_abs(const double x)
{
    return std::fabs(x);
}

// specialization for float: use std::fabs
template < >
inline float TemplateBuffer<float>::_abs(const float x)
{
    return std::fabs(x);
}

}

#endif