Merged the trunk r8589:8653 - all changes related to build warnings
@@ -46,12 +46,12 @@

namespace cv
{

// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
class CV_EXPORTS SimilarRects
{
public:
    SimilarRects(double _eps) : eps(_eps) {}
    inline bool operator()(const Rect& r1, const Rect& r2) const
    {
@@ -62,8 +62,8 @@ public:
               std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
    }
    double eps;
};

void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vector<int>* weights, vector<double>* levelWeights)
{
@@ -78,13 +78,13 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
        }
        return;
    }

    vector<int> labels;
    int nclasses = partition(rectList, labels, SimilarRects(eps));

    vector<Rect> rrects(nclasses);
    vector<int> rweights(nclasses, 0);
    vector<int> rejectLevels(nclasses, 0);
    vector<double> rejectWeights(nclasses, DBL_MIN);
    int i, j, nlabels = (int)labels.size();
    for( i = 0; i < nlabels; i++ )
@@ -97,10 +97,10 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
        rweights[cls]++;
    }
    if ( levelWeights && weights && !weights->empty() && !levelWeights->empty() )
    {
        for( i = 0; i < nlabels; i++ )
        {
            int cls = labels[i];
            if( (*weights)[i] > rejectLevels[cls] )
            {
                rejectLevels[cls] = (*weights)[i];
@@ -108,9 +108,9 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
            }
            else if( ( (*weights)[i] == rejectLevels[cls] ) && ( (*levelWeights)[i] > rejectWeights[cls] ) )
                rejectWeights[cls] = (*levelWeights)[i];
        }
    }

    for( i = 0; i < nclasses; i++ )
    {
        Rect r = rrects[i];
@@ -120,32 +120,32 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
                   saturate_cast<int>(r.width*s),
                   saturate_cast<int>(r.height*s));
    }

    rectList.clear();
    if( weights )
        weights->clear();
    if( levelWeights )
        levelWeights->clear();

    for( i = 0; i < nclasses; i++ )
    {
        Rect r1 = rrects[i];
        int n1 = levelWeights ? rejectLevels[i] : rweights[i];
        double w1 = rejectWeights[i];
        if( n1 <= groupThreshold )
            continue;
        // filter out small face rectangles inside large rectangles
        for( j = 0; j < nclasses; j++ )
        {
            int n2 = rweights[j];

            if( j == i || n2 <= groupThreshold )
                continue;
            Rect r2 = rrects[j];

            int dx = saturate_cast<int>( r2.width * eps );
            int dy = saturate_cast<int>( r2.height * eps );

            if( i != j &&
                r1.x >= r2.x - dx &&
                r1.y >= r2.y - dy &&
@@ -154,14 +154,14 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
                (n2 > std::max(3, n1) || n1 < 3) )
                break;
        }

        if( j == nclasses )
        {
            rectList.push_back(r1);
            if( weights )
                weights->push_back(n1);
            if( levelWeights )
                levelWeights->push_back(w1);
        }
    }
}
@@ -169,158 +169,158 @@ void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vec
class MeanshiftGrouping
{
public:
    MeanshiftGrouping(const Point3d& densKer, const vector<Point3d>& posV,
                      const vector<double>& wV, double, int maxIter = 20)
    {
        densityKernel = densKer;
        weightsV = wV;
        positionsV = posV;
        positionsCount = (int)posV.size();
        meanshiftV.resize(positionsCount);
        distanceV.resize(positionsCount);
        iterMax = maxIter;

        for (unsigned i = 0; i<positionsV.size(); i++)
        {
            meanshiftV[i] = getNewValue(positionsV[i]);
            distanceV[i] = moveToMode(meanshiftV[i]);
            meanshiftV[i] -= positionsV[i];
        }
    }

    void getModes(vector<Point3d>& modesV, vector<double>& resWeightsV, const double eps)
    {
        for (size_t i=0; i <distanceV.size(); i++)
        {
            bool is_found = false;
            for(size_t j=0; j<modesV.size(); j++)
            {
                if ( getDistance(distanceV[i], modesV[j]) < eps)
                {
                    is_found=true;
                    break;
                }
            }
            if (!is_found)
            {
                modesV.push_back(distanceV[i]);
            }
        }

        resWeightsV.resize(modesV.size());

        for (size_t i=0; i<modesV.size(); i++)
        {
            resWeightsV[i] = getResultWeight(modesV[i]);
        }
    }

protected:
    vector<Point3d> positionsV;
    vector<double> weightsV;

    Point3d densityKernel;
    int positionsCount;

    vector<Point3d> meanshiftV;
    vector<Point3d> distanceV;
    int iterMax;
    double modeEps;

    Point3d getNewValue(const Point3d& inPt) const
    {
        Point3d resPoint(.0);
        Point3d ratPoint(.0);
        for (size_t i=0; i<positionsV.size(); i++)
        {
            Point3d aPt= positionsV[i];
            Point3d bPt = inPt;
            Point3d sPt = densityKernel;

            sPt.x *= exp(aPt.z);
            sPt.y *= exp(aPt.z);

            aPt.x /= sPt.x;
            aPt.y /= sPt.y;
            aPt.z /= sPt.z;

            bPt.x /= sPt.x;
            bPt.y /= sPt.y;
            bPt.z /= sPt.z;

            double w = (weightsV[i])*std::exp(-((aPt-bPt).dot(aPt-bPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));

            resPoint += w*aPt;

            ratPoint.x += w/sPt.x;
            ratPoint.y += w/sPt.y;
            ratPoint.z += w/sPt.z;
        }
        resPoint.x /= ratPoint.x;
        resPoint.y /= ratPoint.y;
        resPoint.z /= ratPoint.z;
        return resPoint;
    }

    double getResultWeight(const Point3d& inPt) const
    {
        double sumW=0;
        for (size_t i=0; i<positionsV.size(); i++)
        {
            Point3d aPt = positionsV[i];
            Point3d sPt = densityKernel;

            sPt.x *= exp(aPt.z);
            sPt.y *= exp(aPt.z);

            aPt -= inPt;

            aPt.x /= sPt.x;
            aPt.y /= sPt.y;
            aPt.z /= sPt.z;

            sumW+=(weightsV[i])*std::exp(-(aPt.dot(aPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));
        }
        return sumW;
    }

    Point3d moveToMode(Point3d aPt) const
    {
        Point3d bPt;
        for (int i = 0; i<iterMax; i++)
        {
            bPt = aPt;
            aPt = getNewValue(bPt);
            if ( getDistance(aPt, bPt) <= modeEps )
            {
                break;
            }
        }
        return aPt;
    }

    double getDistance(Point3d p1, Point3d p2) const
    {
        Point3d ns = densityKernel;
        ns.x *= exp(p2.z);
        ns.y *= exp(p2.z);
        p2 -= p1;
        p2.x /= ns.x;
        p2.y /= ns.y;
        p2.z /= ns.z;
        return p2.dot(p2);
    }
};
//new grouping function with using meanshift
static void groupRectangles_meanshift(vector<Rect>& rectList, double detectThreshold, vector<double>* foundWeights,
                                      vector<double>& scales, Size winDetSize)
{
    int detectionCount = (int)rectList.size();
    vector<Point3d> hits(detectionCount), resultHits;
    vector<double> hitWeights(detectionCount), resultWeights;
    Point2d hitCenter;

    for (int i=0; i < detectionCount; i++)
    {
        hitWeights[i] = (*foundWeights)[i];
        hitCenter = (rectList[i].tl() + rectList[i].br())*(0.5); //center of rectangles
@@ -338,17 +338,17 @@ static void groupRectangles_meanshift(vector<Rect>& rectList, double detectThres

    msGrouping.getModes(resultHits, resultWeights, 1);

    for (unsigned i=0; i < resultHits.size(); ++i)
    {
        double scale = exp(resultHits[i].z);
        hitCenter.x = resultHits[i].x;
        hitCenter.y = resultHits[i].y;
        Size s( int(winDetSize.width * scale), int(winDetSize.height * scale) );
        Rect resultRect( int(hitCenter.x-s.width/2), int(hitCenter.y-s.height/2),
                         int(s.width), int(s.height) );

        if (resultWeights[i] > detectThreshold)
        {
            rectList.push_back(resultRect);
            foundWeights->push_back(resultWeights[i]);
@@ -371,13 +371,13 @@ void groupRectangles(vector<Rect>& rectList, vector<int>& rejectLevels, vector<d
    groupRectangles(rectList, groupThreshold, eps, &rejectLevels, &levelWeights);
}
//can be used for HOG detection algorithm only
void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights,
                               vector<double>& foundScales, double detectThreshold, Size winDetSize)
{
    groupRectangles_meanshift(rectList, detectThreshold, &foundWeights, foundScales, winDetSize);
}


FeatureEvaluator::~FeatureEvaluator() {}
bool FeatureEvaluator::read(const FileNode&) {return true;}
@@ -394,21 +394,21 @@ bool HaarEvaluator::Feature :: read( const FileNode& node )
{
    FileNode rnode = node[CC_RECTS];
    FileNodeIterator it = rnode.begin(), it_end = rnode.end();

    int ri;
    for( ri = 0; ri < RECT_NUM; ri++ )
    {
        rect[ri].r = Rect();
        rect[ri].weight = 0.f;
    }

    for(ri = 0; it != it_end; ++it, ri++)
    {
        FileNodeIterator it2 = (*it).begin();
        it2 >> rect[ri].r.x >> rect[ri].r.y >>
            rect[ri].r.width >> rect[ri].r.height >> rect[ri].weight;
    }

    tilted = (int)node[CC_TILTED] != 0;
    return true;
}
@@ -427,7 +427,7 @@ bool HaarEvaluator::read(const FileNode& node)
    featuresPtr = &(*features)[0];
    FileNodeIterator it = node.begin(), it_end = node.end();
    hasTiltedFeatures = false;

    for(int i = 0; it != it_end; ++it, i++)
    {
        if(!featuresPtr[i].read(*it))
@@ -437,7 +437,7 @@ bool HaarEvaluator::read(const FileNode& node)
    }
    return true;
}

Ptr<FeatureEvaluator> HaarEvaluator::clone() const
{
    HaarEvaluator* ret = new HaarEvaluator;
@@ -451,7 +451,7 @@ Ptr<FeatureEvaluator> HaarEvaluator::clone() const
    memcpy( ret->p, p, 4*sizeof(p[0]) );
    memcpy( ret->pq, pq, 4*sizeof(pq[0]) );
    ret->offset = offset;
    ret->varianceNormFactor = varianceNormFactor;
    return ret;
}

@@ -460,10 +460,10 @@ bool HaarEvaluator::setImage( const Mat &image, Size _origWinSize )
    int rn = image.rows+1, cn = image.cols+1;
    origWinSize = _origWinSize;
    normrect = Rect(1, 1, origWinSize.width-2, origWinSize.height-2);

    if (image.cols < origWinSize.width || image.rows < origWinSize.height)
        return false;

    if( sum0.rows < rn || sum0.cols < cn )
    {
        sum0.create(rn, cn, CV_32S);
@@ -485,10 +485,10 @@ bool HaarEvaluator::setImage( const Mat &image, Size _origWinSize )
    const double* sqdata = (const double*)sqsum.data;
    size_t sumStep = sum.step/sizeof(sdata[0]);
    size_t sqsumStep = sqsum.step/sizeof(sqdata[0]);

    CV_SUM_PTRS( p[0], p[1], p[2], p[3], sdata, normrect, sumStep );
    CV_SUM_PTRS( pq[0], pq[1], pq[2], pq[3], sqdata, normrect, sqsumStep );

    size_t fi, nfeatures = features->size();

    for( fi = 0; fi < nfeatures; fi++ )
@@ -568,19 +568,19 @@ bool LBPEvaluator::setImage( const Mat& image, Size _origWinSize )

    if( image.cols < origWinSize.width || image.rows < origWinSize.height )
        return false;

    if( sum0.rows < rn || sum0.cols < cn )
        sum0.create(rn, cn, CV_32S);
    sum = Mat(rn, cn, CV_32S, sum0.data);
    integral(image, sum);

    size_t fi, nfeatures = features->size();

    for( fi = 0; fi < nfeatures; fi++ )
        featuresPtr[fi].updatePtrs( sum );
    return true;
}

bool LBPEvaluator::setWindow( Point pt )
{
    if( pt.x < 0 || pt.y < 0 ||
@@ -589,7 +589,7 @@ bool LBPEvaluator::setWindow( Point pt )
        return false;
    offset = pt.y * ((int)sum.step/sizeof(int)) + pt.x;
    return true;
}

//---------------------------------------------- HOGEvaluator ---------------------------------------
bool HOGEvaluator::Feature :: read( const FileNode& node )
@@ -638,7 +638,7 @@ Ptr<FeatureEvaluator> HOGEvaluator::clone() const
    ret->featuresPtr = &(*ret->features)[0];
    ret->offset = offset;
    ret->hist = hist;
    ret->normSum = normSum;
    return ret;
}

@@ -756,7 +756,7 @@ void HOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, Mat
    memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );
    histBuf += histStep + 1;
    for( y = 0; y < qangle.rows; y++ )
    {
        histBuf[-1] = 0.f;
        float strSum = 0.f;
        for( x = 0; x < qangle.cols; x++ )
@@ -775,7 +775,7 @@ void HOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, Mat
Ptr<FeatureEvaluator> FeatureEvaluator::create( int featureType )
{
    return featureType == HAAR ? Ptr<FeatureEvaluator>(new HaarEvaluator) :
        featureType == LBP ? Ptr<FeatureEvaluator>(new LBPEvaluator) :
        featureType == HOG ? Ptr<FeatureEvaluator>(new HOGEvaluator) :
        Ptr<FeatureEvaluator>();
}
@@ -787,13 +787,13 @@ CascadeClassifier::CascadeClassifier()
}

CascadeClassifier::CascadeClassifier(const string& filename)
{
    load(filename);
}

CascadeClassifier::~CascadeClassifier()
{
}

bool CascadeClassifier::empty() const
{
@@ -805,57 +805,57 @@ bool CascadeClassifier::load(const string& filename)
    oldCascade.release();
    data = Data();
    featureEvaluator.release();

    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;

    if( read(fs.getFirstTopLevelNode()) )
        return true;

    fs.release();

    oldCascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
    return !oldCascade.empty();
}

-int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& featureEvaluator, Point pt, double& weight )
+int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& evaluator, Point pt, double& weight )
{
    CV_Assert( oldCascade.empty() );

    assert( data.featureType == FeatureEvaluator::HAAR ||
            data.featureType == FeatureEvaluator::LBP ||
            data.featureType == FeatureEvaluator::HOG );

-   if( !featureEvaluator->setWindow(pt) )
+   if( !evaluator->setWindow(pt) )
        return -1;
    if( data.isStumpBased )
    {
        if( data.featureType == FeatureEvaluator::HAAR )
-           return predictOrderedStump<HaarEvaluator>( *this, featureEvaluator, weight );
+           return predictOrderedStump<HaarEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::LBP )
-           return predictCategoricalStump<LBPEvaluator>( *this, featureEvaluator, weight );
+           return predictCategoricalStump<LBPEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::HOG )
-           return predictOrderedStump<HOGEvaluator>( *this, featureEvaluator, weight );
+           return predictOrderedStump<HOGEvaluator>( *this, evaluator, weight );
        else
            return -2;
    }
    else
    {
        if( data.featureType == FeatureEvaluator::HAAR )
-           return predictOrdered<HaarEvaluator>( *this, featureEvaluator, weight );
+           return predictOrdered<HaarEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::LBP )
-           return predictCategorical<LBPEvaluator>( *this, featureEvaluator, weight );
+           return predictCategorical<LBPEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::HOG )
-           return predictOrdered<HOGEvaluator>( *this, featureEvaluator, weight );
+           return predictOrdered<HOGEvaluator>( *this, evaluator, weight );
        else
            return -2;
    }
}

-bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& featureEvaluator, const Mat& image )
+bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& evaluator, const Mat& image )
{
-   return empty() ? false : featureEvaluator->setImage(image, data.origWinSize);
+   return empty() ? false : evaluator->setImage(image, data.origWinSize);
}

void CascadeClassifier::setMaskGenerator(Ptr<MaskGenerator> _maskGenerator)
@@ -878,7 +878,7 @@ void CascadeClassifier::setFaceDetectionMaskGenerator()

struct CascadeClassifierInvoker
{
    CascadeClassifierInvoker( CascadeClassifier& _cc, Size _sz1, int _stripSize, int _yStep, double _factor,
        ConcurrentRectVector& _vec, vector<int>& _levels, vector<double>& _weights, bool outputLevels, const Mat& _mask)
    {
        classifier = &_cc;
@@ -891,7 +891,7 @@ struct CascadeClassifierInvoker
        levelWeights = outputLevels ? &_weights : 0;
        mask=_mask;
    }

    void operator()(const BlockedRange& range) const
    {
        Ptr<FeatureEvaluator> evaluator = classifier->featureEvaluator->clone();
@@ -916,11 +916,11 @@ struct CascadeClassifierInvoker
                        result = -(int)classifier->data.stages.size();
                    if( classifier->data.stages.size() + result < 4 )
                    {
                        rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor), winSize.width, winSize.height));
                        rejectLevels->push_back(-result);
                        levelWeights->push_back(gypWeight);
                    }
                }
                else if( result > 0 )
                    rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
                                               winSize.width, winSize.height));
@@ -929,7 +929,7 @@ struct CascadeClassifierInvoker
            }
        }
    }

    CascadeClassifier* classifier;
    ConcurrentRectVector* rectangles;
    Size processingRectSize;
@@ -939,7 +939,7 @@ struct CascadeClassifierInvoker
    vector<double> *levelWeights;
    Mat mask;
};

struct getRect { Rect operator ()(const CvAvgComp& e) const { return e.rect; } };

bool CascadeClassifier::detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
@@ -995,17 +995,17 @@ bool CascadeClassifier::setImage(const Mat& image)
    return featureEvaluator->setImage(image, data.origWinSize);
}

void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
                                          vector<int>& rejectLevels,
                                          vector<double>& levelWeights,
                                          double scaleFactor, int minNeighbors,
                                          int flags, Size minObjectSize, Size maxObjectSize,
                                          bool outputRejectLevels )
{
    const double GROUP_EPS = 0.2;

    CV_Assert( scaleFactor > 1 && image.depth() == CV_8U );

    if( empty() )
        return;

@@ -1031,7 +1031,7 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object

    if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
        maxObjectSize = image.size();

    Mat grayImage = image;
    if( grayImage.channels() > 1 )
    {
@@ -1039,7 +1039,7 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
        cvtColor(grayImage, temp, CV_BGR2GRAY);
        grayImage = temp;
    }

    Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);
    vector<Rect> candidates;

@@ -1050,14 +1050,14 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
        Size windowSize( cvRound(originalWindowSize.width*factor), cvRound(originalWindowSize.height*factor) );
        Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
        Size processingRectSize( scaledImageSize.width - originalWindowSize.width + 1, scaledImageSize.height - originalWindowSize.height + 1 );

        if( processingRectSize.width <= 0 || processingRectSize.height <= 0 )
            break;
        if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
            break;
        if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
            continue;

        Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
        resize( grayImage, scaledImage, scaledImageSize, 0, 0, CV_INTER_LINEAR );

@@ -1083,12 +1083,12 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
        stripSize = processingRectSize.height;
    #endif

        if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
                                rejectLevels, levelWeights, outputRejectLevels ) )
            break;
    }

    objects.resize(candidates.size());
    std::copy(candidates.begin(), candidates.end(), objects.begin());

@@ -1108,14 +1108,14 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
{
    vector<int> fakeLevels;
    vector<double> fakeWeights;
    detectMultiScale( image, objects, fakeLevels, fakeWeights, scaleFactor,
                      minNeighbors, flags, minObjectSize, maxObjectSize, false );
}

bool CascadeClassifier::Data::read(const FileNode &root)
{
    static const float THRESHOLD_EPS = 1e-5f;

    // load stage params
    string stageTypeStr = (string)root[CC_STAGE_TYPE];
    if( stageTypeStr == CC_BOOST )
@@ -1232,11 +1232,11 @@ bool CascadeClassifier::read(const FileNode& root)
    FileNode fn = root[CC_FEATURES];
    if( fn.empty() )
        return false;

    return featureEvaluator->read(fn);
}

template<> void Ptr<CvHaarClassifierCascade>::delete_obj()
{ cvReleaseHaarClassifierCascade(&obj); }

} // namespace cv

@@ -256,14 +256,14 @@ static int decode(Sampler &sa, code &cc)
{
    uchar binary[8] = {0,0,0,0,0,0,0,0};
    uchar b = 0;
-   int i, sum;
+   int sum;

    sum = 0;

-   for (i = 0; i < 64; i++)
+   for (int i = 0; i < 64; i++)
        sum += sa.getpixel(1 + (i & 7), 1 + (i >> 3));
    uchar mean = (uchar)(sum / 64);
-   for (i = 0; i < 64; i++) {
+   for (int i = 0; i < 64; i++) {
        b = (b << 1) + (sa.getpixel(pickup[i].x, pickup[i].y) <= mean);
        if ((i & 7) == 7) {
            binary[i >> 3] = b;
@@ -275,12 +275,11 @@ static int decode(Sampler &sa, code &cc)

    uchar c[5] = {0,0,0,0,0};
    {
-       int i, j;
        uchar a[5] = {228, 48, 15, 111, 62};
        int k = 5;
-       for (i = 0; i < 3; i++) {
+       for (int i = 0; i < 3; i++) {
            uchar t = binary[i] ^ c[4];
-           for (j = k - 1; j != -1; j--) {
+           for (int j = k - 1; j != -1; j--) {
                if (t == 0)
                    c[j] = 0;
                else
@@ -390,12 +389,12 @@ deque <CvDataMatrixCode> cvFindDataMatrix(CvMat *im)
    deque <CvPoint> candidates;
    {
        int x, y;
-       int r = cxy->rows;
-       int c = cxy->cols;
-       for (y = 0; y < r; y++) {
+       int rows = cxy->rows;
+       int cols = cxy->cols;
+       for (y = 0; y < rows; y++) {
            const short *cd = (const short*)cvPtr2D(cxy, y, 0);
            const short *ccd = (const short*)cvPtr2D(ccxy, y, 0);
-           for (x = 0; x < c; x += 4, cd += 8, ccd += 8) {
+           for (x = 0; x < cols; x += 4, cd += 8, ccd += 8) {
                __m128i v = _mm_loadu_si128((const __m128i*)cd);
                __m128 cyxyxA = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v, v), 16));
                __m128 cyxyxB = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v, v), 16));
@@ -496,7 +495,7 @@ endo: ; // end search for this o

namespace cv
{

void findDataMatrix(InputArray _image,
                    vector<string>& codes,
                    OutputArray _corners,
@@ -507,23 +506,23 @@ void findDataMatrix(InputArray _image,
    deque <CvDataMatrixCode> rc = cvFindDataMatrix(&m);
    int i, n = (int)rc.size();
    Mat corners;

    if( _corners.needed() )
    {
        _corners.create(n, 4, CV_32SC2);
        corners = _corners.getMat();
    }

    if( _dmtx.needed() )
        _dmtx.create(n, 1, CV_8U);

    codes.resize(n);

    for( i = 0; i < n; i++ )
    {
        CvDataMatrixCode& rc_i = rc[i];
        codes[i] = string(rc_i.msg);

        if( corners.data )
        {
            const Point* srcpt = (Point*)rc_i.corners->data.ptr;
@@ -532,7 +531,7 @@ void findDataMatrix(InputArray _image,
                dstpt[k] = srcpt[k];
        }
        cvReleaseMat(&rc_i.corners);

        if( _dmtx.needed() )
        {
            _dmtx.create(rc_i.original->rows, rc_i.original->cols, rc_i.original->type, i);
@@ -550,20 +549,20 @@ void drawDataMatrixCodes(InputOutputArray _image,
    Mat image = _image.getMat();
    Mat corners = _corners.getMat();
    int i, n = corners.rows;

    if( n > 0 )
    {
        CV_Assert( corners.depth() == CV_32S &&
                   corners.cols*corners.channels() == 8 &&
                   n == (int)codes.size() );
    }

    for( i = 0; i < n; i++ )
    {
        Scalar c(0, 255, 0);
        Scalar c2(255, 0,0);
        const Point* pt = (const Point*)corners.ptr(i);

        for( int k = 0; k < 4; k++ )
            line(image, pt[k], pt[(k+1)%4], c);
        //int baseline = 0;
@@ -571,5 +570,5 @@ void drawDataMatrixCodes(InputOutputArray _image,
        putText(image, codes[i], pt[0], CV_FONT_HERSHEY_SIMPLEX, 0.8, c2, 1, CV_AA, false);
    }
}

}

@@ -9,8 +9,8 @@
//
//
// API
// int GetPointOfIntersection(const float *f,
                              const float a, const float b,
                              int q1, int q2, float *point);
// INPUT
// f - function on the regular grid
@@ -23,15 +23,15 @@
// RESULT
// Error status
*/
int GetPointOfIntersection(const float *f,
                           const float a, const float b,
                           int q1, int q2, float *point)
{
    if (q1 == q2)
    {
        return DISTANCE_TRANSFORM_EQUAL_POINTS;
    } /* if (q1 == q2) */
    (*point) = ( (f[q2] - a * q2 + b *q2 * q2) -
                 (f[q1] - a * q1 + b * q1 * q1) ) / (2 * b * (q2 - q1));
    return DISTANCE_TRANSFORM_OK;
}
@@ -43,9 +43,9 @@ int GetPointOfIntersection(const float *f,
//
// API
// int DistanceTransformOneDimensionalProblem(const float *f, const int n,
                                              const float a, const float b,
                                              float *distanceTransform,
                                              int *points);
// INPUT
// f - function on the regular grid
// n - grid dimension
@@ -58,7 +58,7 @@ int GetPointOfIntersection(const float *f,
// Error status
*/
int DistanceTransformOneDimensionalProblem(const float *f, const int n,
                                           const float a, const float b,
                                           float *distanceTransform,
                                           int *points)
{
@@ -73,7 +73,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
    // Allocation memory (must be free in this function)
    v = (int *)malloc (sizeof(int) * n);
    z = (float *)malloc (sizeof(float) * (n + 1));

    v[0] = 0;
    z[0] = (float)F_MIN; // left border of envelope
    z[1] = (float)F_MAX; // right border of envelope
@@ -89,7 +89,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
        } /* if (tmp != DISTANCE_TRANSFORM_OK) */
        if (pointIntersection <= z[k])
        {
            // Envelope doesn't contain current parabola
            do
            {
                k--;
@@ -144,7 +144,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
// INPUT
// k - index of the previous cycle element
// n - number of matrix rows
// q - parameter that equal
     (number_of_rows * number_of_columns - 1)
// OUTPUT
// None
@@ -196,7 +196,7 @@ void TransposeCycleElements(float *a, int *cycle, int cycle_len)
// RESULT
// Error status
*/
-void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)
+static void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)
{
    int i;
    int buf;
@@ -229,7 +229,7 @@ void Transpose(float *a, int n, int m)
    int max_cycle_len;

    max_cycle_len = n * m;

    // Allocation memory (must be free in this function)
    cycle = (int *)malloc(sizeof(int) * max_cycle_len);

@@ -240,12 +240,12 @@ void Transpose(float *a, int n, int m)
        k = GetNextCycleElement(i, n, q);
        cycle[cycle_len] = i;
        cycle_len++;

        while (k > i)
        {
            cycle[cycle_len] = k;
            cycle_len++;
            k = GetNextCycleElement(k, n, q);
        }
        if (k == i)
        {
@@ -272,14 +272,14 @@ void Transpose(float *a, int n, int m)
// RESULT
// None
*/
-void Transpose_int(int *a, int n, int m)
+static void Transpose_int(int *a, int n, int m)
{
    int *cycle;
    int i, k, q, cycle_len;
    int max_cycle_len;

    max_cycle_len = n * m;

    // Allocation memory (must be free in this function)
    cycle = (int *)malloc(sizeof(int) * max_cycle_len);

@@ -290,12 +290,12 @@ void Transpose_int(int *a, int n, int m)
        k = GetNextCycleElement(i, n, q);
        cycle[cycle_len] = i;
        cycle_len++;

        while (k > i)
        {
            cycle[cycle_len] = k;
            cycle_len++;
            k = GetNextCycleElement(k, n, q);
        }
        if (k == i)
        {
@@ -311,21 +311,21 @@ void Transpose_int(int *a, int n, int m)
/*
// Decision of two dimensional problem generalized distance transform
// on the regular grid at all points
// min{d2(y' - y) + d4(y' - y)(y' - y) +
      min(d1(x' - x) + d3(x' - x)(x' - x) + f(x',y'))} (on x', y')
//
// API
// int DistanceTransformTwoDimensionalProblem(const float *f,
                                              const int n, const int m,
                                              const float coeff[4],
                                              float *distanceTransform,
                                              int *pointsX, int *pointsY);
// INPUT
// f - function on the regular grid
// n - number of rows
// m - number of columns
// coeff - coefficients of optimizable function
         coeff[0] = d1, coeff[1] = d2,
         coeff[2] = d3, coeff[3] = d4
// OUTPUT
// distanceTransform - values of generalized distance transform
@@ -334,9 +334,9 @@ void Transpose_int(int *a, int n, int m)
// RESULT
// Error status
*/
int DistanceTransformTwoDimensionalProblem(const float *f,
                                           const int n, const int m,
                                           const float coeff[4],
                                           float *distanceTransform,
                                           int *pointsX, int *pointsY)
{
@@ -349,10 +349,10 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
    for (i = 0; i < n; i++)
    {
        resOneDimProblem = DistanceTransformOneDimensionalProblem(
                               f + i * m, m,
                               coeff[0], coeff[2],
                               &internalDistTrans[i * m],
                               &internalPointsX[i * m]);
        if (resOneDimProblem != DISTANCE_TRANSFORM_OK)
            return DISTANCE_TRANSFORM_ERROR;
    }
@@ -360,9 +360,9 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
    for (j = 0; j < m; j++)
    {
        resOneDimProblem = DistanceTransformOneDimensionalProblem(
                               &internalDistTrans[j * n], n,
                               coeff[1], coeff[3],
                               distanceTransform + j * n,
                               pointsY + j * n);
        if (resOneDimProblem != DISTANCE_TRANSFORM_OK)
            return DISTANCE_TRANSFORM_ERROR;

@@ -30,12 +30,12 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
    int height, width, numChannels;
    int i, j, kk, c, ii, jj, d;
    float * datadx, * datady;

    // channel index
    int ch;
    // gradient magnitude and direction
    float magnitude, x, y, tx, ty;

    IplImage * dx, * dy;
    int *nearest;
    float *w, a_x, b_x;
@@ -51,7 +51,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
    // index of the sector that each gradient falls into
    int * alfa;

    // sector boundary directions
    float boundary_x[NUM_SECTOR + 1];
    float boundary_y[NUM_SECTOR + 1];
@@ -63,9 +63,9 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)

    numChannels = image->nChannels;

    dx = cvCreateImage(cvSize(image->width, image->height),
                       IPL_DEPTH_32F, 3);
    dy = cvCreateImage(cvSize(image->width, image->height),
                       IPL_DEPTH_32F, 3);

    sizeX = width / k;
@@ -77,7 +77,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)

    cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
    cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));

    float arg_vector;
    for(i = 0; i <= NUM_SECTOR; i++)
    {
@@ -113,20 +113,20 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
                    y = ty;
                }
            }/*for(ch = 1; ch < numChannels; ch++)*/

            max  = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++)
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max)
                {
                    max = dotProd;
                    maxi = kk;
                }
                else
                {
                    if (-dotProd > max)
                    {
                        max = -dotProd;
                        maxi = kk + NUM_SECTOR;
@@ -134,14 +134,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
                }
            }
            alfa[j * width * 2 + i * 2    ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;
        }/*for(i = 0; i < width; i++)*/
    }/*for(j = 0; j < height; j++)*/

    // nearest cells and interpolation weights
    nearest = (int  *)malloc(sizeof(int  ) *  k);
    w       = (float*)malloc(sizeof(float) * (k * 2));

    for(i = 0; i < k / 2; i++)
    {
        nearest[i] = -1;
@@ -155,15 +155,15 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
    {
        b_x = k / 2 + j + 0.5f;
        a_x = k / 2 - j - 0.5f;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = 0; j < k / 2; j++)*/
    for(j = k / 2; j < k; j++)
    {
        a_x = j - k / 2 + 0.5f;
        b_x =-j + k / 2 - 0.5f + k;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
    }/*for(j = k / 2; j < k; j++)*/

@@ -176,40 +176,40 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
    {
        for(jj = 0; jj < k; jj++)
        {
            if ((i * k + ii > 0) &&
                (i * k + ii < height - 1) &&
                (j * k + jj > 0) &&
                (j * k + jj < width  - 1))
            {
                d = (k * i + ii) * width + (j * k + jj);
                (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] +=
                    r[d] * w[ii * 2] * w[jj * 2];
                (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                    r[d] * w[ii * 2] * w[jj * 2];
                if ((i + nearest[ii] >= 0) &&
                    (i + nearest[ii] <= sizeY - 1))
                {
                    (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                        r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                    (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                        r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                }
                if ((j + nearest[jj] >= 0) &&
                    (j + nearest[jj] <= sizeX - 1))
                {
                    (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                        r[d] * w[ii * 2] * w[jj * 2 + 1];
                    (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                        r[d] * w[ii * 2] * w[jj * 2 + 1];
                }
                if ((i + nearest[ii] >= 0) &&
                    (i + nearest[ii] <= sizeY - 1) &&
                    (j + nearest[jj] >= 0) &&
                    (j + nearest[jj] <= sizeX - 1))
                {
                    (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=
                        r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                    (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
                        r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                }
            }
@@ -217,14 +217,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
        }/*for(ii = 0; ii < k; ii++)*/
    }/*for(j = 1; j < sizeX - 1; j++)*/
    }/*for(i = 1; i < sizeY - 1; i++)*/

    cvReleaseImage(&dx);
    cvReleaseImage(&dy);

    free(w);
    free(nearest);

    free(r);
    free(alfa);

@@ -232,7 +232,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
}

/*
// Feature map Normalization and Truncation
//
// API
// int normalizeAndTruncate(featureMap *map, const float alfa);
@@ -270,7 +270,7 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
        }/*for(j = 0; j < p; j++)*/
        partOfNorm[i] = valOfNorm;
    }/*for(i = 0; i < sizeX * sizeY; i++)*/

    sizeX -= 2;
    sizeY -= 2;

@@ -369,13 +369,13 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
// Error status
*/
int PCAFeatureMaps(CvLSVMFeatureMap *map)
{
    int i,j, ii, jj, k;
    int sizeX, sizeY, p, pp, xp, yp, pos1, pos2;
    float * newData;
    float val;
    float nx, ny;

    sizeX = map->sizeX;
    sizeY = map->sizeY;
    p     = map->numFeatures;
@@ -424,7 +424,7 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
            }/*for(jj = 0; jj < xp; jj++)*/
            newData[pos2 + k] = val * nx;
            k++;
        } /*for(ii = 0; ii < yp; ii++)*/
    }/*for(j = 0; j < sizeX; j++)*/
    }/*for(i = 0; i < sizeY; i++)*/
    //swop data
@@ -439,22 +439,22 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
}

-int getPathOfFeaturePyramid(IplImage * image,
+static int getPathOfFeaturePyramid(IplImage * image,
                            float step, int numStep, int startIndex,
                            int sideLength, CvLSVMFeaturePyramid **maps)
{
    CvLSVMFeatureMap *map;
    IplImage *scaleTmp;
    float scale;
-   int i, err;
+   int i;

    for(i = 0; i < numStep; i++)
    {
        scale = 1.0f / powf(step, (float)i);
        scaleTmp = resize_opencv (image, scale);
-       err = getFeatureMaps(scaleTmp, sideLength, &map);
-       err = normalizeAndTruncate(map, VAL_OF_TRUNCATE);
-       err = PCAFeatureMaps(map);
+       getFeatureMaps(scaleTmp, sideLength, &map);
+       normalizeAndTruncate(map, VAL_OF_TRUNCATE);
+       PCAFeatureMaps(map);
        (*maps)->pyramid[startIndex + i] = map;
        cvReleaseImage(&scaleTmp);
    }/*for(i = 0; i < numStep; i++)*/
@@ -462,13 +462,13 @@ int getPathOfFeaturePyramid(IplImage * image,
}

/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image, const filterObject **all_F,
                         const int n_f,
                         const int lambda, const int k,
                         const int startX, const int startY,
                         const int W, const int H, featurePyramid **maps);
// INPUT
// image - image
@@ -484,7 +484,7 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
    int numStep;
    int maxNumCells;
    int W, H;

    if(image->depth == IPL_DEPTH_32F)
    {
        imgResize = image;
@@ -493,9 +493,9 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
    {
        imgResize = cvCreateImage(cvSize(image->width , image->height) ,
                                  IPL_DEPTH_32F , 3);
        cvConvert(image, imgResize);
    }

    W = imgResize->width;
    H = imgResize->height;

@@ -506,14 +506,14 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
        maxNumCells = H / SIDE_LENGTH;
    }
    numStep = (int)(logf((float) maxNumCells / (5.0f)) / logf( step )) + 1;

    allocFeaturePyramidObject(maps, numStep + LAMBDA);

    getPathOfFeaturePyramid(imgResize, step   , LAMBDA, 0,
                            SIDE_LENGTH / 2, maps);
    getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA,
                            SIDE_LENGTH    , maps);

    if(image->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);

@@ -1,14 +1,14 @@
#include "precomp.hpp"
#include "_lsvm_fft.h"

-int getEntireRes(int number, int divisor, int *entire, int *res)
-{
-    *entire = number / divisor;
-    *res = number % divisor;
-    return FFT_OK;
-}
+// static int getEntireRes(int number, int divisor, int *entire, int *res)
+// {
+//     *entire = number / divisor;
+//     *res = number % divisor;
+//     return FFT_OK;
+// }

-int getMultipliers(int n, int *n1, int *n2)
+static int getMultipliers(int n, int *n1, int *n2)
{
    int multiplier, i;
    if (n == 1)
@@ -36,13 +36,13 @@ int getMultipliers(int n, int *n1, int *n2)
// 1-dimensional FFT
//
// API
// int fft(float *x_in, float *x_out, int n, int shift);
// INPUT
// x_in - input signal
// n - number of elements for searching Fourier image
// shift - shift between input elements
// OUTPUT
// x_out - output signal (contains 2n elements in order
         Re(x_in[0]), Im(x_in[0]), Re(x_in[1]), Im(x_in[1]) and etc.)
// RESULT
// Error status
@@ -107,8 +107,8 @@ int fft(float *x_in, float *x_out, int n, int shift)
// API
// int fftInverse(float *x_in, float *x_out, int n, int shift);
// INPUT
// x_in - Fourier image of 1d input signal(contains 2n elements
         in order Re(x_in[0]), Im(x_in[0]),
         Re(x_in[1]), Im(x_in[1]) and etc.)
// n - number of elements for searching counter FFT image
// shift - shift between input elements
@@ -180,7 +180,7 @@ int fftInverse(float *x_in, float *x_out, int n, int shift)
// numColls - number of collumns
// OUTPUT
// x_out - output signal (contains (2 * numRows * numColls) elements
         in order Re(x_in[0][0]), Im(x_in[0][0]),
         Re(x_in[0][1]), Im(x_in[0][1]) and etc.)
// RESULT
// Error status
@@ -193,14 +193,14 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
    x_outTmp = (float *)malloc(sizeof(float) * (2 * size));
    for (i = 0; i < numRows; i++)
    {
        fft(x_in     + i * 2 * numColls,
            x_outTmp + i * 2 * numColls,
            numColls, 2);
    }
    for (i = 0; i < numColls; i++)
    {
        fft(x_outTmp + 2 * i,
            x_out    + 2 * i,
            numRows, 2 * numColls);
    }
    free(x_outTmp);
@@ -213,8 +213,8 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
// API
// int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls);
// INPUT
// x_in - Fourier image of matrix (contains (2 * numRows * numColls)
         elements in order Re(x_in[0][0]), Im(x_in[0][0]),
         Re(x_in[0][1]), Im(x_in[0][1]) and etc.)
// numRows - number of rows
// numColls - number of collumns
@@ -237,8 +237,8 @@ int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls)
    }
    for (i = 0; i < numColls; i++)
    {
        fftInverse(x_outTmp + 2 * i,
                   x_out    + 2 * i,
                   numRows, 2 * numColls);
    }
    free(x_outTmp);

@@ -653,7 +653,7 @@ double icvEvalHidHaarClassifier( CvHidHaarClassifier* classifier,
|
||||
}
|
||||
|
||||
|
||||
CV_IMPL int
|
||||
static int
|
||||
cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
|
||||
CvPoint pt, double& stage_sum, int start_stage )
|
||||
{
|
||||
@@ -759,7 +759,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
|
||||
sum += calc_sum(node->feature.rect[1],p_offset) * node->feature.rect[1].weight;
|
||||
if( node->feature.rect[2].p0 )
|
||||
sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
|
||||
|
||||
|
||||
stage_sum += classifier->alpha[sum >= t];
|
||||
#else
|
||||
// ayasin - NHM perf optim. Avoid use of costly flaky jcc
|
||||
@@ -771,7 +771,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
|
||||
if( node->feature.rect[2].p0 )
|
||||
_sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
|
||||
__m128d sum = _mm_set_sd(_sum);
|
||||
|
||||
|
||||
t = _mm_cmpgt_sd(t, sum);
|
||||
stage_sum = _mm_add_sd(stage_sum, _mm_blendv_pd(b, a, t));
|
||||
#endif
@@ -823,7 +823,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
HaarDetectObjects_ScaleImage_Invoker( const CvHaarClassifierCascade* _cascade,
int _stripSize, double _factor,
const Mat& _sum1, const Mat& _sqsum1, Mat* _norm1,
Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
std::vector<int>& _levels, std::vector<double>& _weights,
bool _outputLevels )
{
@@ -839,19 +839,19 @@ struct HaarDetectObjects_ScaleImage_Invoker
rejectLevels = _outputLevels ? &_levels : 0;
levelWeights = _outputLevels ? &_weights : 0;
}

void operator()( const BlockedRange& range ) const
{
Size winSize0 = cascade->orig_window_size;
Size winSize(cvRound(winSize0.width*factor), cvRound(winSize0.height*factor));
int y1 = range.begin()*stripSize, y2 = min(range.end()*stripSize, sum1.rows - 1 - winSize0.height);

if (y2 <= y1 || sum1.cols <= 1 + winSize0.width)
return;

Size ssz(sum1.cols - 1 - winSize0.width, y2 - y1);
int x, y, ystep = factor > 2 ? 1 : 2;

#ifdef HAVE_IPP
if( cascade->hid_cascade->ipp_stages )
{
@@ -860,7 +860,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
sqsum1.ptr<double>(y1), sqsum1.step,
norm1->ptr<float>(y1), norm1->step,
ippiSize(ssz.width, ssz.height), iequRect );

int positive = (ssz.width/ystep)*((ssz.height + ystep-1)/ystep);

if( ystep == 1 )
@@ -870,12 +870,12 @@ struct HaarDetectObjects_ScaleImage_Invoker
{
uchar* mask1row = mask1->ptr(y);
memset( mask1row, 0, ssz.width );

if( y % ystep == 0 )
for( x = 0; x < ssz.width; x += ystep )
mask1row[x] = (uchar)1;
}

for( int j = 0; j < cascade->count; j++ )
{
if( ippiApplyHaarClassifier_32f_C1R(
@@ -889,7 +889,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
if( positive <= 0 )
break;
}

if( positive > 0 )
for( y = y1; y < y2; y += ystep )
{
@@ -929,11 +929,11 @@ struct HaarDetectObjects_ScaleImage_Invoker
{
if( result > 0 )
vec->push_back(Rect(cvRound(x*factor), cvRound(y*factor),
winSize.width, winSize.height));
winSize.width, winSize.height));
}
}
}

const CvHaarClassifierCascade* cascade;
int stripSize;
double factor;
@@ -943,7 +943,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
std::vector<int>* rejectLevels;
std::vector<double>* levelWeights;
};

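
The invoker just closed is the parallelism pattern this file uses: a functor captures the shared detection state and scans one strip of rows per range chunk. A reduced sketch of the shape, assuming the cv::BlockedRange / cv::parallel_for API that appears later in this diff:

    struct StripInvoker
    {
        StripInvoker(int _stripSize) : stripSize(_stripSize) {}
        void operator()(const cv::BlockedRange& range) const
        {
            for (int strip = range.begin(); strip < range.end(); ++strip)
            {
                int y1 = strip * stripSize;   // each strip owns its own rows
                (void)y1;                     // ... per-strip detection work ...
            }
        }
        int stripSize;
    };
    // cv::parallel_for(cv::BlockedRange(0, stripCount), StripInvoker(stripSize));
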
struct HaarDetectObjects_ScaleCascade_Invoker
{
@@ -960,7 +960,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
p = _p; pq = _pq;
vec = &_vec;
}

void operator()( const BlockedRange& range ) const
{
int iy, startY = range.begin(), endY = range.end();
@@ -968,14 +968,14 @@ struct HaarDetectObjects_ScaleCascade_Invoker
const int *pq0 = pq[0], *pq1 = pq[1], *pq2 = pq[2], *pq3 = pq[3];
bool doCannyPruning = p0 != 0;
int sstep = (int)(sumstep/sizeof(p0[0]));

for( iy = startY; iy < endY; iy++ )
{
int ix, y = cvRound(iy*ystep), ixstep = 1;
for( ix = xrange.start; ix < xrange.end; ix += ixstep )
{
int x = cvRound(ix*ystep); // it should really be ystep, not ixstep

if( doCannyPruning )
{
int offset = y*sstep + x;
@@ -987,7 +987,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
continue;
}
}

int result = cvRunHaarClassifierCascade( cascade, cvPoint(x, y), 0 );
if( result > 0 )
vec->push_back(Rect(x, y, winsize.width, winsize.height));
@@ -995,7 +995,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
}
}
}

const CvHaarClassifierCascade* cascade;
double ystep;
size_t sumstep;
@@ -1005,16 +1005,16 @@ struct HaarDetectObjects_ScaleCascade_Invoker
const int** pq;
ConcurrentRectVector* vec;
};

}

CvSeq*
cvHaarDetectObjectsForROC( const CvArr* _img,
cvHaarDetectObjectsForROC( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
double scaleFactor, int minNeighbors, int flags,
double scaleFactor, int minNeighbors, int flags,
CvSize minSize, CvSize maxSize, bool outputRejectLevels )
{
const double GROUP_EPS = 0.2;
@@ -1044,13 +1044,13 @@ cvHaarDetectObjectsForROC( const CvArr* _img,

if( CV_MAT_DEPTH(img->type) != CV_8U )
CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

if( scaleFactor <= 1 )
CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );

if( findBiggestObject )
flags &= ~CV_HAAR_SCALE_IMAGE;

if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->rows;
@@ -1132,7 +1132,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
#else
const int stripCount = 1;
#endif

#ifdef HAVE_IPP
if( use_ipp )
{
@@ -1141,8 +1141,8 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}
else
#endif
cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );

cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );

cv::Mat _norm1(&norm1), _mask1(&mask1);
cv::parallel_for(cv::BlockedRange(0, stripCount),
cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
@@ -1242,22 +1242,22 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
{
rectList.resize(allCandidates.size());
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);

if( !rectList.empty() )
{
size_t i, sz = rectList.size();
cv::Rect maxRect;

for( i = 0; i < sz; i++ )
{
if( rectList[i].area() > maxRect.area() )
maxRect = rectList[i];
}

allCandidates.push_back(maxRect);

scanROI = maxRect;
int dx = cvRound(maxRect.width*GROUP_EPS);
int dy = cvRound(maxRect.height*GROUP_EPS);
@@ -1265,7 +1265,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
scanROI.y = std::max(scanROI.y - dy, 0);
scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);

double minScale = roughSearch ? 0.6 : 0.4;
minSize.width = cvRound(maxRect.width*minScale);
minSize.height = cvRound(maxRect.height*minScale);
@@ -1277,7 +1277,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
rectList.resize(allCandidates.size());
if(!allCandidates.empty())
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

if( minNeighbors != 0 || findBiggestObject )
{
if( outputRejectLevels )
@@ -1291,11 +1291,11 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}
else
rweights.resize(rectList.size(),0);

if( findBiggestObject && rectList.size() )
{
CvAvgComp result_comp = {{0,0,0,0},0};

for( size_t i = 0; i < rectList.size(); i++ )
{
cv::Rect r = rectList[i];
@@ -1322,14 +1322,14 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
}

CV_IMPL CvSeq*
cvHaarDetectObjects( const CvArr* _img,
cvHaarDetectObjects( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
double scaleFactor,
int minNeighbors, int flags, CvSize minSize, CvSize maxSize )
{
std::vector<int> fakeLevels;
std::vector<double> fakeWeights;
return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
scaleFactor, minNeighbors, flags, minSize, maxSize, false );

}
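
A typical call through the wrapper above, as a sketch; the cascade and image file names are placeholders, and cleanup is elided:

    void detectSketch()
    {
        CvHaarClassifierCascade* cascade =
            (CvHaarClassifierCascade*)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
        CvMemStorage* storage = cvCreateMemStorage(0);
        IplImage* img = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
        CvSeq* objects = cvHaarDetectObjects(img, cascade, storage,
            1.1,                  // scaleFactor, must be > 1 (checked above)
            3,                    // minNeighbors
            CV_HAAR_SCALE_IMAGE,
            cvSize(30, 30),       // minSize
            cvSize(0, 0));        // maxSize (0, 0) falls back to the whole image
        (void)objects;
    }
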
@@ -2091,7 +2091,7 @@ namespace cv
HaarClassifierCascade::HaarClassifierCascade() {}
HaarClassifierCascade::HaarClassifierCascade(const String& filename)
{ load(filename); }

bool HaarClassifierCascade::load(const String& filename)
{
cascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));

@@ -456,7 +456,6 @@ void HOGCache::init(const HOGDescriptor* _descriptor,
Size blockSize = descriptor->blockSize;
Size blockStride = descriptor->blockStride;
Size cellSize = descriptor->cellSize;
Size winSize = descriptor->winSize;
int i, j, nbins = descriptor->nbins;
int rawBlockSize = blockSize.width*blockSize.height;

@@ -471,10 +470,10 @@ void HOGCache::init(const HOGDescriptor* _descriptor,
(winSize.height/cacheStride.height)+1);
blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize);
blockCacheFlags.create(cacheSize);
size_t i, cacheRows = blockCache.rows;
size_t cacheRows = blockCache.rows;
ymaxCached.resize(cacheRows);
for( i = 0; i < cacheRows; i++ )
ymaxCached[i] = -1;
for(size_t ii = 0; ii < cacheRows; ii++ )
ymaxCached[ii] = -1;
}

Mat_<float> weights(blockSize);
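
The HOGCache hunk fixes a shadowed index: `i` already exists as an int in the enclosing scope, so the old `size_t i, cacheRows = ...` declaration shadowed it and drew -Wshadow plus signed/unsigned comparison warnings; the patch keeps only cacheRows and loops over a fresh `ii`. The warning in miniature (a sketch, not project code):

    void shadowSketch()
    {
        int i = 0;                             // outer index, as in HOGCache::init
        {
            // size_t i = 0;                   // would shadow the outer 'i' (-Wshadow)
            for (size_t ii = 0; ii < 4; ++ii)  // a fresh name sidesteps the warning
                (void)ii;
        }
        (void)i;
    }
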
@@ -3,11 +3,11 @@
#include "_lsvm_matching.h"

/*
// Transformation filter displacement from the block space
// Transformation filter displacement from the block space
// to the space of pixels at the initial image
//
// API
// int convertPoints(int countLevel, CvPoint *points, int *levels,
// int convertPoints(int countLevel, CvPoint *points, int *levels,
CvPoint **partsDisplacement, int kPoints, int n);
// INPUT
// countLevel - the number of levels in the feature pyramid
@@ -25,10 +25,10 @@
// RESULT
// Error status
*/
int convertPoints(int /*countLevel*/, int lambda,
int convertPoints(int /*countLevel*/, int lambda,
int initialImageLevel,
CvPoint *points, int *levels,
CvPoint **partsDisplacement, int kPoints, int n,
CvPoint *points, int *levels,
CvPoint **partsDisplacement, int kPoints, int n,
int maxXBorder,
int maxYBorder)
{
@@ -37,7 +37,7 @@ int convertPoints(int /*countLevel*/, int lambda,
step = powf( 2.0f, 1.0f / ((float)lambda) );

computeBorderSize(maxXBorder, maxYBorder, &bx, &by);

for (i = 0; i < kPoints; i++)
{
// scaling factor for root filter
@@ -48,10 +48,10 @@ int convertPoints(int /*countLevel*/, int lambda,
// scaling factor for part filters
scale = SIDE_LENGTH * powf(step, (float)(levels[i] - lambda - initialImageLevel));
for (j = 0; j < n; j++)
{
partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x -
{
partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x -
2 * bx + 1) * scale);
partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y -
partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y -
2 * by + 1) * scale);
}
}
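
Adjacent feature-pyramid levels differ by the factor step = 2^(1/lambda), so a displacement found at level l maps back to initial-image pixels through SIDE_LENGTH * step^(l - lambda - initialImageLevel). A worked sketch; SIDE_LENGTH = 8 is an assumption here (the real constant lives in a header not shown in this diff):

    #include <cmath>
    float partFilterScale(int level, int lambda, int initialImageLevel)
    {
        const float sideLength = 8.0f;                  // assumed block side length
        float step = powf(2.0f, 1.0f / (float)lambda);  // ratio between pyramid levels
        return sideLength * powf(step, (float)(level - lambda - initialImageLevel));
    }
    // e.g. lambda = 10, initialImageLevel = 10, level = 20 -> sideLength * 2^0 = 8
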
@@ -62,7 +62,7 @@ int convertPoints(int /*countLevel*/, int lambda,
// Elimination boxes that are outside the image boudaries
//
// API
// int clippingBoxes(int width, int height,
// int clippingBoxes(int width, int height,
CvPoint *points, int kPoints);
// INPUT
// width - image wediht
@@ -72,12 +72,12 @@ int convertPoints(int /*countLevel*/, int lambda,
// kPoints - points number
// OUTPUT
// points - updated points (if coordinates less than zero then
set zero coordinate, if coordinates more than image
set zero coordinate, if coordinates more than image
size then set coordinates equal image size)
// RESULT
// Error status
*/
int clippingBoxes(int width, int height,
int clippingBoxes(int width, int height,
CvPoint *points, int kPoints)
{
int i;
@@ -111,7 +111,7 @@ int clippingBoxes(int width, int height,
int maxXBorder, int maxYBorder);

// INPUT
// image - initial image
// image - initial image
// maxXBorder - the largest root filter size (X-direction)
// maxYBorder - the largest root filter size (Y-direction)
// OUTPUT
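
Per the comment block above, clippingBoxes clamps every corner into the image. The clamping rule for a single point, sketched (not the function body itself; it follows the comment's "set coordinates equal image size" wording):

    CvPoint clampToImage(CvPoint pt, int width, int height)
    {
        if (pt.x < 0) pt.x = 0;
        if (pt.y < 0) pt.y = 0;
        if (pt.x > width)  pt.x = width;    // clamp to the right edge
        if (pt.y > height) pt.y = height;   // clamp to the bottom edge
        return pt;
    }
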
@@ -149,54 +149,54 @@ CvLSVMFeaturePyramid* createFeaturePyramidWithBorder(IplImage *image,
// Computation of the root filter displacement and values of score function
//
// API
// int searchObject(const featurePyramid *H, const filterObject **all_F, int n,
float b,
// int searchObject(const featurePyramid *H, const filterObject **all_F, int n,
float b,
int maxXBorder,
int maxYBorder,
int maxYBorder,
CvPoint **points, int **levels, int *kPoints, float *score,
CvPoint ***partsDisplacement);
// INPUT
// image - initial image for searhing object
// all_F - the set of filters (the first element is root filter,
// all_F - the set of filters (the first element is root filter,
other elements - part filters)
// n - the number of part filters
// b - linear term of the score function
// maxXBorder - the largest root filter size (X-direction)
// maxYBorder - the largest root filter size (Y-direction)
// OUTPUT
// points - positions (x, y) of the upper-left corner
// points - positions (x, y) of the upper-left corner
of root filter frame
// levels - levels that correspond to each position
// kPoints - number of positions
// score - value of the score function
// partsDisplacement - part filters displacement for each position
// partsDisplacement - part filters displacement for each position
of the root filter
// RESULT
// Error status
*/
int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
int n, float b,
int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
int n, float b,
int maxXBorder,
int maxYBorder,
int maxYBorder,
CvPoint **points, int **levels, int *kPoints, float *score,
CvPoint ***partsDisplacement)
{
int opResult;

// Matching
opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,
score, points, levels,
opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,
score, points, levels,
kPoints, partsDisplacement);
if (opResult != LATENT_SVM_OK)
{
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}

// Transformation filter displacement from the block space

// Transformation filter displacement from the block space
// to the space of pixels at the initial image
// that settles at the level number LAMBDA
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
maxXBorder, maxYBorder);

return LATENT_SVM_OK;
@@ -206,7 +206,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
// Computation right bottom corners coordinates of bounding boxes
//
// API
// int estimateBoxes(CvPoint *points, int *levels, int kPoints,
// int estimateBoxes(CvPoint *points, int *levels, int kPoints,
int sizeX, int sizeY, CvPoint **oppositePoints);
// INPUT
// points - left top corners coordinates of bounding boxes
@@ -217,7 +217,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
// RESULT
// Error status
*/
int estimateBoxes(CvPoint *points, int *levels, int kPoints,
static int estimateBoxes(CvPoint *points, int *levels, int kPoints,
int sizeX, int sizeY, CvPoint **oppositePoints)
{
int i;
@@ -237,16 +237,16 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
// Computation of the root filter displacement and values of score function
//
// API
// int searchObjectThreshold(const featurePyramid *H,
// int searchObjectThreshold(const featurePyramid *H,
const filterObject **all_F, int n,
float b,
int maxXBorder, int maxYBorder,
float b,
int maxXBorder, int maxYBorder,
float scoreThreshold,
CvPoint **points, int **levels, int *kPoints,
CvPoint **points, int **levels, int *kPoints,
float **score, CvPoint ***partsDisplacement);
// INPUT
// H - feature pyramid
// all_F - the set of filters (the first element is root filter,
// all_F - the set of filters (the first element is root filter,
other elements - part filters)
// n - the number of part filters
// b - linear term of the score function
@@ -254,22 +254,22 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
// maxYBorder - the largest root filter size (Y-direction)
// scoreThreshold - score threshold
// OUTPUT
// points - positions (x, y) of the upper-left corner
// points - positions (x, y) of the upper-left corner
of root filter frame
// levels - levels that correspond to each position
// kPoints - number of positions
// score - values of the score function
// partsDisplacement - part filters displacement for each position
// partsDisplacement - part filters displacement for each position
of the root filter
// RESULT
// Error status
*/
int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
const CvLSVMFilterObject **all_F, int n,
float b,
int maxXBorder, int maxYBorder,
float b,
int maxXBorder, int maxYBorder,
float scoreThreshold,
CvPoint **points, int **levels, int *kPoints,
CvPoint **points, int **levels, int *kPoints,
float **score, CvPoint ***partsDisplacement,
int numThreads)
{
@@ -284,28 +284,28 @@ int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
return opResult;
}
opResult = tbbThresholdFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,
scoreThreshold, numThreads, score,
points, levels, kPoints,
scoreThreshold, numThreads, score,
points, levels, kPoints,
partsDisplacement);
#else
opResult = thresholdFunctionalScore(all_F, n, H, b,
maxXBorder, maxYBorder,
scoreThreshold,
score, points, levels,
opResult = thresholdFunctionalScore(all_F, n, H, b,
maxXBorder, maxYBorder,
scoreThreshold,
score, points, levels,
kPoints, partsDisplacement);

(void)numThreads;
(void)numThreads;
#endif
if (opResult != LATENT_SVM_OK)
{
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}

// Transformation filter displacement from the block space
}

// Transformation filter displacement from the block space
// to the space of pixels at the initial image
// that settles at the level number LAMBDA
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),
(*levels), (*partsDisplacement), (*kPoints), n,
maxXBorder, maxYBorder);

return LATENT_SVM_OK;
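
A calling sketch for searchObjectThreshold using the signature above; the border sizes and threshold are illustrative, and the callee allocates the output arrays (freeing the nested partsDisplacement arrays is elided):

    #include <cstdlib>
    void searchSketch(const CvLSVMFeaturePyramid* H,
                      const CvLSVMFilterObject** filters, int n, float b)
    {
        CvPoint* points = 0; int* levels = 0; int kPoints = 0;
        float* score = 0; CvPoint** partsDisplacement = 0;
        int res = searchObjectThreshold(H, filters, n, b,
                                        7, 7,        // maxXBorder, maxYBorder (assumed)
                                        -0.5f,       // scoreThreshold (assumed)
                                        &points, &levels, &kPoints,
                                        &score, &partsDisplacement, 1 /*numThreads*/);
        if (res == LATENT_SVM_OK) { free(points); free(levels); free(score); }
    }
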
@@ -350,9 +350,9 @@ int getOppositePoint(CvPoint point,
//
// API
// int showRootFilterBoxes(const IplImage *image,
const filterObject *filter,
const filterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
CvScalar color, int thickness,
int line_type, int shift);
// INPUT
// image - initial image
@@ -370,22 +370,22 @@ int getOppositePoint(CvPoint point,
// Error status
*/
int showRootFilterBoxes(IplImage *image,
const CvLSVMFilterObject *filter,
const CvLSVMFilterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
CvScalar color, int thickness,
int line_type, int shift)
{
{
int i;
float step;
CvPoint oppositePoint;
step = powf( 2.0f, 1.0f / ((float)LAMBDA));

for (i = 0; i < kPoints; i++)
{
// Drawing rectangle for filter
getOppositePoint(points[i], filter->sizeX, filter->sizeY,
getOppositePoint(points[i], filter->sizeX, filter->sizeY,
step, levels[i] - LAMBDA, &oppositePoint);
cvRectangle(image, points[i], oppositePoint,
cvRectangle(image, points[i], oppositePoint,
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
@@ -399,9 +399,9 @@ int showRootFilterBoxes(IplImage *image,
//
// API
// int showPartFilterBoxes(const IplImage *image,
const filterObject *filter,
const filterObject *filter,
CvPoint *points, int *levels, int kPoints,
CvScalar color, int thickness,
CvScalar color, int thickness,
int line_type, int shift);
// INPUT
// image - initial image
@@ -421,9 +421,9 @@ int showRootFilterBoxes(IplImage *image,
*/
int showPartFilterBoxes(IplImage *image,
const CvLSVMFilterObject **filters,
int n, CvPoint **partsDisplacement,
int n, CvPoint **partsDisplacement,
int *levels, int kPoints,
CvScalar color, int thickness,
CvScalar color, int thickness,
int line_type, int shift)
{
int i, j;
@@ -437,10 +437,10 @@ int showPartFilterBoxes(IplImage *image,
for (j = 0; j < n; j++)
{
// Drawing rectangles for part filters
getOppositePoint(partsDisplacement[i][j],
filters[j + 1]->sizeX, filters[j + 1]->sizeY,
getOppositePoint(partsDisplacement[i][j],
filters[j + 1]->sizeX, filters[j + 1]->sizeY,
step, levels[i] - 2 * LAMBDA, &oppositePoint);
cvRectangle(image, partsDisplacement[i][j], oppositePoint,
cvRectangle(image, partsDisplacement[i][j], oppositePoint,
color, thickness, line_type, shift);
}
}
@@ -454,8 +454,8 @@ int showPartFilterBoxes(IplImage *image,
// Drawing boxes
//
// API
// int showBoxes(const IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
// int showBoxes(const IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
CvScalar color, int thickness, int line_type, int shift);
// INPUT
// img - initial image
@@ -470,14 +470,14 @@ int showPartFilterBoxes(IplImage *image,
// RESULT
// Error status
*/
int showBoxes(IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
int showBoxes(IplImage *img,
const CvPoint *points, const CvPoint *oppositePoints, int kPoints,
CvScalar color, int thickness, int line_type, int shift)
{
int i;
for (i = 0; i < kPoints; i++)
{
cvRectangle(img, points[i], oppositePoints[i],
cvRectangle(img, points[i], oppositePoints[i],
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
@@ -491,10 +491,10 @@ int showBoxes(IplImage *img,
//
// API
// int getMaxFilterDims(const filterObject **filters, int kComponents,
const int *kPartFilters,
const int *kPartFilters,
unsigned int *maxXBorder, unsigned int *maxYBorder);
// INPUT
// filters - a set of filters (at first root filter, then part filters
// filters - a set of filters (at first root filter, then part filters
and etc. for all components)
// kComponents - number of components
// kPartFilters - number of part filters for each component
@@ -505,10 +505,10 @@ int showBoxes(IplImage *img,
// Error status
*/
int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
const int *kPartFilters,
const int *kPartFilters,
unsigned int *maxXBorder, unsigned int *maxYBorder)
{
int i, componentIndex;
int i, componentIndex;
*maxXBorder = filters[0]->sizeX;
*maxYBorder = filters[0]->sizeY;
componentIndex = kPartFilters[0] + 1;
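
filters[] stores each component's root filter first, then its part filters, so the scan above hops kPartFilters[i] + 1 entries to reach the next root. The walk in isolation, sketched:

    void maxRootDims(const CvLSVMFilterObject** filters, int kComponents,
                     const int* kPartFilters, int* maxX, int* maxY)
    {
        int idx = 0;
        *maxX = filters[0]->sizeX;
        *maxY = filters[0]->sizeY;
        for (int i = 0; i < kComponents; i++)
        {
            if (filters[idx]->sizeX > *maxX) *maxX = filters[idx]->sizeX;
            if (filters[idx]->sizeY > *maxY) *maxY = filters[idx]->sizeY;
            idx += kPartFilters[i] + 1;   // skip this component's part filters
        }
    }
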
@@ -532,7 +532,7 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
//
// API
// int searchObjectThresholdSomeComponents(const featurePyramid *H,
const filterObject **filters,
const filterObject **filters,
int kComponents, const int *kPartFilters,
const float *b, float scoreThreshold,
CvPoint **points, CvPoint **oppPoints,
@@ -553,20 +553,20 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
// Error status
*/
int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
const CvLSVMFilterObject **filters,
const CvLSVMFilterObject **filters,
int kComponents, const int *kPartFilters,
const float *b, float scoreThreshold,
CvPoint **points, CvPoint **oppPoints,
float **score, int *kPoints,
int numThreads)
{
int error = 0;
//int error = 0;
int i, j, s, f, componentIndex;
unsigned int maxXBorder, maxYBorder;
CvPoint **pointsArr, **oppPointsArr, ***partsDisplacementArr;
float **scoreArr;
int *kPointsArr, **levelsArr;

// Allocation memory
pointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);
oppPointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);
@@ -574,18 +574,18 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
kPointsArr = (int *)malloc(sizeof(int) * kComponents);
levelsArr = (int **)malloc(sizeof(int *) * kComponents);
partsDisplacementArr = (CvPoint ***)malloc(sizeof(CvPoint **) * kComponents);

// Getting maximum filter dimensions
error = getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);
/*error = */getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);
componentIndex = 0;
*kPoints = 0;
// For each component perform searching
for (i = 0; i < kComponents; i++)
{
#ifdef HAVE_TBB
error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],
int error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],
b[i], maxXBorder, maxYBorder, scoreThreshold,
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
&(scoreArr[i]), &(partsDisplacementArr[i]), numThreads);
if (error != LATENT_SVM_OK)
{
@@ -599,17 +599,17 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
return LATENT_SVM_SEARCH_OBJECT_FAILED;
}
#else
(void)numThreads;
(void)numThreads;
searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],
b[i], maxXBorder, maxYBorder, scoreThreshold,
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
b[i], maxXBorder, maxYBorder, scoreThreshold,
&(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),
&(scoreArr[i]), &(partsDisplacementArr[i]));
#endif
estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i],
filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));
estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i],
filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));
componentIndex += (kPartFilters[i] + 1);
*kPoints += kPointsArr[i];
}
}

*points = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));
*oppPoints = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));

@@ -192,7 +192,7 @@ size_t LatentSvmDetector::getClassCount() const
return classNames.size();
}

string extractModelName( const string& filename )
static string extractModelName( const string& filename )
{
size_t startPos = filename.rfind('/');
if( startPos == string::npos )

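
This is one of the commit's recurring fixes: helpers used in a single translation unit gain `static`, which gives them internal linkage, silences -Wmissing-declarations, and keeps them out of the library's symbol table. A minimal analogue of the extractModelName change (the helper body here is an assumption, since the diff truncates it):

    #include <string>
    static std::string baseName(const std::string& path)
    {
        size_t pos = path.rfind('/');                 // same rfind idiom as above
        return pos == std::string::npos ? path : path.substr(pos + 1);
    }
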
@@ -91,7 +91,7 @@ void Feature::write(FileStorage& fs) const
*
* \return The bounding box of all the templates in original image coordinates.
*/
Rect cropTemplates(std::vector<Template>& templates)
static Rect cropTemplates(std::vector<Template>& templates)
{
int min_x = std::numeric_limits<int>::max();
int min_y = std::numeric_limits<int>::max();
@@ -113,7 +113,7 @@ Rect cropTemplates(std::vector<Template>& templates)
max_y = std::max(max_y, y);
}
}

/// @todo Why require even min_x, min_y?
if (min_x % 2 == 1) --min_x;
if (min_y % 2 == 1) --min_y;
@@ -126,7 +126,7 @@ Rect cropTemplates(std::vector<Template>& templates)
templ.height = (max_y - min_y) >> templ.pyramid_level;
int offset_x = min_x >> templ.pyramid_level;
int offset_y = min_y >> templ.pyramid_level;

for (int j = 0; j < (int)templ.features.size(); ++j)
{
templ.features[j].x -= offset_x;
@@ -265,7 +265,7 @@ void hysteresisGradient(Mat& magnitude, Mat& angle,
* \param threshold Magnitude threshold. Keep only gradients whose norms are
* larger than this.
*/
void quantizedOrientations(const Mat& src, Mat& magnitude,
static void quantizedOrientations(const Mat& src, Mat& magnitude,
Mat& angle, float threshold)
{
magnitude.create(src.size(), CV_32F);
@@ -383,7 +383,7 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
{
if (mag_r[c] > threshold)
{
// Compute histogram of quantized bins in 3x3 patch around pixel
// Compute histogram of quantized bins in 3x3 patch around pixel
int histogram[8] = {0, 0, 0, 0, 0, 0, 0, 0};

uchar* patch3x3_row = &quantized_unfiltered(r-1, c-1);
@@ -391,17 +391,17 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;

patch3x3_row += quantized_unfiltered.step1();
patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;

patch3x3_row += quantized_unfiltered.step1();
patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;

// Find bin with the most votes from the patch
// Find bin with the most votes from the patch
int max_votes = 0;
int index = -1;
for (int i = 0; i < 8; ++i)
@@ -413,10 +413,10 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
}
}

// Only accept the quantization if majority of pixels in the patch agree
static const int NEIGHBOR_THRESHOLD = 5;
// Only accept the quantization if majority of pixels in the patch agree
static const int NEIGHBOR_THRESHOLD = 5;
if (max_votes >= NEIGHBOR_THRESHOLD)
quantized_angle.at<uchar>(r, c) = 1 << index;
quantized_angle.at<uchar>(r, c) = uchar(1 << index);
}
}
}
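
The gradient orientation ends up one-hot: the winning bin index from the 3x3 majority vote becomes a single set bit, and the new uchar(...) cast makes the int-to-uchar conversion explicit for the compiler. In isolation, as a sketch:

    unsigned char encodeBin(int index)       // index in [0, 8) from the vote
    {
        return (unsigned char)(1 << index);  // bin 3 -> 0b00001000; cast silences -Wconversion
    }
    // NEIGHBOR_THRESHOLD = 5 means at least 5 of the 9 patch pixels must agree
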
@@ -451,15 +451,15 @@ protected:
float strong_threshold;
};

ColorGradientPyramid::ColorGradientPyramid(const Mat& src, const Mat& mask,
float weak_threshold, size_t num_features,
float strong_threshold)
: src(src),
mask(mask),
ColorGradientPyramid::ColorGradientPyramid(const Mat& _src, const Mat& _mask,
float _weak_threshold, size_t _num_features,
float _strong_threshold)
: src(_src),
mask(_mask),
pyramid_level(0),
weak_threshold(weak_threshold),
num_features(num_features),
strong_threshold(strong_threshold)
weak_threshold(_weak_threshold),
num_features(_num_features),
strong_threshold(_strong_threshold)
{
update();
}
@@ -557,10 +557,10 @@ ColorGradient::ColorGradient()
{
}

ColorGradient::ColorGradient(float weak_threshold, size_t num_features, float strong_threshold)
: weak_threshold(weak_threshold),
num_features(num_features),
strong_threshold(strong_threshold)
ColorGradient::ColorGradient(float _weak_threshold, size_t _num_features, float _strong_threshold)
: weak_threshold(_weak_threshold),
num_features(_num_features),
strong_threshold(_strong_threshold)
{
}

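
The constructor rewrites above are the second recurring warning fix in this merge: parameters named exactly like the members they initialize shadow those members under -Wshadow, so the patch prefixes every such parameter with an underscore. Reduced to a sketch:

    struct Thresholds
    {
        // '_weak'/'_strong' no longer shadow the members they initialize
        Thresholds(float _weak, float _strong) : weak(_weak), strong(_strong) {}
        float weak, strong;
    };
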
@@ -630,7 +630,7 @@ static void accumBilateral(long delta, long i, long j, long * A, long * b, int t
*
* \todo Should also need camera model, or at least focal lengths? Replace distance_threshold with mask?
*/
void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
int difference_threshold)
{
dst = Mat::zeros(src.size(), CV_8U);
@@ -751,13 +751,13 @@ protected:
int extract_threshold;
};

DepthNormalPyramid::DepthNormalPyramid(const Mat& src, const Mat& mask,
int distance_threshold, int difference_threshold, size_t num_features,
int extract_threshold)
: mask(mask),
DepthNormalPyramid::DepthNormalPyramid(const Mat& src, const Mat& _mask,
int distance_threshold, int difference_threshold, size_t _num_features,
int _extract_threshold)
: mask(_mask),
pyramid_level(0),
num_features(num_features),
extract_threshold(extract_threshold)
num_features(_num_features),
extract_threshold(_extract_threshold)
{
quantizedNormals(src, normal, distance_threshold, difference_threshold);
}
@@ -876,12 +876,12 @@ DepthNormal::DepthNormal()
{
}

DepthNormal::DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
int extract_threshold)
: distance_threshold(distance_threshold),
difference_threshold(difference_threshold),
num_features(num_features),
extract_threshold(extract_threshold)
DepthNormal::DepthNormal(int _distance_threshold, int _difference_threshold, size_t _num_features,
int _extract_threshold)
: distance_threshold(_distance_threshold),
difference_threshold(_difference_threshold),
num_features(_num_features),
extract_threshold(_extract_threshold)
{
}

@@ -923,7 +923,7 @@ void DepthNormal::write(FileStorage& fs) const
* Response maps *
\****************************************************************************************/

void orUnaligned8u(const uchar * src, const int src_stride,
static void orUnaligned8u(const uchar * src, const int src_stride,
uchar * dst, const int dst_stride,
const int width, const int height)
{
@@ -971,7 +971,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
__m128i* dst_ptr = reinterpret_cast<__m128i*>(dst + c);
*dst_ptr = _mm_or_si128(*dst_ptr, val);
}
}
}
#endif
for ( ; c < width; ++c)
dst[c] |= src[c];
@@ -991,7 +991,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
* \param[out] dst Destination 8-bit spread image.
* \param T Sampling step. Spread labels T/2 pixels in each direction.
*/
void spread(const Mat& src, Mat& dst, int T)
static void spread(const Mat& src, Mat& dst, int T)
{
// Allocate and zero-initialize spread (OR'ed) image
dst = Mat::zeros(src.size(), CV_8U);
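
spread() makes matching tolerant to small translations by OR-ing each pixel's one-hot label over a TxT neighborhood. A naive per-pixel version of the idea, sketched (the real code above ORs whole shifted images via orUnaligned8u instead, and dst is assumed zero-initialized as in spread()):

    void spreadNaive(const unsigned char* src, unsigned char* dst, int w, int h, int T)
    {
        for (int r = 0; r < h; ++r)
            for (int c = 0; c < w; ++c)
                for (int dr = 0; dr < T && r + dr < h; ++dr)
                    for (int dc = 0; dc < T && c + dc < w; ++dc)
                        dst[r * w + c] |= src[(r + dr) * w + (c + dc)];
    }
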
@@ -1019,7 +1019,7 @@ CV_DECL_ALIGNED(16) static const unsigned char SIMILARITY_LUT[256] = {0, 4, 3, 4
* \param[in] src The source 8-bit spread quantized image.
* \param[out] response_maps Vector of 8 response maps, one for each bit label.
*/
void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
static void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
{
CV_Assert((src.rows * src.cols) % 16 == 0);

@@ -1027,16 +1027,16 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
response_maps.resize(8);
for (int i = 0; i < 8; ++i)
response_maps[i].create(src.size(), CV_8U);

Mat lsb4(src.size(), CV_8U);
Mat msb4(src.size(), CV_8U);

for (int r = 0; r < src.rows; ++r)
{
const uchar* src_r = src.ptr(r);
uchar* lsb4_r = lsb4.ptr(r);
uchar* msb4_r = msb4.ptr(r);

for (int c = 0; c < src.cols; ++c)
{
// Least significant 4 bits of spread image pixel
@@ -1100,7 +1100,7 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
* each of which is a linear memory of length (W/T)*(H/T).
* \param T Sampling step.
*/
void linearize(const Mat& response_map, Mat& linearized, int T)
static void linearize(const Mat& response_map, Mat& linearized, int T)
{
CV_Assert(response_map.rows % T == 0);
CV_Assert(response_map.cols % T == 0);
@@ -1109,7 +1109,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
int mem_width = response_map.cols / T;
int mem_height = response_map.rows / T;
linearized.create(T*T, mem_width * mem_height, CV_8U);

// Outer two for loops iterate over top-left T^2 starting pixels
int index = 0;
for (int r_start = 0; r_start < T; ++r_start)
@@ -1118,7 +1118,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
{
uchar* memory = linearized.ptr(index);
++index;

// Inner two loops copy every T-th pixel into the linear memory
for (int r = r_start; r < response_map.rows; r += T)
{
@@ -1134,8 +1134,8 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
* Linearized similarities *
\****************************************************************************************/

const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
const Feature& f, int T, int W)
static const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
const Feature& f, int T, int W)
{
// Retrieve the TxT grid of linear memories associated with the feature label
const Mat& memory_grid = linear_memories[f.label];
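
The linear memories trade cache misses for index arithmetic: pixel (x, y) of an imageWidth-wide response map lives in grid cell (y % T, x % T), at offset (y / T) * (imageWidth / T) + (x / T) inside that cell's memory. The math, sketched (imageWidth here is the full image width, not the reduced W used by the code above):

    void linearAddress(int x, int y, int T, int imageWidth,
                       int* gridIndex, int* elemIndex)
    {
        *gridIndex = (y % T) * T + (x % T);                  // which of the T*T memories
        *elemIndex = (y / T) * (imageWidth / T) + (x / T);   // offset inside it
    }
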
@@ -1170,7 +1170,7 @@ const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
* \param size Size (W, H) of the original input image.
* \param T Sampling step.
*/
void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
static void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T)
{
// 63 features or less is a special case because the max similarity per-feature is 4.
@@ -1252,7 +1252,7 @@ void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
}
#endif
for ( ; j < template_positions; ++j)
dst_ptr[j] += lm_ptr[j];
dst_ptr[j] = uchar(dst_ptr[j] + lm_ptr[j]);
}
}

@@ -1266,7 +1266,7 @@ void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
* \param T Sampling step.
* \param center Center of the local region.
*/
void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
static void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T, Point center)
{
// Similar to whole-image similarity() above. This version takes a position 'center'
@@ -1334,7 +1334,7 @@ void similarityLocal(const std::vector<Mat>& linear_memories, const Template& te
for (int row = 0; row < 16; ++row)
{
for (int col = 0; col < 16; ++col)
dst_ptr[col] += lm_ptr[col];
dst_ptr[col] = uchar(dst_ptr[col] + lm_ptr[col]);
dst_ptr += 16;
lm_ptr += W;
}
@@ -1342,7 +1342,7 @@ void similarityLocal(const std::vector<Mat>& linear_memories, const Template& te
}
}

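
The two dst_ptr rewrites above fix the same warning: `a += b` on uchar promotes both operands to int and implicitly narrows on assignment, which newer compilers flag; the patch spells the truncation out without changing the result. In miniature:

    void narrowingSketch()
    {
        unsigned char a = 200, b = 100;
        // a += b;                       // implicit int -> uchar narrowing, warns
        a = (unsigned char)(a + b);      // identical wraparound result (44), made explicit
        (void)a;
    }
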
void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
static void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
{
const uchar * end = src1 + length;

@@ -1362,7 +1362,7 @@ void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int
* \param[in] similarities Source 8-bit similarity images.
* \param[out] dst Destination 16-bit similarity image.
*/
void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
static void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
{
if (similarities.size() == 1)
{
@@ -1388,9 +1388,9 @@ Detector::Detector()
{
}

Detector::Detector(const std::vector< Ptr<Modality> >& modalities,
Detector::Detector(const std::vector< Ptr<Modality> >& _modalities,
const std::vector<int>& T_pyramid)
: modalities(modalities),
: modalities(_modalities),
pyramid_levels(static_cast<int>(T_pyramid.size())),
T_at_level(T_pyramid)
{
@@ -1480,7 +1480,7 @@ void Detector::match(const std::vector<Mat>& sources, float threshold, std::vect
// Used to filter out weak matches
struct MatchPredicate
{
MatchPredicate(float threshold) : threshold(threshold) {}
MatchPredicate(float _threshold) : threshold(_threshold) {}
bool operator() (const Match& m) { return m.similarity < threshold; }
float threshold;
};
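
How a predicate like MatchPredicate is typically applied, sketched with the erase-remove idiom (the std::vector<Match> container is an assumption):

    #include <algorithm>
    #include <vector>
    void pruneMatches(std::vector<Match>& matches, float threshold)
    {
        matches.erase(std::remove_if(matches.begin(), matches.end(),
                                     MatchPredicate(threshold)),
                      matches.end());
    }
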
@@ -1554,13 +1554,13 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
int max_x = size.width - tp[start].width - border;
int max_y = size.height - tp[start].height - border;

std::vector<Mat> similarities(modalities.size());
Mat total_similarity;
std::vector<Mat> similarities2(modalities.size());
Mat total_similarity2;
for (int m = 0; m < (int)candidates.size(); ++m)
{
Match& match = candidates[m];
int x = match.x * 2 + 1; /// @todo Support other pyramid distance
int y = match.y * 2 + 1;
Match& match2 = candidates[m];
int x = match2.x * 2 + 1; /// @todo Support other pyramid distance
int y = match2.y * 2 + 1;

// Require 8 (reduced) row/cols to the up/left
x = std::max(x, border);
@@ -1571,22 +1571,22 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
y = std::min(y, max_y);

// Compute local similarity maps for each modality
int num_features = 0;
int numFeatures = 0;
for (int i = 0; i < (int)modalities.size(); ++i)
{
const Template& templ = tp[start + i];
num_features += static_cast<int>(templ.features.size());
similarityLocal(lms[i], templ, similarities[i], size, T, Point(x, y));
numFeatures += static_cast<int>(templ.features.size());
similarityLocal(lms[i], templ, similarities2[i], size, T, Point(x, y));
}
addSimilarities(similarities, total_similarity);
addSimilarities(similarities2, total_similarity2);

// Find best local adjustment
int best_score = 0;
int best_r = -1, best_c = -1;
for (int r = 0; r < total_similarity.rows; ++r)
for (int r = 0; r < total_similarity2.rows; ++r)
{
ushort* row = total_similarity.ptr<ushort>(r);
for (int c = 0; c < total_similarity.cols; ++c)
ushort* row = total_similarity2.ptr<ushort>(r);
for (int c = 0; c < total_similarity2.cols; ++c)
{
int score = row[c];
if (score > best_score)
@@ -1598,9 +1598,9 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
}
}
// Update current match
match.x = (x / T - 8 + best_c) * T + offset;
match.y = (y / T - 8 + best_r) * T + offset;
match.similarity = (best_score * 100.f) / (4 * num_features);
match2.x = (x / T - 8 + best_c) * T + offset;
match2.y = (y / T - 8 + best_r) * T + offset;
match2.similarity = (best_score * 100.f) / (4 * numFeatures);
}

// Filter out any matches that drop below the similarity threshold
@@ -1763,10 +1763,10 @@ void Detector::write(FileStorage& fs) const
tps[template_id].resize(templates_fn.size());

FileNodeIterator templ_it = templates_fn.begin(), templ_it_end = templates_fn.end();
int i = 0;
int idx = 0;
for ( ; templ_it != templ_it_end; ++templ_it)
{
tps[template_id][i++].read(*templ_it);
tps[template_id][idx++].read(*templ_it);
}
}

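
The normalization in matchClass divides by 4 * numFeatures because 4 is the maximum per-feature response in SIMILARITY_LUT, so the result is a percentage of the ideal score. The arithmetic, isolated:

    float similarityPercent(int best_score, int numFeatures)
    {
        return (best_score * 100.f) / (4 * numFeatures);
    }
    // e.g. 63 features and best_score = 210 gives 21000 / 252 = 83.33%
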
@@ -4,6 +4,8 @@
#include "_lsvmparser.h"
#include "_lsvm_error.h"

namespace
{
int isMODEL (char *str){
char stag [] = "<Model>";
char etag [] = "</Model>";
@@ -213,9 +215,9 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == ERFILTER){
//printf("</RootFilter>\n");
return;
@@ -267,7 +269,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
}

tag = 0;
i = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -275,7 +277,7 @@ void parserRFilter (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
tagBuf[j] = ch; j++;
}
}
}
}
}
}

@@ -303,9 +305,9 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == ETAGV){
//printf(" </V>\n");
return;
@@ -331,7 +333,7 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
//printf(" <Vy>%d</Vy>\n", model->V.y);
}
tag = 0;
i = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -339,7 +341,7 @@ void parserV (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
@@ -366,9 +368,9 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == ETAGD){
//printf(" </D>\n");
return;
@@ -380,7 +382,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDx){
st = 0;
buf[i] = '\0';

model->fineFunction[0] = (float)atof(buf);
//printf(" <Dx>%f</Dx>\n", model->fineFunction[0]);
}
@@ -391,7 +393,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDy){
st = 0;
buf[i] = '\0';

model->fineFunction[1] = (float)atof(buf);
//printf(" <Dy>%f</Dy>\n", model->fineFunction[1]);
}
@@ -402,7 +404,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDxx){
st = 0;
buf[i] = '\0';

model->fineFunction[2] = (float)atof(buf);
//printf(" <Dxx>%f</Dxx>\n", model->fineFunction[2]);
}
@@ -413,13 +415,13 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
if(tagVal == EDyy){
st = 0;
buf[i] = '\0';

model->fineFunction[3] = (float)atof(buf);
//printf(" <Dyy>%f</Dyy>\n", model->fineFunction[3]);
}

tag = 0;
i = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -427,7 +429,7 @@ void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
tagBuf[j] = ch; j++;
}
}
}
}
}
}

@@ -465,9 +467,9 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == EPFILTER){
//printf("</PathFilter>\n");
return;
@@ -515,7 +517,7 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
//printf("WEIGHTS OK\n");
}
tag = 0;
i = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -523,7 +525,7 @@ void parserPFilter (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last, int *max){
@@ -532,12 +534,12 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
int tag;
int tagVal;
char ch;
int i,j;
char buf[1024];
int /*i,*/j;
//char buf[1024];
char tagBuf[1024];
//printf("<PartFilters>\n");

i = 0;
//i = 0;
j = 0;
st = 0;
tag = 0;
@@ -551,9 +553,9 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == EPFILTERs){
//printf("</PartFilters>\n");
return;
@@ -564,15 +566,15 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
N_path++;
}
tag = 0;
i = 0;
//i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
//buf[i] = ch; i++;
}else{
tagBuf[j] = ch; j++;
}
}
}
}
}
}
void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, float *b, int *last, int *max){
@@ -580,12 +582,12 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
int tag;
int tagVal;
char ch;
int i,j;
char buf[1024];
int /*i,*/j;
//char buf[1024];
char tagBuf[1024];
//printf("<Component> %d\n", *N_comp);

i = 0;
//i = 0;
j = 0;
st = 0;
tag = 0;
@@ -599,9 +601,9 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == ECOMP){
(*N_comp) ++;
return;
@@ -614,15 +616,15 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
parserPFilterS (xmlf, p, model, last, max);
}
tag = 0;
i = 0;
//i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
//buf[i] = ch; i++;
}else{
tagBuf[j] = ch; j++;
}
}
}
}
}
}

void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){
@@ -637,9 +639,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
int i,j, ii = 0;
char buf[1024];
char tagBuf[1024];

//printf("<Model>\n");

i = 0;
j = 0;
st = 0;
@@ -654,9 +656,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
if(ch == '>'){
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';

tagVal = getTeg(tagBuf);

if(tagVal == EMODEL){
//printf("</Model>\n");
for(ii = 0; ii <= *last; ii++){
@@ -671,7 +673,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
bb = (float *)malloc(sizeof(float));
* comp = cmp;
* b = bb;
* count = N_comp + 1;
* count = N_comp + 1;
} else {
cmp = (int *)malloc(sizeof(int) * (N_comp + 1));
bb = (float *)malloc(sizeof(float) * (N_comp + 1));
@@ -683,7 +685,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
free(* b );
* comp = cmp;
* b = bb;
* count = N_comp + 1;
* count = N_comp + 1;
}
parserComp(xmlf, p, &N_comp, model, &((*b)[N_comp]), last, max);
cmp[N_comp - 1] = *last;
@@ -709,7 +711,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
//printf("<ScoreThreshold>%f</ScoreThreshold>\n", score);
}
tag = 0;
i = 0;
i = 0;
}else{
if((tag == 0)&& (st == 1)){
buf[i] = ch; i++;
@@ -717,17 +719,19 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
tagBuf[j] = ch; j++;
}
}
}
}
}
}

}//namespace

int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){
int st = 0;
//int st = 0;
int tag;
char ch;
int i,j;
int /*i,*/j;
FILE *xmlf;
char buf[1024];
//char buf[1024];
char tagBuf[1024];

(*max) = 10;
@@ -739,10 +743,10 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
xmlf = fopen(filename, "rb");
if(xmlf == NULL)
return LSVM_PARSER_FILE_NOT_FOUND;

i = 0;

//i = 0;
j = 0;
st = 0;
//st = 0;
tag = 0;
while(!feof(xmlf)){
ch = (char)fgetc( xmlf );
@@ -753,7 +757,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
}else {
if(ch == '>'){
tag = 0;
i = 0;
//i = 0;
tagBuf[j ] = ch;
tagBuf[j + 1] = '\0';
if(getTeg(tagBuf) == MODEL){
@@ -761,14 +765,14 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
}
}else{
if(tag == 0){
buf[i] = ch; i++;
//buf[i] = ch; i++;
}else{
tagBuf[j] = ch; j++;
}
}
}
}
}

fclose(xmlf);
return LATENT_SVM_OK;
}
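
The parser edits above show the commit's third warning-fix pattern: locals that a code path no longer reads are commented out rather than deleted (preserving the original structure for readers), and parameters kept for API compatibility are consumed with a (void) cast. In miniature:

    void unusedSketch(int numThreads)
    {
        //int st = 0;       // set but never read: -Wunused-but-set-variable
        (void)numThreads;   // touch the parameter so -Wunused-parameter stays quiet
    }
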
@@ -776,24 +780,24 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
int loadModel(
              const char *modelPath,
              CvLSVMFilterObject ***filters,
              int *kFilters,
              int *kComponents,
              int **kPartFilters,
              float **b,
              float *scoreThreshold){
              int *kFilters,
              int *kComponents,
              int **kPartFilters,
              float **b,
              float *scoreThreshold){
    int last;
    int max;
    int *comp;
    int count;
    int i;
    int err;
    int err;
    float score;
    //printf("start_parse\n\n");

    err = LSVMparser(modelPath, filters, &last, &max, &comp, b, &count, &score);
    if(err != LATENT_SVM_OK){
        return err;
    }
    if(err != LATENT_SVM_OK){
        return err;
    }
    (*kFilters) = last + 1;
    (*kComponents) = count;
    (*scoreThreshold) = (float) score;
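Based only on the loadModel signature and error convention visible in the hunk above, a hedged sketch of a typical call site; demoLoad and the "model.xml" path are placeholders, not part of the library:

    static int demoLoad(void)
    {
        CvLSVMFilterObject **filters = NULL;
        int *kPartFilters = NULL;
        float *b = NULL;
        int kFilters = 0, kComponents = 0;
        float scoreThreshold = 0.f;

        int err = loadModel("model.xml", &filters, &kFilters, &kComponents,
                            &kPartFilters, &b, &scoreThreshold);
        if (err != LATENT_SVM_OK)
            return err;           /* e.g. LSVM_PARSER_FILE_NOT_FOUND */
        /* ... run detection, then free filters, kPartFilters, b ... */
        return LATENT_SVM_OK;
    }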
@@ -23,7 +23,7 @@
*/
int convolution(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap *map, float *f)
{
    int n1, m1, n2, m2, p, size, diff1, diff2;
    int n1, m1, n2, m2, p, /*size,*/ diff1, diff2;
    int i1, i2, j1, j2, k;
    float tmp_f1, tmp_f2, tmp_f3, tmp_f4;
    float *pMap = NULL;
@@ -37,7 +37,7 @@ int convolution(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap *map, float

    diff1 = n1 - n2 + 1;
    diff2 = m1 - m2 + 1;
    size = diff1 * diff2;
    //size = diff1 * diff2;
    for (j1 = diff2 - 1; j1 >= 0; j1--)
    {
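For an n1 x m1 feature map and an n2 x m2 filter there are (n1 - n2 + 1) * (m1 - m2 + 1) valid filter placements; that product is exactly what the now-dead size held, and since the loops use diff1 and diff2 directly the store is simply commented out. When a computed value must survive for a conditionally compiled path, a (void) cast is the usual alternative. An illustrative sketch, with VERBOSE_DEBUG as a hypothetical build macro:

    #include <stdio.h>

    static int placements(int n1, int m1, int n2, int m2)
    {
        int diff1 = n1 - n2 + 1;    /* valid vertical offsets   */
        int diff2 = m1 - m2 + 1;    /* valid horizontal offsets */
        int size  = diff1 * diff2;  /* total filter placements  */
    #ifdef VERBOSE_DEBUG            /* hypothetical option      */
        printf("%d placements\n", size);
    #else
        (void)size;                 /* keep the value, silence the warning */
    #endif
        return diff1 * diff2;
    }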
@@ -333,7 +333,7 @@ int filterDispositionLevel(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap
                           float **scoreFi,
                           int **pointsX, int **pointsY)
{
    int n1, m1, n2, m2, p, size, diff1, diff2;
    int n1, m1, n2, m2, /*p,*/ size, diff1, diff2;
    float *f;
    int i1, j1;
    int res;
@@ -342,7 +342,7 @@ int filterDispositionLevel(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap
    m1 = pyramid->sizeX;
    n2 = Fi->sizeY;
    m2 = Fi->sizeX;
    p = pyramid->numFeatures;
    //p = pyramid->numFeatures;
    (*scoreFi) = NULL;
    (*pointsX) = NULL;
    (*pointsY) = NULL;
@@ -418,7 +418,7 @@ int filterDispositionLevelFFT(const CvLSVMFilterObject *Fi, const CvLSVMFftImage
                              float **scoreFi,
                              int **pointsX, int **pointsY)
{
    int n1, m1, n2, m2, p, size, diff1, diff2;
    int n1, m1, n2, m2, /*p,*/ size, diff1, diff2;
    float *f;
    int i1, j1;
    int res;
@@ -428,7 +428,7 @@ int filterDispositionLevelFFT(const CvLSVMFilterObject *Fi, const CvLSVMFftImage
    m1 = featMapImage->dimX;
    n2 = Fi->sizeY;
    m2 = Fi->sizeX;
    p = featMapImage->numFeatures;
    //p = featMapImage->numFeatures;
    (*scoreFi) = NULL;
    (*pointsX) = NULL;
    (*pointsY) = NULL;
@@ -547,7 +547,7 @@ int addNullableBorder(CvLSVMFeatureMap *map, int bx, int by)
    return LATENT_SVM_OK;
}

CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,
static CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,
                                             int maxXBorder, int maxYBorder)
{
    int bx, by;
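Adding static here (and to sort and createSchedule further down) gives these helpers internal linkage: -Wmissing-declarations is satisfied without a header prototype, and generic names like sort cannot collide with other translation units at link time. A minimal sketch of the idiom; the file and both functions are hypothetical:

    /* helpers.c -- illustrative translation unit */

    /* Internal linkage: no prototype demanded by -Wmissing-declarations,
       and the name stays invisible to the linker. */
    static int clamp(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int saturate_to_byte(int v)   /* the only exported symbol */
    {
        return clamp(v, 0, 255);
    }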
@@ -611,7 +611,7 @@ int maxFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n,
                                 float *score, CvPoint **points,
                                 int *kPoints, CvPoint ***partsDisplacement)
{
    int i, j, k, dimX, dimY, nF0, mF0, p;
    int i, j, k, dimX, dimY, nF0, mF0/*, p*/;
    int diff1, diff2, index, last, partsLevel;
    CvLSVMFilterDisposition **disposition;
    float *f;
@@ -639,7 +639,7 @@ int maxFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n,
    dimY = H->pyramid[level]->sizeY;

    // Number of features
    p = H->pyramid[level]->numFeatures;
    //p = H->pyramid[level]->numFeatures;

    // Getting dimension of root filter
    nF0 = all_F[0]->sizeY;
@@ -860,7 +860,7 @@ int thresholdFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n,
                                       float **score, CvPoint **points, int *kPoints,
                                       CvPoint ***partsDisplacement)
{
    int i, j, k, dimX, dimY, nF0, mF0, p;
    int i, j, k, dimX, dimY, nF0, mF0/*, p*/;
    int diff1, diff2, index, last, partsLevel;
    CvLSVMFilterDisposition **disposition;
    float *f;
@@ -887,7 +887,7 @@ int thresholdFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n,
    dimY = H->pyramid[level]->sizeY;

    // Number of features
    p = H->pyramid[level]->numFeatures;
    //p = H->pyramid[level]->numFeatures;

    // Getting dimension of root filter
    nF0 = all_F[0]->sizeY;
@@ -1366,6 +1366,7 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
    return LATENT_SVM_OK;
}

#ifdef HAVE_TBB
/*
// Creating schedule of pyramid levels processing
//
@@ -1390,12 +1391,12 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
// RESULT
// Error status
*/
int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,
                   const int n, const int bx, const int by,
                   const int threadsNum, int *kLevels, int **processingLevels)
{
    int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby, numDotProducts;
    int averNumDotProd, j, minValue, argMin, lambda, maxValue, k;
    int j, minValue, argMin, lambda, maxValue, k;
    int *dotProd, *weights, *disp;
    if (H == NULL || all_F == NULL)
    {
@@ -1429,8 +1430,6 @@ int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all
                     (H->pyramid[i]->sizeY + dby) * sumPartFiltersDim;
        numDotProducts += dotProd[i];
    }
    // Average number of dot products that would be performed at the best
    averNumDotProd = numDotProducts / threadsNum;
    // Allocation memory for saving dot product number performed by each thread
    weights = (int *)malloc(sizeof(int) * threadsNum);
    // Allocation memory for saving dispertion
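Note that the #ifdef HAVE_TBB guard moves up to enclose createSchedule: once the function is static and its only caller sits on the TBB code path, a build without TBB would otherwise leave a file-local function with no callers and trigger -Wunused-function. An illustrative sketch of guarding a static helper together with its caller; both names here are hypothetical:

    #ifdef HAVE_TBB
    /* File-local helper used only by the TBB path. */
    static int levelsPerThread(int numLevels, int threadsNum)
    {
        return numLevels / (threadsNum > 0 ? threadsNum : 1);
    }

    int tbbSchedule(int numLevels, int threadsNum)
    {
        return levelsPerThread(numLevels, threadsNum);
    }
    #endif /* if the guard stopped above the helper, -Wunused-function
              would fire whenever HAVE_TBB is undefined */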
@@ -1521,7 +1520,6 @@ int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all
    return LATENT_SVM_OK;
}

#ifdef HAVE_TBB
/*
// int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
                                   const CvLSVMFeaturePyramid *H,
@@ -1679,7 +1677,7 @@ int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
}
#endif

void sort(int n, const float* x, int* indices)
static void sort(int n, const float* x, int* indices)
{
    int i, j;
    for (i = 0; i < n; i++)
@@ -43,11 +43,7 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__

#if _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif

#ifdef HAVE_CVCONFIG_H
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
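The deleted block relied on a bare #if _MSC_VER >= 1200, which evaluates an undefined macro as 0 on non-MSVC compilers and trips -Wundef in strict builds; this commit removes the blanket suppression outright. If such pragmas are kept at all, the conventional shape guards the version test and scopes the suppression with push/pop. A hedged, illustrative sketch (not what the commit does), with C4251 as the representative warning:

    #if defined(_MSC_VER) && _MSC_VER >= 1200
    #pragma warning( push )
    #pragma warning( disable: 4251 )  /* 'class X needs dll-interface' noise */
    #endif

    /* ... declarations that would otherwise emit C4251 ... */

    #if defined(_MSC_VER) && _MSC_VER >= 1200
    #pragma warning( pop )
    #endif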