Revert "Merge pull request #836 from jet47:gpu-modules"
This reverts commit fba72cb60d, reversing changes made to 02131ffb62.
@@ -41,6 +41,8 @@
//M*/

#include "precomp.hpp"
#include <vector>
#include <iostream>
#include "opencv2/objdetect/objdetect_c.h"

using namespace cv;
@@ -73,37 +75,6 @@ public:
    virtual bool read(const String& classifierAsXml) = 0;
};

#ifndef HAVE_OPENCV_GPULEGACY

struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
    HaarCascade()
    {
        throw_no_cuda();
    }

    unsigned int process(const GpuMat&, GpuMat&, float, int, bool, bool, cv::Size, cv::Size)
    {
        throw_no_cuda();
        return 0;
    }

    cv::Size getClassifierCvSize() const
    {
        throw_no_cuda();
        return cv::Size();
    }

    bool read(const String&)
    {
        throw_no_cuda();
        return false;
    }
};

#else

struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
@@ -313,8 +284,6 @@ private:
    virtual ~HaarCascade(){}
};

#endif

cv::Size operator -(const cv::Size& a, const cv::Size& b)
{
    return cv::Size(a.width - b.width, a.height - b.height);
@@ -508,8 +477,6 @@ private:
        resuzeBuffer.create(frame, CV_8UC1);

        integral.create(frame.height + 1, integralFactor * (frame.width + 1), CV_32SC1);

#ifdef HAVE_OPENCV_GPULEGACY
        NcvSize32u roiSize;
        roiSize.width = frame.width;
        roiSize.height = frame.height;
@@ -520,7 +487,6 @@ private:
        Ncv32u bufSize;
        ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
        integralBuffer.create(1, bufSize, CV_8UC1);
#endif

        candidates.create(1 , frame.width >> 1, CV_32SC4);
    }
@@ -756,3 +722,240 @@ bool cv::gpu::CascadeClassifier_GPU::load(const String& filename)
}

#endif

//////////////////////////////////////////////////////////////////////////////////////////////////////

#if defined (HAVE_CUDA)

struct RectConvert
{
    Rect operator()(const NcvRect32u& nr) const { return Rect(nr.x, nr.y, nr.width, nr.height); }
    NcvRect32u operator()(const Rect& nr) const
    {
        NcvRect32u rect;
        rect.x = nr.x;
        rect.y = nr.y;
        rect.width = nr.width;
        rect.height = nr.height;
        return rect;
    }
};

void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights)
{
    std::vector<Rect> rects(hypotheses.size());
    std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());

    if (weights)
    {
        std::vector<int> weights_int;
        weights_int.assign(weights->begin(), weights->end());
        cv::groupRectangles(rects, weights_int, groupThreshold, eps);
    }
    else
    {
        cv::groupRectangles(rects, groupThreshold, eps);
    }
    std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
    hypotheses.resize(rects.size());
}

NCVStatus loadFromXML(const String &filename,
                      HaarClassifierCascadeDescriptor &haar,
                      std::vector<HaarStage64> &haarStages,
                      std::vector<HaarClassifierNode128> &haarClassifierNodes,
                      std::vector<HaarFeature64> &haarFeatures)
{
    NCVStatus ncvStat;

    haar.NumStages = 0;
    haar.NumClassifierRootNodes = 0;
    haar.NumClassifierTotalNodes = 0;
    haar.NumFeatures = 0;
    haar.ClassifierSize.width = 0;
    haar.ClassifierSize.height = 0;
    haar.bHasStumpsOnly = true;
    haar.bNeedsTiltedII = false;
    Ncv32u curMaxTreeDepth;

    std::vector<char> xmlFileCont;

    std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
    haarStages.resize(0);
    haarClassifierNodes.resize(0);
    haarFeatures.resize(0);

    Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
    if (oldCascade.empty())
    {
        return NCV_HAAR_XML_LOADING_EXCEPTION;
    }

    haar.ClassifierSize.width = oldCascade->orig_window_size.width;
    haar.ClassifierSize.height = oldCascade->orig_window_size.height;

    int stagesCound = oldCascade->count;
    for(int s = 0; s < stagesCound; ++s) // by stages
    {
        HaarStage64 curStage;
        curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));

        curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);

        int treesCount = oldCascade->stage_classifier[s].count;
        for(int t = 0; t < treesCount; ++t) // by trees
        {
            Ncv32u nodeId = 0;
            CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];

            int nodesCount = tree->count;
            for(int n = 0; n < nodesCount; ++n) //by features
            {
                CvHaarFeature* feature = &tree->haar_feature[n];

                HaarClassifierNode128 curNode;
                curNode.setThreshold(tree->threshold[n]);

                NcvBool bIsLeftNodeLeaf = false;
                NcvBool bIsRightNodeLeaf = false;

                HaarClassifierNodeDescriptor32 nodeLeft;
                if ( tree->left[n] <= 0 )
                {
                    Ncv32f leftVal = tree->alpha[-tree->left[n]];
                    ncvStat = nodeLeft.create(leftVal);
                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
                    bIsLeftNodeLeaf = true;
                }
                else
                {
                    Ncv32u leftNodeOffset = tree->left[n];
                    nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
                    haar.bHasStumpsOnly = false;
                }
                curNode.setLeftNodeDesc(nodeLeft);

                HaarClassifierNodeDescriptor32 nodeRight;
                if ( tree->right[n] <= 0 )
                {
                    Ncv32f rightVal = tree->alpha[-tree->right[n]];
                    ncvStat = nodeRight.create(rightVal);
                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
                    bIsRightNodeLeaf = true;
                }
                else
                {
                    Ncv32u rightNodeOffset = tree->right[n];
                    nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
                    haar.bHasStumpsOnly = false;
                }
                curNode.setRightNodeDesc(nodeRight);

                Ncv32u tiltedVal = feature->tilted;
                haar.bNeedsTiltedII = (tiltedVal != 0);

                Ncv32u featureId = 0;
                for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
                {
                    Ncv32u rectX = feature->rect[l].r.x;
                    Ncv32u rectY = feature->rect[l].r.y;
                    Ncv32u rectWidth = feature->rect[l].r.width;
                    Ncv32u rectHeight = feature->rect[l].r.height;

                    Ncv32f rectWeight = feature->rect[l].weight;

                    if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
                        break;

                    HaarFeature64 curFeature;
                    ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
                    curFeature.setWeight(rectWeight);
                    ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
                    haarFeatures.push_back(curFeature);

                    featureId++;
                }

                HaarFeatureDescriptor32 tmpFeatureDesc;
                ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
                    featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
                ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
                curNode.setFeatureDesc(tmpFeatureDesc);

                if (!nodeId)
                {
                    //root node
                    haarClassifierNodes.push_back(curNode);
                    curMaxTreeDepth = 1;
                }
                else
                {
                    //other node
                    h_TmpClassifierNotRootNodes.push_back(curNode);
                    curMaxTreeDepth++;
                }

                nodeId++;
            }
        }

        curStage.setNumClassifierRootNodes(treesCount);
        haarStages.push_back(curStage);
    }

    //fill in cascade stats
    haar.NumStages = static_cast<Ncv32u>(haarStages.size());
    haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
    haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
    haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());

    //merge root and leaf nodes in one classifiers array
    Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
    for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
    {
        HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();

        HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
        if (!featureDesc.isLeftNodeLeaf())
        {
            Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
            nodeLeft.create(newOffset);
        }
        haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);

        HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
        if (!featureDesc.isRightNodeLeaf())
        {
            Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
            nodeRight.create(newOffset);
        }
        haarClassifierNodes[i].setRightNodeDesc(nodeRight);
    }

    for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
    {
        HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();

        HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
        if (!featureDesc.isLeftNodeLeaf())
        {
            Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
            nodeLeft.create(newOffset);
        }
        h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);

        HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
        if (!featureDesc.isRightNodeLeaf())
        {
            Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
            nodeRight.create(newOffset);
        }
        h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);

        haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
    }

    return NCV_SUCCESS;
}

#endif /* HAVE_CUDA */