initial support of GPU LBP classifier: added new style xml format loading
@@ -34,7 +34,7 @@ PERF_TEST_P(ImageName_MinSize, CascadeClassifierLBPFrontalFace,
    if (cc.empty())
        FAIL() << "Can't load cascade file";

-   Mat img=imread(getDataPath(filename), 0);
+   Mat img = imread(getDataPath(filename), 0);

    if (img.empty())
        FAIL() << "Can't load source image";

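The cascade exercised by this perf test is loaded through CascadeClassifier, which with this commit can also parse the new-style XML cascade format. For orientation, a minimal, self-contained usage sketch (file names here are illustrative, not from this commit):

    #include "opencv2/objdetect/objdetect.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include <vector>
    using namespace cv;

    int main()
    {
        CascadeClassifier cc;
        if (!cc.load("lbpcascade_frontalface.xml"))   // new-style XML cascade
            return -1;
        Mat img = imread("scene.jpg", 0);             // grayscale, as in the test
        if (img.empty())
            return -1;
        std::vector<Rect> objects;
        cc.detectMultiScale(img, objects);            // run the LBP cascade
        return (int)objects.size();
    }
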
@@ -56,8 +56,8 @@ namespace cv
    + (step) * ((rect).y + (rect).width + (rect).height)

#define CALC_SUM_(p0, p1, p2, p3, offset) \
    ((p0)[offset] - (p1)[offset] - (p2)[offset] + (p3)[offset])

#define CALC_SUM(rect,offset) CALC_SUM_((rect)[0], (rect)[1], (rect)[2], (rect)[3], offset)

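CALC_SUM_ evaluates a rectangle sum from four precomputed integral-image pointers: with p0 at the top-left corner, p1 top-right, p2 bottom-left, and p3 bottom-right, the sum of the enclosed pixels is p0 - p1 - p2 + p3 at a common window offset. A stand-alone sketch of the same identity (names are illustrative):

    // Rectangle sum over an inclusive integral image 'ii' with row stride
    // 'step': every pixel inside [x, x+w) x [y, y+h) is counted exactly once.
    static int rectSum(const int* ii, int step, int x, int y, int w, int h)
    {
        const int* tl = ii + y * step + x;   // top-left corner entry
        return tl[0] - tl[w] - tl[h * step] + tl[h * step + w];
    }
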
@@ -68,24 +68,24 @@ public:
    struct Feature
    {
        Feature();

        float calc( int offset ) const;
        void updatePtrs( const Mat& sum );
        bool read( const FileNode& node );

        bool tilted;

        enum { RECT_NUM = 3 };

        struct
        {
            Rect r;
            float weight;
        } rect[RECT_NUM];

        const int* p[RECT_NUM][4];
    };

    HaarEvaluator();
    virtual ~HaarEvaluator();

@@ -109,13 +109,13 @@ protected:
    Mat sum0, sqsum0, tilted0;
    Mat sum, sqsum, tilted;

    Rect normrect;
    const int *p[4];
    const double *pq[4];

    int offset;
    double varianceNormFactor;
};

inline HaarEvaluator::Feature :: Feature()

@@ -123,8 +123,8 @@ inline HaarEvaluator::Feature :: Feature()
    tilted = false;
    rect[0].r = rect[1].r = rect[2].r = Rect();
    rect[0].weight = rect[1].weight = rect[2].weight = 0;
    p[0][0] = p[0][1] = p[0][2] = p[0][3] =
        p[1][0] = p[1][1] = p[1][2] = p[1][3] =
        p[2][0] = p[2][1] = p[2][2] = p[2][3] = 0;
}

@@ -134,7 +134,7 @@ inline float HaarEvaluator::Feature :: calc( int offset ) const
    if( rect[2].weight != 0.0f )
        ret += rect[2].weight * CALC_SUM(p[2], offset);

    return ret;
}

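HaarEvaluator::Feature::calc combines up to RECT_NUM weighted rectangle sums; the guard above skips the optional third rectangle when its weight is zero. A compact restatement of that arithmetic (a sketch, not the committed code):

    // Haar feature value = sum_i weight[i] * rectsum[i]; the third rectangle
    // is optional, signalled by a zero weight.
    static float haarFeature(const float weight[3], const float rectsum[3])
    {
        float ret = weight[0] * rectsum[0] + weight[1] * rectsum[1];
        if (weight[2] != 0.0f)
            ret += weight[2] * rectsum[2];
        return ret;
    }
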
@@ -167,27 +167,27 @@ public:
    struct Feature
    {
        Feature();
        Feature( int x, int y, int _block_w, int _block_h ) :
            rect(x, y, _block_w, _block_h) {}

        int calc( int offset ) const;
        void updatePtrs( const Mat& sum );
        bool read( const FileNode& node );

        Rect rect; // width and height for block
        const int* p[16]; // fast access: 4x4 grid of integral-image pointers
    };

    LBPEvaluator();
    virtual ~LBPEvaluator();

    virtual bool read( const FileNode& node );
    virtual Ptr<FeatureEvaluator> clone() const;
    virtual int getFeatureType() const { return FeatureEvaluator::LBP; }

    virtual bool setImage(const Mat& image, Size _origWinSize);
    virtual bool setWindow(Point pt);

    int operator()(int featureIdx) const
    { return featuresPtr[featureIdx].calc(offset); }
    virtual int calcCat(int featureIdx) const

@@ -200,9 +200,9 @@ protected:
    Rect normrect;

    int offset;
};

inline LBPEvaluator::Feature :: Feature()
{
    rect = Rect();

@@ -213,7 +213,7 @@ inline LBPEvaluator::Feature :: Feature()
inline int LBPEvaluator::Feature :: calc( int offset ) const
{
    int cval = CALC_SUM_( p[5], p[6], p[9], p[10], offset );

    return (CALC_SUM_( p[0], p[1], p[4], p[5], offset ) >= cval ? 128 : 0) |   // 0
           (CALC_SUM_( p[1], p[2], p[5], p[6], offset ) >= cval ? 64 : 0) |    // 1
           (CALC_SUM_( p[2], p[3], p[6], p[7], offset ) >= cval ? 32 : 0) |    // 2

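The hunk shows only the first three of the eight neighbour comparisons. p[0..15] are the corners of a 4x4 grid of integral-image entries covering a 3x3 block of cells; cval is the central cell's sum, and each surrounding cell contributes one bit of the LBP code. A sketch of the complete expression, assuming the comparisons continue clockwise around the centre as the numbered comments indicate:

    // Cells of the 3x3 block, clockwise from top-left; 'c' is the central
    // cell (cval). Each '>= cval' comparison sets one bit, MSB first.
    //     0 1 2
    //     7 c 3
    //     6 5 4
    return (CALC_SUM_( p[0],  p[1],  p[4],  p[5],  offset ) >= cval ? 128 : 0) | // 0
           (CALC_SUM_( p[1],  p[2],  p[5],  p[6],  offset ) >= cval ?  64 : 0) | // 1
           (CALC_SUM_( p[2],  p[3],  p[6],  p[7],  offset ) >= cval ?  32 : 0) | // 2
           (CALC_SUM_( p[6],  p[7],  p[10], p[11], offset ) >= cval ?  16 : 0) | // 3
           (CALC_SUM_( p[10], p[11], p[14], p[15], offset ) >= cval ?   8 : 0) | // 4
           (CALC_SUM_( p[9],  p[10], p[13], p[14], offset ) >= cval ?   4 : 0) | // 5
           (CALC_SUM_( p[8],  p[9],  p[12], p[13], offset ) >= cval ?   2 : 0) | // 6
           (CALC_SUM_( p[4],  p[5],  p[8],  p[9],  offset ) >= cval ?   1 : 0);  // 7
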
@@ -248,7 +248,7 @@ public:
        Feature();
        float calc( int offset ) const;
        void updatePtrs( const vector<Mat>& _hist, const Mat &_normSum );
        bool read( const FileNode& node );

        enum { CELL_NUM = 4, BIN_NUM = 9 };

@@ -331,13 +331,13 @@ inline int predictOrdered( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &_f
    CascadeClassifier::Data::DTreeNode* cascadeNodes = &cascade.data.nodes[0];
    CascadeClassifier::Data::DTree* cascadeWeaks = &cascade.data.classifiers[0];
    CascadeClassifier::Data::Stage* cascadeStages = &cascade.data.stages[0];

    for( int si = 0; si < nstages; si++ )
    {
        CascadeClassifier::Data::Stage& stage = cascadeStages[si];
        int wi, ntrees = stage.ntrees;
        sum = 0;

        for( wi = 0; wi < ntrees; wi++ )
        {
            CascadeClassifier::Data::DTree& weak = cascadeWeaks[stage.first + wi];

@@ -355,7 +355,7 @@ inline int predictOrdered( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &_f
            leafOfs += weak.nodeCount + 1;
        }
        if( sum < stage.threshold )
            return -si;
    }
    return 1;
}

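Each stage accumulates the leaf values of its boosted trees and rejects the window as soon as the sum falls below stage.threshold, returning the negated stage index; only a window that survives every stage yields 1. A self-contained sketch of that control flow (StageInfo and the weak-evaluation callback are stand-ins for the real data structures):

    struct StageInfo { int ntrees; double threshold; };

    // Returns 1 if the window passes all stages, -si if stage si rejects it.
    static int runStages(const StageInfo* stages, int nstages,
                         double (*evalWeak)(int si, int wi))
    {
        for (int si = 0; si < nstages; si++)
        {
            double sum = 0;
            for (int wi = 0; wi < stages[si].ntrees; wi++)
                sum += evalWeak(si, wi);   // leaf value of weak tree wi
            if (sum < stages[si].threshold)
                return -si;                // early rejection
        }
        return 1;                          // all stages passed
    }
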
@@ -372,13 +372,13 @@ inline int predictCategorical( CascadeClassifier& cascade, Ptr<FeatureEvaluator>
    CascadeClassifier::Data::DTreeNode* cascadeNodes = &cascade.data.nodes[0];
    CascadeClassifier::Data::DTree* cascadeWeaks = &cascade.data.classifiers[0];
    CascadeClassifier::Data::Stage* cascadeStages = &cascade.data.stages[0];

    for( int si = 0; si < nstages; si++ )
    {
        CascadeClassifier::Data::Stage& stage = cascadeStages[si];
        int wi, ntrees = stage.ntrees;
        sum = 0;

        for( wi = 0; wi < ntrees; wi++ )
        {
            CascadeClassifier::Data::DTree& weak = cascadeWeaks[stage.first + wi];

@@ -396,7 +396,7 @@ inline int predictCategorical( CascadeClassifier& cascade, Ptr<FeatureEvaluator>
            leafOfs += weak.nodeCount + 1;
        }
        if( sum < stage.threshold )
            return -si;
    }
    return 1;
}

@@ -444,7 +444,7 @@ inline int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvalu
    CascadeClassifier::Data::Stage* cascadeStages = &cascade.data.stages[0];

#ifdef HAVE_TEGRA_OPTIMIZATION
    float tmp; // float accumulator -- float operations are quicker
#endif
    for( int si = 0; si < nstages; si++ )
    {

@@ -472,11 +472,11 @@ inline int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvalu
#ifdef HAVE_TEGRA_OPTIMIZATION
        if( tmp < stage.threshold ) {
            sum = (double)tmp;
            return -si;
        }
#else
        if( sum < stage.threshold )
            return -si;
#endif
    }

@@ -53,13 +53,13 @@ IplImage* resize_opencv(IplImage* img, float scale)
//}
//// resize along each column
//// result is transposed, so we can apply it twice for a complete resize
//void resize1dtran(float *src, int sheight, float *dst, int dheight,
//                  int width, int chan) {
// alphainfo *ofs;
// float scale = (float)dheight/(float)sheight;
// float invscale = (float)sheight/(float)dheight;
//
// // we cache the interpolation values since they can be
// // shared among different columns
// int len = (int)ceilf(dheight*invscale) + 2*dheight;
// int k = 0;

@@ -126,7 +126,7 @@ IplImage* resize_opencv(IplImage* img, float scale)
// int index;
// int widthStep;
// int tW, tH;
//
// W = (float)img->width;
// H = (float)img->height;
// channels = img->nChannels;

@@ -149,16 +149,16 @@ IplImage* resize_opencv(IplImage* img, float scale)
// }
// }
// }
//
// imgTmp = cvCreateImage(cvSize(tW , tH), IPL_DEPTH_32F, channels);
//
// dst = (float *)malloc(sizeof(float) * (int)(tH * tW) * channels);
// tmp = (float *)malloc(sizeof(float) * (int)(tH * W) * channels);
//
// resize1dtran(src, (int)H, tmp, (int)tH, (int)W , 3);
//
// resize1dtran(tmp, (int)W, dst, (int)tW, (int)tH, 3);
//
// index = 0;
// //dataf = (float*)imgTmp->imageData;
// for (kk = 0; kk < channels; kk++)

@@ -188,7 +188,7 @@ IplImage* resize_opencv(IplImage* img, float scale)
// int index;
// int widthStep;
// int tW, tH;
//
// W = (float)img->width;
// H = (float)img->height;
// channels = img->nChannels;

@@ -210,16 +210,16 @@ IplImage* resize_opencv(IplImage* img, float scale)
// }
// }
// }
//
// imgTmp = cvCreateImage(cvSize(tW , tH), IPL_DEPTH_32F, channels);
//
// dst = (float *)malloc(sizeof(float) * (int)(tH * tW) * channels);
// tmp = (float *)malloc(sizeof(float) * (int)(tH * W) * channels);
//
// resize1dtran(src, (int)H, tmp, (int)tH, (int)W , 3);
//
// resize1dtran(tmp, (int)W, dst, (int)tW, (int)tH, 3);
//
// index = 0;
// for (kk = 0; kk < channels; kk++)
// {

@@ -232,7 +232,7 @@ IplImage* resize_opencv(IplImage* img, float scale)
// }
// }
// }
//
// free(src);
// free(dst);
// free(tmp);

@@ -2,7 +2,7 @@
#include "_lsvm_routine.h"

int allocFilterObject(CvLSVMFilterObject **obj, const int sizeX,
                      const int sizeY, const int numFeatures)
{
    int i;
    (*obj) = (CvLSVMFilterObject *)malloc(sizeof(CvLSVMFilterObject));

@@ -16,7 +16,7 @@ int allocFilterObject(CvLSVMFilterObject **obj, const int sizeX,
    (*obj)->V.x = 0;
    (*obj)->V.y = 0;
    (*obj)->V.l = 0;
    (*obj)->H = (float *) malloc(sizeof (float) *
                                 (sizeX * sizeY * numFeatures));
    for(i = 0; i < sizeX * sizeY * numFeatures; i++)
    {

@@ -33,7 +33,7 @@ int freeFilterObject (CvLSVMFilterObject **obj)
    return LATENT_SVM_OK;
}

int allocFeatureMapObject(CvLSVMFeatureMap **obj, const int sizeX,
                          const int sizeY, const int numFeatures)
{
    int i;

@@ -41,7 +41,7 @@ int allocFeatureMapObject(CvLSVMFeatureMap **obj, const int sizeX,
    (*obj)->sizeX = sizeX;
    (*obj)->sizeY = sizeY;
    (*obj)->numFeatures = numFeatures;
    (*obj)->map = (float *) malloc(sizeof (float) *
                                   (sizeX * sizeY * numFeatures));
    for(i = 0; i < sizeX * sizeY * numFeatures; i++)
    {

@@ -59,7 +59,7 @@ int freeFeatureMapObject (CvLSVMFeatureMap **obj)
}

int allocFeaturePyramidObject(CvLSVMFeaturePyramid **obj,
                              const int numLevels)
{
    (*obj) = (CvLSVMFeaturePyramid *)malloc(sizeof(CvLSVMFeaturePyramid));
    (*obj)->numLevels = numLevels;

@@ -70,7 +70,7 @@ int allocFeaturePyramidObject(CvLSVMFeaturePyramid **obj,
int freeFeaturePyramidObject (CvLSVMFeaturePyramid **obj)
{
    int i;
    if(*obj == NULL) return LATENT_SVM_MEM_NULL;
    for(i = 0; i < (*obj)->numLevels; i++)
    {

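These routines follow a strict alloc/free pairing: each alloc creates both the struct and its payload buffer, and the matching free releases both and NULLs the pointer. A minimal usage sketch (the assumption that the alloc functions return LATENT_SVM_OK on success follows the free functions shown above; the sizes are illustrative):

    #include "_lsvm_routine.h"

    int featureMapExample(void)
    {
        CvLSVMFeatureMap *map = NULL;
        if (allocFeatureMapObject(&map, 8, 8, 31) != LATENT_SVM_OK)
            return LATENT_SVM_MEM_NULL;
        /* map->map now holds 8 * 8 * 31 floats, initialized by the loop above */
        freeFeatureMapObject(&map);
        return LATENT_SVM_OK;
    }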