code cleaning of cascade classifier
commit d8415ed44e
parent 33c44fcd7a
@@ -337,7 +337,7 @@ public:
     bool setImage( Ptr<FeatureEvaluator>&, const Mat& );
     int runAt( Ptr<FeatureEvaluator>&, Point );

-    bool is_stump_based;
+    bool isStumpBased;

     int stageType;
     int featureType;
@@ -717,26 +717,30 @@ inline int predictCategorical( CascadeClassifier& cascade, Ptr<FeatureEvaluator>
 template<class FEval>
 inline int predictOrderedStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &_feval )
 {
-    int si, nstages = (int)cascade.stages.size();
     int nodeOfs = 0, leafOfs = 0;
     FEval& feval = (FEval&)*_feval;
     float* cascadeLeaves = &cascade.leaves[0];
     CascadeClassifier::DTreeNode* cascadeNodes = &cascade.nodes[0];
     CascadeClassifier::Stage* cascadeStages = &cascade.stages[0];
-    for( si = 0; si < nstages; si++ )
+
+    int nstages = (int)cascade.stages.size();
+    for( int stageIdx = 0; stageIdx < nstages; stageIdx++ )
     {
-        CascadeClassifier::Stage& stage = cascadeStages[si];
-        int wi, ntrees = stage.ntrees;
-        double sum = 0;
-        for( wi = 0; wi < ntrees; wi++, nodeOfs++, leafOfs+= 2 )
+        CascadeClassifier::Stage& stage = cascadeStages[stageIdx];
+        double sum = 0.0;
+
+        int ntrees = stage.ntrees;
+        for( int i = 0; i < ntrees; i++, nodeOfs++, leafOfs+= 2 )
         {
             CascadeClassifier::DTreeNode& node = cascadeNodes[nodeOfs];
-            double val = feval(node.featureIdx);
-            sum += cascadeLeaves[ val < node.threshold ? leafOfs : leafOfs+1 ];
+            double value = feval(node.featureIdx);
+            sum += cascadeLeaves[ value < node.threshold ? leafOfs : leafOfs + 1 ];
         }

         if( sum < stage.threshold )
-            return -si;
+            return -stageIdx;
     }

     return 1;
 }

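Note on the loop above: predictOrderedStump assumes a stump-based cascade, where each weak classifier is a single decision node with exactly two leaf values, which is why nodeOfs advances by 1 while leafOfs advances by 2 per tree. A minimal standalone sketch of that flat layout (hypothetical simplified types, not the OpenCV API):

#include <vector>

// Hypothetical mirror of the flat stump layout: stump i occupies
// nodes[i] plus the leaf pair leaves[2*i] / leaves[2*i + 1].
struct StumpNode { int featureIdx; float threshold; };

static double evalStumpStage( const std::vector<StumpNode>& nodes,
                              const std::vector<float>& leaves,
                              const std::vector<double>& featureValues )
{
    double sum = 0.0;
    for( size_t i = 0; i < nodes.size(); i++ )
    {
        double value = featureValues[nodes[i].featureIdx];
        // Below the node threshold -> left leaf, otherwise right leaf,
        // exactly as in the sum += cascadeLeaves[...] line above.
        sum += leaves[ value < nodes[i].threshold ? 2*i : 2*i + 1 ];
    }
    return sum;
}

A stage passes when sum reaches the stage threshold; the cascade rejects at the first failing stage, which is what the early return -stageIdx expresses.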
@@ -773,7 +777,7 @@ inline int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvalu
     return 1;
 }

-int CascadeClassifier::runAt( Ptr<FeatureEvaluator> &_feval, Point pt )
+int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& featureEvaluator, Point pt )
 {
     CV_Assert( oldCascade.empty() );
     /*if( !oldCascade.empty() )
@@ -781,17 +785,17 @@ int CascadeClassifier::runAt( Ptr<FeatureEvaluator> &_feval, Point pt )

     assert(featureType == FeatureEvaluator::HAAR ||
            featureType == FeatureEvaluator::LBP);
-    return !_feval->setWindow(pt) ? -1 :
-           is_stump_based ? ( featureType == FeatureEvaluator::HAAR ?
-               predictOrderedStump<HaarEvaluator>( *this, _feval ) :
-               predictCategoricalStump<LBPEvaluator>( *this, _feval ) ) :
+    return !featureEvaluator->setWindow(pt) ? -1 :
+           isStumpBased ? ( featureType == FeatureEvaluator::HAAR ?
+               predictOrderedStump<HaarEvaluator>( *this, featureEvaluator ) :
+               predictCategoricalStump<LBPEvaluator>( *this, featureEvaluator ) ) :
            ( featureType == FeatureEvaluator::HAAR ?
-               predictOrdered<HaarEvaluator>( *this, _feval ) :
-               predictCategorical<LBPEvaluator>( *this, _feval ) );
+               predictOrdered<HaarEvaluator>( *this, featureEvaluator ) :
+               predictCategorical<LBPEvaluator>( *this, featureEvaluator ) );
 }

-bool CascadeClassifier::setImage( Ptr<FeatureEvaluator> &_feval, const Mat& image )
+bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& featureEvaluator, const Mat& image )
 {
     /*if( !oldCascade.empty() )
     {
@@ -803,7 +807,7 @@ bool CascadeClassifier::setImage( Ptr<FeatureEvaluator> &_feval, const Mat& imag
         cvSetImagesForHaarClassifierCascade( oldCascade, &_sum, &_sqsum, &_tilted, 1. );
         return true;
     }*/
-    return empty() ? false : _feval->setImage(image, origWinSize );
+    return empty() ? false : featureEvaluator->setImage(image, origWinSize);
 }

@@ -811,37 +815,40 @@ struct CascadeClassifierInvoker
 {
     CascadeClassifierInvoker( CascadeClassifier& _cc, Size _sz1, int _stripSize, int _yStep, double _factor, ConcurrentRectVector& _vec )
     {
-        cc = &_cc;
-        sz1 = _sz1;
+        classifier = &_cc;
+        processingAreaSize = _sz1;
         stripSize = _stripSize;
         yStep = _yStep;
-        factor = _factor;
-        vec = &_vec;
+        scalingFactor = _factor;
+        rectangles = &_vec;
     }

     void operator()(const BlockedRange& range) const
     {
-        Ptr<FeatureEvaluator> feval = cc->feval->clone();
-        int y1 = range.begin()*stripSize, y2 = min(range.end()*stripSize, sz1.height);
-        Size winSize(cvRound(cc->origWinSize.width*factor), cvRound(cc->origWinSize.height*factor));
+        Ptr<FeatureEvaluator> evaluator = classifier->feval->clone();
+        Size winSize(cvRound(classifier->origWinSize.width * scalingFactor), cvRound(classifier->origWinSize.height * scalingFactor));

+        int y1 = range.begin() * stripSize;
+        int y2 = min(range.end() * stripSize, processingAreaSize.height);
         for( int y = y1; y < y2; y += yStep )
-            for( int x = 0; x < sz1.width; x += yStep )
         {
-            int r = cc->runAt(feval, Point(x, y));
-            if( r > 0 )
-                vec->push_back(Rect(cvRound(x*factor), cvRound(y*factor),
-                                    winSize.width, winSize.height));
-            if( r == 0 )
-                x += yStep;
+            for( int x = 0; x < processingAreaSize.width; x += yStep )
+            {
+                int result = classifier->runAt(evaluator, Point(x, y));
+                if( result > 0 )
+                    rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
+                                               winSize.width, winSize.height));
+                if( result == 0 )
+                    x += yStep;
+            }
         }
     }

-    CascadeClassifier* cc;
-    Size sz1;
+    CascadeClassifier* classifier;
+    ConcurrentRectVector* rectangles;
+    Size processingAreaSize;
     int stripSize, yStep;
-    double factor;
-    ConcurrentRectVector* vec;
+    double scalingFactor;
 };

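Two details of the rewritten operator() worth spelling out: each parallel task covers the row band [range.begin()*stripSize, min(range.end()*stripSize, height)), and a runAt result of 0 means the window was rejected at stage 0 (the predictors return -stageIdx on rejection), so the scan skips one extra column after such a strong early rejection. A hedged standalone sketch of that scan, with a generic classify callback standing in for runAt (hypothetical names, not the OpenCV API):

#include <algorithm>
#include <utility>
#include <vector>

template<class Classify>
static void scanStrip( int rangeBegin, int rangeEnd, int stripSize,
                       int areaWidth, int areaHeight, int yStep,
                       Classify classify,
                       std::vector< std::pair<int, int> >& hits )
{
    // Row band for this task, clamped to the processing area height.
    int y1 = rangeBegin * stripSize;
    int y2 = std::min( rangeEnd * stripSize, areaHeight );
    for( int y = y1; y < y2; y += yStep )
    {
        for( int x = 0; x < areaWidth; x += yStep )
        {
            int result = classify( x, y );
            if( result > 0 )
                hits.push_back( std::make_pair(x, y) );  // accepted window
            if( result == 0 )
                x += yStep;  // rejected at the very first stage: skip a column
        }
    }
}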
@@ -849,7 +856,7 @@ struct getRect { Rect operator ()(const CvAvgComp& e) const { return e.rect; } }

 void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
                                           double scaleFactor, int minNeighbors,
-                                          int flags, Size minSize, Size maxSize )
+                                          int flags, Size minObjectSize, Size maxObjectSize )
 {
     const double GROUP_EPS = 0.2;

@@ -863,7 +870,7 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
         MemStorage storage(cvCreateMemStorage(0));
         CvMat _image = image;
         CvSeq* _objects = cvHaarDetectObjects( &_image, oldCascade, storage, scaleFactor,
-                                              minNeighbors, flags, minSize );
+                                              minNeighbors, flags, minObjectSize );
         vector<CvAvgComp> vecAvgComp;
         Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);
         objects.resize(vecAvgComp.size());
@@ -871,57 +878,60 @@ void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& object
         return;
     }

-    if( maxSize.height == 0 || maxSize.width == 0 )
-        maxSize = image.size();
-
     objects.clear();

-    Mat img = image, imgbuf(image.rows+1, image.cols+1, CV_8U);
-
-    if( img.channels() > 1 )
+    if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
+        maxObjectSize = image.size();
+
+    Mat grayImage = image;
+    if( grayImage.channels() > 1 )
     {
         Mat temp;
-        cvtColor(img, temp, CV_BGR2GRAY);
-        img = temp;
+        cvtColor(grayImage, temp, CV_BGR2GRAY);
+        grayImage = temp;
     }

-    ConcurrentRectVector allCandidates;
+    Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);
+    ConcurrentRectVector candidates;

     for( double factor = 1; ; factor *= scaleFactor )
     {
         int stripCount, stripSize;
-        Size winSize( cvRound(origWinSize.width*factor), cvRound(origWinSize.height*factor) );
-        Size sz( cvRound( img.cols/factor ), cvRound( img.rows/factor ) );
-        Size sz1( sz.width - origWinSize.width, sz.height - origWinSize.height );

-        if( sz1.width <= 0 || sz1.height <= 0 )
+        Size windowSize( cvRound(origWinSize.width*factor), cvRound(origWinSize.height*factor) );
+        Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
+        Size processingAreaSize( scaledImageSize.width - origWinSize.width, scaledImageSize.height - origWinSize.height );
+
+        if( processingAreaSize.width <= 0 || processingAreaSize.height <= 0 )
             break;
-        if( winSize.width > maxSize.width || winSize.height > maxSize.height )
+        if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
             break;
-        if( winSize.width < minSize.width || winSize.height < minSize.height )
+        if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
             continue;

         int yStep = factor > 2. ? 1 : 2;

 #ifdef HAVE_TBB
         const int PTS_PER_THREAD = 1000;
-        stripCount = ((sz1.width/yStep)*(sz1.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
+        stripCount = ((processingAreaSize.width/yStep)*(processingAreaSize.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
         stripCount = std::min(std::max(stripCount, 1), 100);
-        stripSize = (((sz1.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;
+        stripSize = (((processingAreaSize.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;
 #else
         stripCount = 1;
-        stripSize = sz1.height;
+        stripSize = processingAreaSize.height;
 #endif

-        Mat img1( sz, CV_8U, imgbuf.data );
-        resize( img, img1, sz, 0, 0, CV_INTER_LINEAR );
-        if( !feval->setImage( img1, origWinSize ) )
+        Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
+        resize( grayImage, scaledImage, scaledImageSize, 0, 0, CV_INTER_LINEAR );
+        if( !feval->setImage( scaledImage, origWinSize ) )
             break;

-        parallel_for(BlockedRange(0, stripCount), CascadeClassifierInvoker(*this, sz1, stripSize, yStep, factor, allCandidates));
+        parallel_for(BlockedRange(0, stripCount), CascadeClassifierInvoker(*this, processingAreaSize, stripSize, yStep, factor, candidates));
     }

-    objects.resize(allCandidates.size());
-    std::copy(allCandidates.begin(), allCandidates.end(), objects.begin());
+    objects.resize(candidates.size());
+    std::copy(candidates.begin(), candidates.end(), objects.begin());

     groupRectangles( objects, minNeighbors, GROUP_EPS );
 }

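For orientation, a minimal hedged usage sketch of the renamed detectMultiScale parameters (minObjectSize/maxObjectSize); the file names are placeholders and the header paths assume the OpenCV 2.x module layout:

#include <vector>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::CascadeClassifier cascade;
    // Placeholder path: any trained cascade XML works here.
    if( !cascade.load( "haarcascade_frontalface_alt.xml" ) )
        return 1;

    cv::Mat image = cv::imread( "input.jpg" );
    if( image.empty() )
        return 1;

    std::vector<cv::Rect> objects;
    // scaleFactor 1.2, minNeighbors 3, no flags; an empty maxObjectSize
    // means "whole image", per the maxObjectSize.width == 0 check above.
    cascade.detectMultiScale( image, objects, 1.2, 3, 0,
                              cv::Size(30, 30), cv::Size() );
    return 0;
}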
@@ -947,7 +957,7 @@ bool CascadeClassifier::read(const FileNode& root)
     origWinSize.height = (int)root[CC_HEIGHT];
     CV_Assert( origWinSize.height > 0 && origWinSize.width > 0 );

-    is_stump_based = (int)(root[CC_STAGE_PARAMS][CC_MAX_DEPTH]) == 1 ? true : false;
+    isStumpBased = (int)(root[CC_STAGE_PARAMS][CC_MAX_DEPTH]) == 1 ? true : false;

     // load feature params
     FileNode fn = root[CC_FEATURE_PARAMS];
@@ -112,7 +112,7 @@ CvHidHaarStageClassifier;
 struct CvHidHaarClassifierCascade
 {
     int count;
-    int is_stump_based;
+    int isStumpBased;
     int has_tilted_features;
     int is_tree;
     double inv_window_area;
@@ -272,7 +272,7 @@ icvCreateHidHaarClassifierCascade( CvHaarClassifierCascade* cascade )
     haar_classifier_ptr = (CvHidHaarClassifier*)(out->stage_classifier + cascade->count);
     haar_node_ptr = (CvHidHaarTreeNode*)(haar_classifier_ptr + total_classifiers);

-    out->is_stump_based = 1;
+    out->isStumpBased = 1;
     out->has_tilted_features = has_tilted_features;
     out->is_tree = 0;

@@ -329,12 +329,12 @@ icvCreateHidHaarClassifierCascade( CvHaarClassifierCascade* cascade )
                 haar_node_ptr =
                     (CvHidHaarTreeNode*)cvAlignPtr(alpha_ptr+node_count+1, sizeof(void*));

-                out->is_stump_based &= node_count == 1;
+                out->isStumpBased &= node_count == 1;
             }
         }

 #ifdef HAVE_IPP
-    int can_use_ipp = !out->has_tilted_features && !out->is_tree && out->is_stump_based;
+    int can_use_ipp = !out->has_tilted_features && !out->is_tree && out->isStumpBased;

     if( can_use_ipp )
     {
@@ -719,7 +719,7 @@ cvRunHaarClassifierCascade( const CvHaarClassifierCascade* _cascade,
             }
         }
     }
-    else if( cascade->is_stump_based )
+    else if( cascade->isStumpBased )
     {
         for( i = start_stage; i < cascade->count; i++ )
         {