fixed various warnings from the “doc” and other builders

Vadim Pisarevsky
2013-12-19 16:31:30 +04:00
parent a1784b7320
commit f41f633d2d
8 changed files with 105 additions and 105 deletions


@@ -113,12 +113,12 @@ struct Logger
namespace cv
{
template<typename _Tp> void copyVectorToUMat(const std::vector<_Tp>& v, UMat& um)
{
-if(v.empty())
-um.release();
-Mat(1, (int)(v.size()*sizeof(v[0])), CV_8U, (void*)&v[0]).copyTo(um);
+if(v.empty())
+um.release();
+Mat(1, (int)(v.size()*sizeof(v[0])), CV_8U, (void*)&v[0]).copyTo(um);
}
void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps, std::vector<int>* weights, std::vector<double>* levelWeights)
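
The helper above packs a host-side vector into a UMat by viewing its elements as one flat row of bytes, so OpenCL kernels can later index the data as a raw buffer. A minimal standalone sketch of the same pattern (Payload and uploadAsBytes are illustrative names, not part of this commit):

    #include <opencv2/core.hpp>
    #include <vector>

    // Hypothetical POD payload standing in for OptFeature, Stump, etc.
    struct Payload { int featureIdx; float threshold, left, right; };

    // View the vector as a 1xN CV_8U Mat and copy it into the UMat;
    // copyTo allocates the device buffer and transfers the bytes.
    static void uploadAsBytes(const std::vector<Payload>& v, cv::UMat& um)
    {
        if (v.empty()) { um.release(); return; } // &v[0] on an empty vector is UB
        cv::Mat bytes(1, (int)(v.size()*sizeof(v[0])), CV_8U, (void*)&v[0]);
        bytes.copyTo(um);
    }

Note the early return in this sketch: the in-tree template falls through to the Mat constructor even for an empty vector, which only works because its callers appear never to pass one.
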
@@ -528,24 +528,24 @@ bool HaarEvaluator::setImage( InputArray _image, Size _origWinSize, Size _sumSiz
{
Size imgsz = _image.size();
int cols = imgsz.width, rows = imgsz.height;
if (imgsz.width < origWinSize.width || imgsz.height < origWinSize.height)
return false;
origWinSize = _origWinSize;
normrect = Rect(1, 1, origWinSize.width-2, origWinSize.height-2);
int rn = _sumSize.height, cn = _sumSize.width, rn_scale = hasTiltedFeatures ? 2 : 1;
int sumStep, tofs = 0;
CV_Assert(rn >= rows+1 && cn >= cols+1);
if( _image.isUMat() )
{
usum0.create(rn*rn_scale, cn, CV_32S);
usqsum0.create(rn, cn, CV_32S);
usum = UMat(usum0, Rect(0, 0, cols+1, rows+1));
usqsum = UMat(usqsum0, Rect(0, 0, cols, rows));
if( hasTiltedFeatures )
{
UMat utilted(usum0, Rect(0, _sumSize.height, cols+1, rows+1));
@@ -556,7 +556,7 @@ bool HaarEvaluator::setImage( InputArray _image, Size _origWinSize, Size _sumSiz
{
integral(_image, usum, noArray(), noArray(), CV_32S);
}
sqrBoxFilter(_image, usqsum, CV_32S,
Size(normrect.width, normrect.height),
Point(0, 0), false);
@@ -572,7 +572,7 @@ bool HaarEvaluator::setImage( InputArray _image, Size _origWinSize, Size _sumSiz
sqsum0.create(rn, cn, CV_32S);
sum = sum0(Rect(0, 0, cols+1, rows+1));
sqsum = sqsum0(Rect(0, 0, cols, rows));
if( hasTiltedFeatures )
{
Mat tilted = sum0(Rect(0, _sumSize.height, cols+1, rows+1));
@@ -591,7 +591,7 @@ bool HaarEvaluator::setImage( InputArray _image, Size _origWinSize, Size _sumSiz
size_t fi, nfeatures = features->size();
const std::vector<Feature>& ff = *features;
if( sumSize0 != _sumSize )
{
optfeatures->resize(nfeatures);
@@ -600,13 +600,13 @@ bool HaarEvaluator::setImage( InputArray _image, Size _origWinSize, Size _sumSiz
optfeaturesPtr[fi].setOffsets( ff[fi], sumStep, tofs );
}
if( _image.isUMat() && (sumSize0 != _sumSize || ufbuf.empty()) )
-copyVectorToUMat(*optfeatures, ufbuf);
+copyVectorToUMat(*optfeatures, ufbuf);
sumSize0 = _sumSize;
return true;
}
bool HaarEvaluator::setWindow( Point pt )
{
if( pt.x < 0 || pt.y < 0 ||
@@ -628,12 +628,12 @@ bool HaarEvaluator::setWindow( Point pt )
return true;
}
Rect HaarEvaluator::getNormRect() const
{
return normrect;
}
void HaarEvaluator::getUMats(std::vector<UMat>& bufs)
{
bufs.clear();
@@ -1100,14 +1100,14 @@ bool CascadeClassifierImpl::detectSingleScale( InputArray _image, Size processin
std::vector<Rect> candidatesVector;
std::vector<int> rejectLevels;
std::vector<double> levelWeights;
int stripCount, stripSize;
const int PTS_PER_THREAD = 1000;
stripCount = ((processingRectSize.width/yStep)*(processingRectSize.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
stripCount = std::min(std::max(stripCount, 1), 100);
stripSize = (((processingRectSize.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;
if( outputRejectLevels )
{
parallel_for_(Range(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
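
To make the strip arithmetic above concrete, here is a self-contained check with illustrative values (the 640x480 processing rectangle and yStep = 2 are assumptions, not values from this commit):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int width = 640, height = 480, yStep = 2;
        const int PTS_PER_THREAD = 1000;
        // Same formula as above: approximate number of scanned window
        // positions, split into ~PTS_PER_THREAD chunks, clamped to [1, 100].
        int stripCount = ((width/yStep)*(height + yStep - 1)/yStep
                          + PTS_PER_THREAD/2)/PTS_PER_THREAD;
        stripCount = std::min(std::max(stripCount, 1), 100);
        // Rows per strip, rounded up to a multiple of yStep.
        int stripSize = (((height + stripCount - 1)/stripCount
                          + yStep - 1)/yStep)*yStep;
        std::printf("%d strips of %d rows\n", stripCount, stripSize); // 77 of 8
        return 0;
    }
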
@@ -1129,7 +1129,7 @@ bool CascadeClassifierImpl::detectSingleScale( InputArray _image, Size processin
return true;
}
bool CascadeClassifierImpl::ocl_detectSingleScale( InputArray _image, Size processingRectSize,
int yStep, double factor, Size sumSize0 )
{
@@ -1137,9 +1137,9 @@ bool CascadeClassifierImpl::ocl_detectSingleScale( InputArray _image, Size proce
Ptr<HaarEvaluator> haar = featureEvaluator.dynamicCast<HaarEvaluator>();
if( haar.empty() )
return false;
haar->setImage(_image, data.origWinSize, sumSize0);
if( cascadeKernel.empty() )
{
cascadeKernel.create("runHaarClassifierStump", ocl::objdetect::cascadedetect_oclsrc,
@@ -1147,31 +1147,31 @@ bool CascadeClassifierImpl::ocl_detectSingleScale( InputArray _image, Size proce
if( cascadeKernel.empty() )
return false;
}
if( ustages.empty() )
{
-copyVectorToUMat(data.stages, ustages);
-copyVectorToUMat(data.stumps, ustumps);
+copyVectorToUMat(data.stages, ustages);
+copyVectorToUMat(data.stumps, ustumps);
}
std::vector<UMat> bufs;
haar->getUMats(bufs);
CV_Assert(bufs.size() == 3);
Rect normrect = haar->getNormRect();
//processingRectSize = Size(yStep, yStep);
size_t globalsize[] = { (processingRectSize.width/yStep + VECTOR_SIZE-1)/VECTOR_SIZE, processingRectSize.height/yStep };
cascadeKernel.args(ocl::KernelArg::ReadOnlyNoSize(bufs[0]), // sum
ocl::KernelArg::ReadOnlyNoSize(bufs[1]), // sqsum
ocl::KernelArg::PtrReadOnly(bufs[2]), // optfeatures
// cascade classifier
(int)data.stages.size(),
ocl::KernelArg::PtrReadOnly(ustages),
ocl::KernelArg::PtrReadOnly(ustumps),
ocl::KernelArg::PtrWriteOnly(ufacepos), // positions
processingRectSize,
yStep, (float)factor,
@@ -1219,7 +1219,7 @@ static void detectMultiScaleOldFormat( const Mat& image, Ptr<CvHaarClassifierCas
std::transform(vecAvgComp.begin(), vecAvgComp.end(), objects.begin(), getRect());
}
void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::vector<Rect>& candidates,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
double scaleFactor, Size minObjectSize, Size maxObjectSize,
@@ -1227,16 +1227,16 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
{
Size imgsz = _image.size();
int imgtype = _image.type();
Mat grayImage, imageBuffer;
candidates.clear();
rejectLevels.clear();
levelWeights.clear();
if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
maxObjectSize = imgsz;
bool use_ocl = ocl::useOpenCL() &&
getFeatureType() == FeatureEvaluator::HAAR &&
!isOldFormatCascade() &&
@@ -1244,7 +1244,7 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
maskGenerator.empty() &&
!outputRejectLevels &&
tryOpenCL;
if( !use_ocl )
{
Mat image = _image.getMat();
@@ -1270,9 +1270,9 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
uimage.copyTo(ugrayImage);
uimageBuffer.create(imgsz.height + 1, imgsz.width + 1, CV_8U);
}
Size sumSize0((imgsz.width + SUM_ALIGN) & -SUM_ALIGN, imgsz.height+1);
if( use_ocl )
{
ufacepos.create(1, MAX_FACES*4 + 1, CV_32S);
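
The sumSize0 line above rounds the integral-image row length up with the power-of-two trick (x + A) & -A, which equals ((x + 1) + A - 1) & -A, i.e. it aligns width + 1 columns (the integral image's extra column) up to a multiple of SUM_ALIGN. A quick self-contained check:

    #include <cassert>

    // Round width+1 up to a multiple of A (A must be a power of two).
    static int alignedSumCols(int width, int A)
    {
        return (width + A) & -A;
    }

    int main()
    {
        const int SUM_ALIGN = 64;
        assert(alignedSumCols(640, SUM_ALIGN) == 704); // room for 641 columns
        assert(alignedSumCols(639, SUM_ALIGN) == 640); // 640 columns fit exactly
        assert(alignedSumCols(64,  SUM_ALIGN) == 128); // 65 rounds up to 128
        return 0;
    }
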
@@ -1295,7 +1295,7 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
break;
if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
continue;
int yStep;
if( getFeatureType() == cv::FeatureEvaluator::HOG )
{
@@ -1310,14 +1310,14 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
{
UMat uscaledImage(uimageBuffer, Rect(0, 0, scaledImageSize.width, scaledImageSize.height));
resize( ugrayImage, uscaledImage, scaledImageSize, 0, 0, INTER_LINEAR );
if( ocl_detectSingleScale( uscaledImage, processingRectSize, yStep, factor, sumSize0 ) )
continue;
/////// if the OpenCL branch has been executed but failed, fall back to CPU: /////
tryOpenCL = false; // for this cascade do not try OpenCL anymore
// since we may already have some partial results from OpenCL code (unlikely, but still),
// we just recursively call the function again, but with tryOpenCL==false it will
// go with CPU route, so there is no infinite recursion
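
The comment block above describes the control flow only; stripped of the cascade details, the pattern is a single retry with the OpenCL flag cleared, so the recursive call deterministically takes the CPU route. A schematic sketch (not the actual OpenCV code):

    // Attempt the GPU path once; on failure, disable it for this object
    // and recurse, so the second call is guaranteed to run on the CPU.
    struct Detector
    {
        bool tryOpenCL;
        Detector() : tryOpenCL(true) {}

        bool detectOnGPU() { /* kernel launch; may fail */ return false; }
        void detectOnCPU() { /* reference implementation */ }

        void detect()
        {
            if (tryOpenCL && detectOnGPU())
                return;            // GPU path succeeded
            if (tryOpenCL)
            {
                tryOpenCL = false; // remember the failure for this cascade
                detect();          // recurse once; no further GPU attempts
                return;
            }
            detectOnCPU();
        }
    };

    int main() { Detector d; d.detect(); return 0; }
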
@@ -1330,13 +1330,13 @@ void CascadeClassifierImpl::detectMultiScaleNoGrouping( InputArray _image, std::
{
Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
resize( grayImage, scaledImage, scaledImageSize, 0, 0, INTER_LINEAR );
if( !detectSingleScale( scaledImage, processingRectSize, yStep, factor, candidates,
rejectLevels, levelWeights, sumSize0, outputRejectLevels ) )
break;
}
}
if( use_ocl && tryOpenCL )
{
Mat facepos = ufacepos.getMat(ACCESS_READ);
@@ -1424,12 +1424,12 @@ void CascadeClassifierImpl::detectMultiScale( InputArray _image, std::vector<Rec
}
}
CascadeClassifierImpl::Data::Data()
{
stageType = featureType = ncategories = maxNodesPerTree = 0;
}
bool CascadeClassifierImpl::Data::read(const FileNode &root)
{
static const float THRESHOLD_EPS = 1e-5f;
@@ -1503,7 +1503,7 @@ bool CascadeClassifierImpl::Data::read(const FileNode &root)
DTree tree;
tree.nodeCount = (int)internalNodes.size()/nodeStep;
maxNodesPerTree = std::max(maxNodesPerTree, tree.nodeCount);
classifiers.push_back(tree);
nodes.reserve(nodes.size() + tree.nodeCount);
@@ -1538,7 +1538,7 @@ bool CascadeClassifierImpl::Data::read(const FileNode &root)
leaves.push_back((float)*internalNodesIter);
}
}
if( isStumpBased() )
{
int nodeOfs = 0, leafOfs = 0;
@@ -1546,7 +1546,7 @@ bool CascadeClassifierImpl::Data::read(const FileNode &root)
for( size_t stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
const Stage& stage = stages[stageIdx];
int ntrees = stage.ntrees;
for( int i = 0; i < ntrees; i++, nodeOfs++, leafOfs+= 2 )
{
@@ -1560,7 +1560,7 @@ bool CascadeClassifierImpl::Data::read(const FileNode &root)
return true;
}
bool CascadeClassifierImpl::read_(const FileNode& root)
{
tryOpenCL = true;


@@ -49,14 +49,14 @@ public:
protected:
enum { SUM_ALIGN = 64 };
bool detectSingleScale( InputArray image, Size processingRectSize,
int yStep, double factor, std::vector<Rect>& candidates,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
Size sumSize0, bool outputRejectLevels = false );
bool ocl_detectSingleScale( InputArray image, Size processingRectSize,
int yStep, double factor, Size sumSize0 );
void detectMultiScaleNoGrouping( InputArray image, std::vector<Rect>& candidates,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
@@ -109,19 +109,19 @@ protected:
int ntrees;
float threshold;
};
struct Stump
{
Stump() {};
Stump(int _featureIdx, float _threshold, float _left, float _right)
: featureIdx(_featureIdx), threshold(_threshold), left(_left), right(_right) {}
int featureIdx;
float threshold;
float left;
float right;
};
Data();
bool read(const FileNode &node);
@@ -151,7 +151,7 @@ protected:
UMat ufacepos, ustages, ustumps, usubsets;
ocl::Kernel cascadeKernel;
bool tryOpenCL;
Mutex mtx;
};
@@ -240,7 +240,7 @@ protected:
#define CALC_SUM_OFS_(p0, p1, p2, p3, ptr) \
((ptr)[p0] - (ptr)[p1] - (ptr)[p2] + (ptr)[p3])
#define CALC_SUM_OFS(rect, ptr) CALC_SUM_OFS_((rect)[0], (rect)[1], (rect)[2], (rect)[3], ptr)
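
The CALC_SUM_OFS macros above encode the standard integral-image identity: with the rectangle's four corners precomputed as flat offsets p0 (top-left), p1 (top-right), p2 (bottom-left), p3 (bottom-right), any box sum costs four reads. A self-contained illustration (boxSum is an illustrative name):

    #include <cassert>
    #include <vector>

    // S is a (rows+1) x (cols+1) integral image stored row-major with
    // 'step' ints per row; S(y, x) holds the sum of all pixels above
    // and to the left of (y, x). The rectangle sum is tl - tr - bl + br,
    // the same expression CALC_SUM_OFS_ evaluates from cached offsets.
    static int boxSum(const std::vector<int>& S, int step,
                      int x, int y, int w, int h)
    {
        int tl = y*step + x, tr = tl + w, bl = tl + h*step, br = bl + w;
        return S[tl] - S[tr] - S[bl] + S[br];
    }

    int main()
    {
        // 2x2 image of all ones -> 3x3 integral image.
        std::vector<int> S = { 0, 0, 0,
                               0, 1, 2,
                               0, 2, 4 };
        assert(boxSum(S, 3, 0, 0, 2, 2) == 4); // whole image
        assert(boxSum(S, 3, 1, 0, 1, 2) == 2); // right column
        return 0;
    }
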
//---------------------------------------------- HaarEvaluator ---------------------------------------
@@ -343,7 +343,7 @@ inline void HaarEvaluator::OptFeature :: setOffsets( const Feature& _f, int step
weight[0] = _f.rect[0].weight;
weight[1] = _f.rect[1].weight;
weight[2] = _f.rect[2].weight;
Rect r2 = weight[2] > 0 ? _f.rect[2].r : Rect(0,0,0,0);
if (_f.tilted)
{
@@ -615,7 +615,7 @@ inline int predictOrderedStump( CascadeClassifierImpl& cascade,
int nstages = (int)cascade.data.stages.size();
double tmp = 0;
for( int stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
const CascadeClassifierImpl::Data::Stage& stage = cascadeStages[stageIdx];
@@ -677,7 +677,7 @@ inline int predictCategoricalStump( CascadeClassifierImpl& cascade,
sum = (double)tmp;
return -si;
}
cascadeStumps += ntrees;
cascadeSubsets += ntrees*subsetSize;
}


@@ -121,16 +121,15 @@ static bool convert(const String& oldcascade, const String& newcascade)
FileNode sznode = oldroot[ICV_HAAR_SIZE_NAME];
if( sznode.empty() )
return false;
int maxdepth = 0;
Size cascadesize;
cascadesize.width = (int)sznode[0];
cascadesize.height = (int)sznode[1];
std::vector<HaarFeature> features;
-size_t i, j, k, n;
+int i, j, k, n;
FileNode stages_seq = oldroot[ICV_HAAR_STAGES_NAME];
-size_t nstages = stages_seq.size();
+int nstages = (int)stages_seq.size();
std::vector<HaarStageClassifier> stages(nstages);
for( i = 0; i < nstages; i++ )
@@ -139,14 +138,14 @@ static bool convert(const String& oldcascade, const String& newcascade)
HaarStageClassifier& stage = stages[i];
stage.threshold = (double)stagenode[ICV_HAAR_STAGE_THRESHOLD_NAME];
FileNode weaks_seq = stagenode[ICV_HAAR_TREES_NAME];
-size_t nweaks = weaks_seq.size();
+int nweaks = (int)weaks_seq.size();
stage.weaks.resize(nweaks);
for( j = 0; j < nweaks; j++ )
{
HaarClassifier& weak = stage.weaks[j];
FileNode weaknode = weaks_seq[j];
-size_t nnodes = weaknode.size();
+int nnodes = (int)weaknode.size();
for( n = 0; n < nnodes; n++ )
{
@@ -157,7 +156,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
node.f = (int)features.size();
f.tilted = (int)fnode[ICV_HAAR_TILTED_NAME] != 0;
FileNode rects_seq = fnode[ICV_HAAR_RECTS_NAME];
-size_t nrects = rects_seq.size();
+int nrects = (int)rects_seq.size();
for( k = 0; k < nrects; k++ )
{
@@ -199,9 +198,9 @@ static bool convert(const String& oldcascade, const String& newcascade)
if( !newfs.isOpened() )
return false;
-size_t maxWeakCount = 0, nfeatures = features.size();
+int maxWeakCount = 0, nfeatures = (int)features.size();
for( i = 0; i < nstages; i++ )
-maxWeakCount = std::max(maxWeakCount, stages[i].weaks.size());
+maxWeakCount = std::max(maxWeakCount, (int)stages[i].weaks.size());
newfs << "cascade" << "{:opencv-cascade-classifier"
<< "stageType" << "BOOST"
@@ -219,7 +218,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
for( i = 0; i < nstages; i++ )
{
-size_t nweaks = stages[i].weaks.size();
+int nweaks = (int)stages[i].weaks.size();
newfs << "{" << "maxWeakCount" << (int)nweaks
<< "stageThreshold" << stages[i].threshold
<< "weakClassifiers" << "[";
@@ -227,7 +226,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
{
const HaarClassifier& c = stages[i].weaks[j];
newfs << "{" << "internalNodes" << "[";
-size_t nnodes = c.nodes.size(), nleaves = c.leaves.size();
+int nnodes = (int)c.nodes.size(), nleaves = (int)c.leaves.size();
for( k = 0; k < nnodes; k++ )
newfs << c.nodes[k].left << c.nodes[k].right
<< c.nodes[k].f << c.nodes[k].threshold;
@@ -246,7 +245,7 @@ static bool convert(const String& oldcascade, const String& newcascade)
{
const HaarFeature& f = features[i];
newfs << "{" << "rects" << "[";
-for( j = 0; j < (size_t)HaarFeature::RECT_NUM; j++ )
+for( j = 0; j < HaarFeature::RECT_NUM; j++ )
{
if( j >= 2 && fabs(f.rect[j].weight) < FLT_EPSILON )
break;
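
The size_t-to-int rewrites in this file are the usual fix for signed/unsigned comparison warnings (GCC/Clang -Wsign-compare, MSVC C4018) that fire when a size_t counter meets an int constant such as HaarFeature::RECT_NUM; with int counters the casts disappear. A minimal reproducer of the warning class being silenced:

    #include <cstddef>

    enum { RECT_NUM = 3 }; // stands in for HaarFeature::RECT_NUM

    int main()
    {
        int total = 0;
        // Old shape: size_t counter vs. int enum constant; the comparison
        // warns under -Wall / /W3 unless the constant is cast, as above.
        for (size_t j = 0; j < (size_t)RECT_NUM; j++) total++;
        // New shape: an int counter compares cleanly, no cast needed.
        for (int j = 0; j < RECT_NUM; j++) total++;
        return total == 6 ? 0 : 1;
    }
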


@@ -44,7 +44,7 @@ __kernel void runHaarClassifierStump(
int iy = get_global_id(1)*xyscale;
sumstep /= sizeof(int);
sqsumstep /= sizeof(int);
if( ix < imgsize.x && iy < imgsize.y )
{
int ntrees;
@@ -52,7 +52,7 @@ __kernel void runHaarClassifierStump(
float s = 0.f;
__global const Stump* stump = stumps;
__global const OptFeature* f;
__global const int* psum = sum + mad24(iy, sumstep, ix);
__global const int* pnsum = psum + mad24(normrect.y, sumstep, normrect.x);
int normarea = normrect.z * normrect.w;
@@ -64,7 +64,7 @@ __kernel void runHaarClassifierStump(
float4 weight, vsval;
int4 ofs, ofs0, ofs1, ofs2;
nf = nf > 0 ? nf : 1.f;
for( stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
ntrees = stages[stageIdx].ntrees;
@@ -73,7 +73,7 @@ __kernel void runHaarClassifierStump(
{
f = optfeatures + stump->featureIdx;
weight = f->weight;
ofs = f->ofs[0];
sval = (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.x;
ofs = f->ofs[1];
@@ -83,14 +83,14 @@ __kernel void runHaarClassifierStump(
ofs = f->ofs[2];
sval += (psum[ofs.x] - psum[ofs.y] - psum[ofs.z] + psum[ofs.w])*weight.z;
}
s += (sval < stump->threshold*nf) ? stump->left : stump->right;
}
if( s < stages[stageIdx].threshold )
break;
}
if( stageIdx == nstages )
{
int nfaces = atomic_inc(facepos);
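
Each weak classifier in this kernel is a stump: one Haar feature sum compared against a threshold scaled by nf, the window's variance-normalization factor derived from sum/sqsum and clamped to 1 above when non-positive, which makes the cascade contrast-invariant. A schematic CPU-side equivalent (field names follow the Stump and Stage structs; featureVal is a stand-in for the boxed Haar sums):

    struct Stump { int featureIdx; float threshold, left, right; };
    struct Stage { int ntrees; float threshold; };

    // Evaluate a stump-based cascade on one window. Returns 1 if every
    // stage passes, -s if stage s rejects (mirroring the CPU predictors).
    template<typename FeatureFn>
    int predictStumps(const Stage* stages, int nstages,
                      const Stump* stumps, float nf, FeatureFn featureVal)
    {
        const Stump* stump = stumps;
        for (int s = 0; s < nstages; s++)
        {
            float votes = 0.f;
            for (int t = 0; t < stages[s].ntrees; t++, stump++)
            {
                float sval = featureVal(stump->featureIdx);
                votes += sval < stump->threshold*nf ? stump->left : stump->right;
            }
            if (votes < stages[s].threshold)
                return -s; // rejected: not object-like at this stage
        }
        return 1;          // passed all stages: candidate detection
    }

    int main()
    {
        Stage st[] = { { 1, 0.5f } };
        Stump sp[] = { { 0, 10.f, 0.f, 1.f } };
        // One stage, one stump; feature value 20 >= 10*nf gives the
        // right vote (1.0), which clears the 0.5 stage threshold.
        return predictStumps(st, 1, sp, 1.f, [](int){ return 20.f; }) == 1 ? 0 : 1;
    }
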
@@ -128,7 +128,7 @@ __kernel void runLBPClassifierStump(
int iy = get_global_id(1)*xyscale;
sumstep /= sizeof(int);
sqsumstep /= sizeof(int);
if( ix < imgsize.x && iy < imgsize.y )
{
int ntrees;
@@ -137,7 +137,7 @@ __kernel void runLBPClassifierStump(
__global const Stump* stump = stumps;
__global const int* bitset = bitsets;
__global const OptFeature* f;
__global const int* psum = sum + mad24(iy, sumstep, ix);
__global const int* pnsum = psum + mad24(normrect.y, sumstep, normrect.x);
int normarea = normrect.z * normrect.w;
@@ -149,7 +149,7 @@ __kernel void runLBPClassifierStump(
float4 weight;
int4 ofs;
nf = nf > 0 ? nf : 1.f;
for( stageIdx = 0; stageIdx < nstages; stageIdx++ )
{
ntrees = stages[stageIdx].ntrees;
@@ -157,17 +157,17 @@ __kernel void runLBPClassifierStump(
for( i = 0; i < ntrees; i++, stump++, bitset += bitsetSize )
{
f = optfeatures + stump->featureIdx;
weight = f->weight;
// compute LBP feature to val
s += (bitset[val >> 5] & (1 << (val & 31))) ? stump->left : stump->right;
}
if( s < stages[stageIdx].threshold )
break;
}
if( stageIdx == nstages )
{
int nfaces = atomic_inc(facepos);
@@ -183,4 +183,3 @@ __kernel void runLBPClassifierStump(
}
}
#endif
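
In the LBP kernel, val is the 8-bit local binary pattern code of a feature's 3x3 cell grid (its computation is elided above), and each tree's subset is a 256-bit table packed into eight 32-bit words: bit val picks the left or right vote. A hedged sketch of both halves (the exact neighbor ordering in OpenCV's kernel may differ from this clockwise convention):

    #include <cstdint>

    // Membership test in a 256-bit set packed into 32-bit words,
    // matching the kernel's bitset[val >> 5] & (1 << (val & 31)).
    static bool inSubset(const uint32_t* bitset, int val)
    {
        return (bitset[val >> 5] & (1u << (val & 31))) != 0;
    }

    // Standard 8-bit LBP code: compare the 8 neighbors of a 3x3 grid
    // against the center cell, one bit per neighbor.
    static int lbpCode(const int cell[9]) // cell[4] is the center
    {
        const int order[8] = { 0, 1, 2, 5, 8, 7, 6, 3 }; // clockwise
        int c = cell[4], code = 0;
        for (int k = 0; k < 8; k++)
            code = (code << 1) | (cell[order[k]] >= c ? 1 : 0);
        return code; // 0..255, the 'val' used above
    }

    int main()
    {
        uint32_t bits[8] = { 0 };
        int cells[9] = { 9, 1, 9, 1, 5, 1, 9, 1, 9 };
        int val = lbpCode(cells);            // corners set, edges clear
        bits[val >> 5] |= 1u << (val & 31);  // put this pattern in the set
        return inSubset(bits, val) ? 0 : 1;
    }
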