diff --git a/modules/gpu/src/cascadeclassifier.cpp b/modules/gpu/src/cascadeclassifier.cpp
index 61f5c9431..1f277f0cf 100644
--- a/modules/gpu/src/cascadeclassifier.cpp
+++ b/modules/gpu/src/cascadeclassifier.cpp
@@ -1,51 +1,51 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other GpuMaterials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-#include <vector>
-#include <iostream>
-
-using namespace cv;
-using namespace cv::gpu;
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other GpuMaterials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+#include <vector>
+#include <iostream>
+
+using namespace cv;
+using namespace cv::gpu;
 using namespace std;

 #if !defined (HAVE_CUDA)
@@ -94,219 +94,221 @@ public:
                        /*out*/unsigned int& numDetections)
     {
         calculateMemReqsAndAllocate(src.size());
-
-        NCVMemPtr src_beg;
-        src_beg.ptr = (void*)src.ptr();
-        src_beg.memtype = NCVMemoryTypeDevice;
-
-        NCVMemSegment src_seg;
-        src_seg.begin = src_beg;
-        src_seg.size = src.step * src.rows;
-
-        NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<Ncv32u>(devProp.textureAlignment), src.cols, src.rows, static_cast<Ncv32u>(src.step), true);
-        ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
-
-        CV_Assert(objects.rows == 1);
-
-        NCVMemPtr objects_beg;
-        objects_beg.ptr = (void*)objects.ptr();
-        objects_beg.memtype = NCVMemoryTypeDevice;
-
-        NCVMemSegment objects_seg;
-        objects_seg.begin = objects_beg;
-        objects_seg.size = objects.step * objects.rows;
-        NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
-        ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
-
-        NcvSize32u roi;
-        roi.width = d_src.width();
-        roi.height = d_src.height();
-
+
+        NCVMemPtr src_beg;
+        src_beg.ptr = (void*)src.ptr();
+        src_beg.memtype = NCVMemoryTypeDevice;
+
+        NCVMemSegment src_seg;
+        src_seg.begin = src_beg;
+        src_seg.size = src.step * src.rows;
+
+        NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<Ncv32u>(devProp.textureAlignment), src.cols, src.rows, static_cast<Ncv32u>(src.step), true);
+        ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
+
+        CV_Assert(objects.rows == 1);
+
+        NCVMemPtr objects_beg;
+        objects_beg.ptr = (void*)objects.ptr();
+        objects_beg.memtype = NCVMemoryTypeDevice;
+
+        NCVMemSegment objects_seg;
+        objects_seg.begin = objects_beg;
+        objects_seg.size = objects.step * objects.rows;
+        NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
+        ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
+
+        NcvSize32u roi;
+        roi.width = d_src.width();
+        roi.height = d_src.height();
+
         NcvSize32u winMinSize(ncvMinSize.width, ncvMinSize.height);
-        Ncv32u flags = 0;
-        flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
-        flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
-
-        ncvStat = ncvDetectObjectsMultiScale_device(
-            d_src, roi, d_rects, numDetections, haar, *h_haarStages,
-            *d_haarStages, *d_haarNodes, *d_haarFeatures,
+        Ncv32u flags = 0;
+        flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
+        flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
+
+        ncvStat = ncvDetectObjectsMultiScale_device(
+            d_src, roi, d_rects, numDetections, haar, *h_haarStages,
+            *d_haarStages, *d_haarNodes, *d_haarFeatures,
             winMinSize,
-            minNeighbors,
-            scaleStep, 1,
-            flags,
-            *gpuAllocator, *cpuAllocator, devProp, 0);
-        ncvAssertReturnNcvStat(ncvStat);
-        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
-
-        return NCV_SUCCESS;
-    }
-
+            minNeighbors,
+            scaleStep, 1,
+            flags,
+            *gpuAllocator, *cpuAllocator, devProp, 0);
+        ncvAssertReturnNcvStat(ncvStat);
+        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
+
+        return NCV_SUCCESS;
+    }
+
     unsigned int process(const GpuMat& image, GpuMat& objectsBuf, float scaleFactor, int minNeighbors, bool findLargestObject, bool visualizeInPlace, cv::Size minSize, cv::Size maxObjectSize)
     {
         CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
-
+
         const int defaultObjSearchNum = 100;
         if (objectsBuf.empty())
         {
             objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
         }
-
+
         cv::Size ncvMinSize = this->getClassifierCvSize();
-
+
         if (ncvMinSize.width < (unsigned)minSize.width && ncvMinSize.height < (unsigned)minSize.height)
         {
             ncvMinSize.width = minSize.width;
             ncvMinSize.height = minSize.height;
         }
-
+
         unsigned int numDetections;
         ncvSafeCall(this->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections));
-
+
         return numDetections;
     }

     cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
-
+
 private:
     static void NCVDebugOutputHandler(const std::string &msg) { CV_Error(CV_GpuApiCallError, msg.c_str()); }
-
-    NCVStatus load(const string& classifierFile)
-    {
-        int devId = cv::gpu::getDevice();
-        ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
-
-        // Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
-        gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<Ncv32u>(devProp.textureAlignment));
-        cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<Ncv32u>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
-
-        Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
-        ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
-
-        h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
-        h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
-        h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
-
-        ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
-
-        ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
-
-        d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
-        d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
-        d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
-
-        ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
-
-        ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-        ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-        ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
-        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
-
-        return NCV_SUCCESS;
-    }
-
-    NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
-    {
-        if (lastAllocatedFrameSize == frameSize)
-        {
-            return NCV_SUCCESS;
-        }
-
-        // Calculate memory requirements and create real allocators
-        NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
-        NCVMemStackAllocator cpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
-
-        NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
-        NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
-
-        ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-        ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-
-        NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
-        ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
-
-        NcvSize32u roi;
-        roi.width = d_src.width();
-        roi.height = d_src.height();
-        Ncv32u numDetections;
-        ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
-            *d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);
-
-        ncvAssertReturnNcvStat(ncvStat);
-        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
-
-        gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
-        cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
-
-        ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
-        ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
-        return NCV_SUCCESS;
-    }
-
-    cudaDeviceProp devProp;
-    NCVStatus ncvStat;
-
-    Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
-    Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
-
-    Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
-    Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
-    Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
-
-    HaarClassifierCascadeDescriptor haar;
-
-    Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
-    Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
-    Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
-
-    Size lastAllocatedFrameSize;
-
-    Ptr<NCVMemStackAllocator> gpuAllocator;
-    Ptr<NCVMemStackAllocator> cpuAllocator;
+
+    NCVStatus load(const string& classifierFile)
+    {
+        int devId = cv::gpu::getDevice();
+        ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
+
+        // Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
+        gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<Ncv32u>(devProp.textureAlignment));
+        cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<Ncv32u>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
+
+        Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
+        ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
+
+        h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
+        h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
+        h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
+
+        ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
+
+        ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
+
+        d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
+        d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
+        d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
+
+        ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
+
+        ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+        ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+        ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
+        ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
+
+        return NCV_SUCCESS;
+    }
+
+    NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
+    {
+        if (lastAllocatedFrameSize == frameSize)
+        {
+            return NCV_SUCCESS;
+        }
+
+        // Calculate memory requirements and create real allocators
+        NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
+        NCVMemStackAllocator cpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
+
+        NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
+        NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
+
+        ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+        ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+
+        NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
+        ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
+
+        NcvSize32u roi;
+        roi.width = d_src.width();
+        roi.height = d_src.height();
+        Ncv32u numDetections;
+        ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
+            *d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);
+
+        ncvAssertReturnNcvStat(ncvStat);
+        ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
+
+        gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
+        cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<Ncv32u>(devProp.textureAlignment));
+
+        ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
+        ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
+
+        lastAllocatedFrameSize = frameSize;
+        return NCV_SUCCESS;
+    }
+
+    cudaDeviceProp devProp;
+    NCVStatus ncvStat;
+
+    Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
+    Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
+
+    Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
+    Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
+    Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
+
+    HaarClassifierCascadeDescriptor haar;
+
+    Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
+    Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
+    Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
+
+    Size lastAllocatedFrameSize;
+
+    Ptr<NCVMemStackAllocator> gpuAllocator;
+    Ptr<NCVMemStackAllocator> cpuAllocator;

     virtual ~HaarCascade(){}
-};
-
+};
+
 cv::Size operator -(const cv::Size& a, const cv::Size& b)
 {
     return cv::Size(a.width - b.width, a.height - b.height);
 }
-
+
 cv::Size operator +(const cv::Size& a, const int& i)
 {
     return cv::Size(a.width + i, a.height + i);
 }
-
+
 cv::Size operator *(const cv::Size& a, const float& f)
 {
     return cv::Size(cvRound(a.width * f), cvRound(a.height * f));
 }
-
+
 cv::Size operator /(const cv::Size& a, const float& f)
-{
+{
     return cv::Size(cvRound(a.width / f), cvRound(a.height / f));
 }

 bool operator <=(const cv::Size& a, const cv::Size& b)
 {
     return a.width <= b.width && a.height <= b.height;
-}
-
+}
+
 struct PyrLavel
 {
     PyrLavel(int _order, float _scale, cv::Size frame, cv::Size window, cv::Size minObjectSize)
@@ -669,18 +671,18 @@ cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }

 void cv::gpu::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }

 bool cv::gpu::CascadeClassifier_GPU::empty() const { return impl == 0; }
-
-Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const
-{
-    return this->empty() ? Size() : impl->getClassifierCvSize();
-}
-
-int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
-{
-    CV_Assert( !this->empty());
+
+Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const
+{
+    return this->empty() ? Size() : impl->getClassifierCvSize();
+}
+
+int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
+{
+    CV_Assert( !this->empty());
     return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, cv::Size());
 }
+
 int cv::gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
 {
     CV_Assert( !this->empty());
@@ -695,261 +697,261 @@ bool cv::gpu::CascadeClassifier_GPU::load(const string& filename)
     std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower);

     if (fext == "nvbin")
-    {
+    {
         impl = new HaarCascade();
         return impl->read(filename);
-    }
-
+    }
+
     FileStorage fs(filename, FileStorage::READ);
-
+
     if (!fs.isOpened())
-    {
+    {
         impl = new HaarCascade();
         return impl->read(filename);
-    }
-
+    }
+
     const char *GPU_CC_LBP = "LBP";
     string featureTypeStr = (string)fs.getFirstTopLevelNode()["featureType"];
     if (featureTypeStr == GPU_CC_LBP)
         impl = new LbpCascade();
     else
         impl = new HaarCascade();
-
+
     impl->read(filename);
     return !this->empty();
-}
-
+}
+
 //////////////////////////////////////////////////////////////////////////////////////////////////////
-
-struct RectConvert
-{
-    Rect operator()(const NcvRect32u& nr) const { return Rect(nr.x, nr.y, nr.width, nr.height); }
-    NcvRect32u operator()(const Rect& nr) const
-    {
-        NcvRect32u rect;
-        rect.x = nr.x;
-        rect.y = nr.y;
-        rect.width = nr.width;
-        rect.height = nr.height;
-        return rect;
-    }
-};
-
-void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32f> *weights)
-{
-    vector<Rect> rects(hypotheses.size());
-    std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());
-
-    if (weights)
-    {
-        vector<int> weights_int;
-        weights_int.assign(weights->begin(), weights->end());
-        cv::groupRectangles(rects, weights_int, groupThreshold, eps);
-    }
-    else
-    {
-        cv::groupRectangles(rects, groupThreshold, eps);
-    }
-    std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
-    hypotheses.resize(rects.size());
-}
-
-NCVStatus loadFromXML(const std::string &filename,
-                      HaarClassifierCascadeDescriptor &haar,
-                      std::vector<HaarStage64> &haarStages,
-                      std::vector<HaarClassifierNode128> &haarClassifierNodes,
-                      std::vector<HaarFeature64> &haarFeatures)
-{
-    NCVStatus ncvStat;
-
-    haar.NumStages = 0;
-    haar.NumClassifierRootNodes = 0;
-    haar.NumClassifierTotalNodes = 0;
-    haar.NumFeatures = 0;
-    haar.ClassifierSize.width = 0;
-    haar.ClassifierSize.height = 0;
-    haar.bHasStumpsOnly = true;
-    haar.bNeedsTiltedII = false;
-    Ncv32u curMaxTreeDepth;
-
-    std::vector<char> xmlFileCont;
-
-    std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
-    haarStages.resize(0);
-    haarClassifierNodes.resize(0);
-    haarFeatures.resize(0);
-
-    Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
-    if (oldCascade.empty())
-    {
-        return NCV_HAAR_XML_LOADING_EXCEPTION;
-    }
-
-    haar.ClassifierSize.width = oldCascade->orig_window_size.width;
-    haar.ClassifierSize.height = oldCascade->orig_window_size.height;
-
-    int stagesCound = oldCascade->count;
-    for(int s = 0; s < stagesCound; ++s) // by stages
-    {
-        HaarStage64 curStage;
-        curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
-
-        curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
-
-        int treesCount = oldCascade->stage_classifier[s].count;
-        for(int t = 0; t < treesCount; ++t) // by trees
-        {
-            Ncv32u nodeId = 0;
-            CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
-
-            int nodesCount = tree->count;
-            for(int n = 0; n < nodesCount; ++n) //by features
-            {
-                CvHaarFeature* feature = &tree->haar_feature[n];
-
-                HaarClassifierNode128 curNode;
-                curNode.setThreshold(tree->threshold[n]);
-
-                NcvBool bIsLeftNodeLeaf = false;
-                NcvBool bIsRightNodeLeaf = false;
-
-                HaarClassifierNodeDescriptor32 nodeLeft;
-                if ( tree->left[n] <= 0 )
-                {
-                    Ncv32f leftVal = tree->alpha[-tree->left[n]];
-                    ncvStat = nodeLeft.create(leftVal);
-                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
-                    bIsLeftNodeLeaf = true;
-                }
-                else
-                {
-                    Ncv32u leftNodeOffset = tree->left[n];
-                    nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
-                    haar.bHasStumpsOnly = false;
-                }
-                curNode.setLeftNodeDesc(nodeLeft);
-
-                HaarClassifierNodeDescriptor32 nodeRight;
-                if ( tree->right[n] <= 0 )
-                {
-                    Ncv32f rightVal = tree->alpha[-tree->right[n]];
-                    ncvStat = nodeRight.create(rightVal);
-                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
-                    bIsRightNodeLeaf = true;
-                }
-                else
-                {
-                    Ncv32u rightNodeOffset = tree->right[n];
-                    nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
-                    haar.bHasStumpsOnly = false;
-                }
-                curNode.setRightNodeDesc(nodeRight);
-
-                Ncv32u tiltedVal = feature->tilted;
-                haar.bNeedsTiltedII = (tiltedVal != 0);
-
-                Ncv32u featureId = 0;
-                for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
-                {
-                    Ncv32u rectX = feature->rect[l].r.x;
-                    Ncv32u rectY = feature->rect[l].r.y;
-                    Ncv32u rectWidth = feature->rect[l].r.width;
-                    Ncv32u rectHeight = feature->rect[l].r.height;
-
-                    Ncv32f rectWeight = feature->rect[l].weight;
-
-                    if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
-                        break;
-
-                    HaarFeature64 curFeature;
-                    ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
-                    curFeature.setWeight(rectWeight);
-                    ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
-                    haarFeatures.push_back(curFeature);
-
-                    featureId++;
-                }
-
-                HaarFeatureDescriptor32 tmpFeatureDesc;
-                ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
-                    featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
-                ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
-                curNode.setFeatureDesc(tmpFeatureDesc);
-
-                if (!nodeId)
-                {
-                    //root node
-                    haarClassifierNodes.push_back(curNode);
-                    curMaxTreeDepth = 1;
-                }
-                else
-                {
-                    //other node
-                    h_TmpClassifierNotRootNodes.push_back(curNode);
-                    curMaxTreeDepth++;
-                }
-
-                nodeId++;
-            }
-        }
-
-        curStage.setNumClassifierRootNodes(treesCount);
-        haarStages.push_back(curStage);
-    }
-
-    //fill in cascade stats
-    haar.NumStages = static_cast<Ncv32u>(haarStages.size());
-    haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
-    haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
-    haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
-
-    //merge root and leaf nodes in one classifiers array
-    Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
-    for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+
+struct RectConvert
+{
+    Rect operator()(const NcvRect32u& nr) const { return Rect(nr.x, nr.y, nr.width, nr.height); }
+    NcvRect32u operator()(const Rect& nr) const
+    {
+        NcvRect32u rect;
+        rect.x = nr.x;
+        rect.y = nr.y;
+        rect.width = nr.width;
+        rect.height = nr.height;
+        return rect;
+    }
+};
+
+void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32f> *weights)
+{
+    vector<Rect> rects(hypotheses.size());
+    std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());
+
+    if (weights)
+    {
+        vector<int> weights_int;
+        weights_int.assign(weights->begin(), weights->end());
+        cv::groupRectangles(rects, weights_int, groupThreshold, eps);
+    }
+    else
+    {
+        cv::groupRectangles(rects, groupThreshold, eps);
+    }
+    std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
+    hypotheses.resize(rects.size());
+}
+
+NCVStatus loadFromXML(const std::string &filename,
+                      HaarClassifierCascadeDescriptor &haar,
+                      std::vector<HaarStage64> &haarStages,
+                      std::vector<HaarClassifierNode128> &haarClassifierNodes,
+                      std::vector<HaarFeature64> &haarFeatures)
+{
+    NCVStatus ncvStat;
+
+    haar.NumStages = 0;
+    haar.NumClassifierRootNodes = 0;
+    haar.NumClassifierTotalNodes = 0;
+    haar.NumFeatures = 0;
+    haar.ClassifierSize.width = 0;
+    haar.ClassifierSize.height = 0;
+    haar.bHasStumpsOnly = true;
+    haar.bNeedsTiltedII = false;
+    Ncv32u curMaxTreeDepth;
+
+    std::vector<char> xmlFileCont;
+
+    std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
+    haarStages.resize(0);
+    haarClassifierNodes.resize(0);
+    haarFeatures.resize(0);
+
+    Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
+    if (oldCascade.empty())
+    {
+        return NCV_HAAR_XML_LOADING_EXCEPTION;
+    }
+
+    haar.ClassifierSize.width = oldCascade->orig_window_size.width;
+    haar.ClassifierSize.height = oldCascade->orig_window_size.height;
+
+    int stagesCound = oldCascade->count;
+    for(int s = 0; s < stagesCound; ++s) // by stages
+    {
+        HaarStage64 curStage;
+        curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
+
+        curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
+
+        int treesCount = oldCascade->stage_classifier[s].count;
+        for(int t = 0; t < treesCount; ++t) // by trees
+        {
+            Ncv32u nodeId = 0;
+            CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
+
+            int nodesCount = tree->count;
+            for(int n = 0; n < nodesCount; ++n) //by features
+            {
+                CvHaarFeature* feature = &tree->haar_feature[n];
+
+                HaarClassifierNode128 curNode;
+                curNode.setThreshold(tree->threshold[n]);
+
+                NcvBool bIsLeftNodeLeaf = false;
+                NcvBool bIsRightNodeLeaf = false;
+
+                HaarClassifierNodeDescriptor32 nodeLeft;
+                if ( tree->left[n] <= 0 )
+                {
+                    Ncv32f leftVal = tree->alpha[-tree->left[n]];
+                    ncvStat = nodeLeft.create(leftVal);
+                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
+                    bIsLeftNodeLeaf = true;
+                }
+                else
+                {
+                    Ncv32u leftNodeOffset = tree->left[n];
+                    nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
+                    haar.bHasStumpsOnly = false;
+                }
+                curNode.setLeftNodeDesc(nodeLeft);
+
+                HaarClassifierNodeDescriptor32 nodeRight;
+                if ( tree->right[n] <= 0 )
+                {
+                    Ncv32f rightVal = tree->alpha[-tree->right[n]];
+                    ncvStat = nodeRight.create(rightVal);
+                    ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
+                    bIsRightNodeLeaf = true;
+                }
+                else
+                {
+                    Ncv32u rightNodeOffset = tree->right[n];
+                    nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
+                    haar.bHasStumpsOnly = false;
+                }
+                curNode.setRightNodeDesc(nodeRight);
+
+                Ncv32u tiltedVal = feature->tilted;
+                haar.bNeedsTiltedII = (tiltedVal != 0);
+
+                Ncv32u featureId = 0;
+                for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
+                {
+                    Ncv32u rectX = feature->rect[l].r.x;
+                    Ncv32u rectY = feature->rect[l].r.y;
+                    Ncv32u rectWidth = feature->rect[l].r.width;
+                    Ncv32u rectHeight = feature->rect[l].r.height;
+
+                    Ncv32f rectWeight = feature->rect[l].weight;
+
+                    if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
+                        break;
+
+                    HaarFeature64 curFeature;
+                    ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
+                    curFeature.setWeight(rectWeight);
+                    ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
+                    haarFeatures.push_back(curFeature);
+
+                    featureId++;
+                }
+
+                HaarFeatureDescriptor32 tmpFeatureDesc;
+                ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
+                    featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
+                ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
+                curNode.setFeatureDesc(tmpFeatureDesc);
+
+                if (!nodeId)
+                {
+                    //root node
+                    haarClassifierNodes.push_back(curNode);
+                    curMaxTreeDepth = 1;
+                }
+                else
+                {
+                    //other node
+                    h_TmpClassifierNotRootNodes.push_back(curNode);
+                    curMaxTreeDepth++;
+                }
+
+                nodeId++;
+            }
+        }
+
+        curStage.setNumClassifierRootNodes(treesCount);
+        haarStages.push_back(curStage);
+    }
+
+    //fill in cascade stats
+    haar.NumStages = static_cast<Ncv32u>(haarStages.size());
+    haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
+    haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
+    haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
+
+    //merge root and leaf nodes in one classifiers array
+    Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
+    for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
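
For readers coming to this patch cold: the HaarCascade implementation above is driven through the public cv::gpu::CascadeClassifier_GPU wrapper. Below is a minimal usage sketch against the OpenCV 2.4 gpu API as it appears in this file; it is illustrative rather than part of the change, and the cascade file name and image path are placeholders.

// Usage sketch only -- not part of the patch above. File names are
// placeholders; error handling is reduced to early returns.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::gpu::CascadeClassifier_GPU cascade;
    if (!cascade.load("haarcascade_frontalface_alt.xml")) // nvbin/XML dispatch happens in load() above
        return 1;

    cv::Mat frame = cv::imread("frame.png", 0); // the detector expects an 8-bit single-channel image
    if (frame.empty())
        return 1;

    cv::gpu::GpuMat d_frame(frame), d_objects;

    // detectMultiScale() fills d_objects, a 1xN row of cv::Rect created via
    // DataType<Rect>::type as in HaarCascade::process() above, and returns N.
    int n = cascade.detectMultiScale(d_frame, d_objects, 1.2, 4, cv::Size(24, 24));

    if (n > 0)
    {
        cv::Mat objectsHost;
        d_objects.colRange(0, n).download(objectsHost); // copy back only the detected prefix
        const cv::Rect* rects = objectsHost.ptr<cv::Rect>();
        for (int i = 0; i < n; ++i)
            cv::rectangle(frame, rects[i], cv::Scalar(255));
    }
    return 0;
}

Passing the same d_objects buffer on every call lets the detector reuse its result allocation across frames, which is why the API exposes an objectsBuf parameter instead of returning a fresh container per invocation.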