renamed gpu module -> cuda

modules/cuda/src/calib3d.cpp (new file, 292 lines)
@@ -0,0 +1,292 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::cuda;

#if !defined HAVE_CUDA || defined(CUDA_DISABLER)

void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::cuda::projectPoints(const GpuMat&, const Mat&, const Mat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::cuda::solvePnPRansac(const Mat&, const Mat&, const Mat&, const Mat&, Mat&, Mat&, bool, int, float, int, std::vector<int>*) { throw_no_cuda(); }

#else

namespace cv { namespace cuda { namespace device
{
    namespace transform_points
    {
        void call(const PtrStepSz<float3> src, const float* rot, const float* transl, PtrStepSz<float3> dst, cudaStream_t stream);
    }

    namespace project_points
    {
        void call(const PtrStepSz<float3> src, const float* rot, const float* transl, const float* proj, PtrStepSz<float2> dst, cudaStream_t stream);
    }

    namespace solve_pnp_ransac
    {
        int maxNumIters();

        void computeHypothesisScores(
            const int num_hypotheses, const int num_points, const float* rot_matrices,
            const float3* transl_vectors, const float3* object, const float2* image,
            const float dist_threshold, int* hypothesis_scores);
    }
}}}

using namespace ::cv::cuda::device;

namespace
{
    void transformPointsCaller(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, cudaStream_t stream)
    {
        CV_Assert(src.rows == 1 && src.cols > 0 && src.type() == CV_32FC3);
        CV_Assert(rvec.size() == Size(3, 1) && rvec.type() == CV_32F);
        CV_Assert(tvec.size() == Size(3, 1) && tvec.type() == CV_32F);

        // Convert rotation vector into matrix
        Mat rot;
        Rodrigues(rvec, rot);

        dst.create(src.size(), src.type());
        transform_points::call(src, rot.ptr<float>(), tvec.ptr<float>(), dst, stream);
    }
}

void cv::cuda::transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, Stream& stream)
{
    transformPointsCaller(src, rvec, tvec, dst, StreamAccessor::getStream(stream));
}

namespace
{
    void projectPointsCaller(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, cudaStream_t stream)
    {
        CV_Assert(src.rows == 1 && src.cols > 0 && src.type() == CV_32FC3);
        CV_Assert(rvec.size() == Size(3, 1) && rvec.type() == CV_32F);
        CV_Assert(tvec.size() == Size(3, 1) && tvec.type() == CV_32F);
        CV_Assert(camera_mat.size() == Size(3, 3) && camera_mat.type() == CV_32F);
        CV_Assert(dist_coef.empty()); // Undistortion isn't supported

        // Convert rotation vector into matrix
        Mat rot;
        Rodrigues(rvec, rot);

        dst.create(src.size(), CV_32FC2);
        project_points::call(src, rot.ptr<float>(), tvec.ptr<float>(), camera_mat.ptr<float>(), dst, stream);
    }
}

void cv::cuda::projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, Stream& stream)
{
    projectPointsCaller(src, rvec, tvec, camera_mat, dist_coef, dst, StreamAccessor::getStream(stream));
}
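For reference, a minimal host-side sketch (not part of this commit) of how these two wrappers are typically called; it assumes a CUDA-capable device, and the point layout, rvec/tvec (1x3 CV_32F) and camera matrix (3x3 CV_32F) follow the asserts above:

    cv::Mat pts_host(1, 2, CV_32FC3);
    pts_host.at<cv::Point3f>(0, 0) = cv::Point3f(0.f, 0.f, 5.f);
    pts_host.at<cv::Point3f>(0, 1) = cv::Point3f(1.f, -1.f, 4.f);

    cv::cuda::GpuMat pts(pts_host), pts_cam, pts_px;
    cv::Mat rvec = cv::Mat::zeros(1, 3, CV_32F);
    cv::Mat tvec = (cv::Mat_<float>(1, 3) << 0.f, 0.f, 1.f);
    cv::Mat K    = (cv::Mat_<float>(3, 3) << 800.f, 0.f, 320.f,
                                               0.f, 800.f, 240.f,
                                               0.f,   0.f,   1.f);

    // Rotate/translate the points, then project them with the pinhole model.
    cv::cuda::transformPoints(pts, rvec, tvec, pts_cam, cv::cuda::Stream::Null());
    cv::cuda::projectPoints(pts, rvec, tvec, K, cv::Mat(), pts_px, cv::cuda::Stream::Null()); // dist_coef must be empty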

namespace
{
    // Selects subset_size random different points from [0, num_points - 1] range
    void selectRandom(int subset_size, int num_points, std::vector<int>& subset)
    {
        subset.resize(subset_size);
        for (int i = 0; i < subset_size; ++i)
        {
            bool was;
            do
            {
                subset[i] = rand() % num_points;
                was = false;
                for (int j = 0; j < i; ++j)
                    if (subset[j] == subset[i])
                    {
                        was = true;
                        break;
                    }
            } while (was);
        }
    }

    // Computes a rotation/translation pair from small subsets of the input data
    class TransformHypothesesGenerator : public ParallelLoopBody
    {
    public:
        TransformHypothesesGenerator(const Mat& object_, const Mat& image_, const Mat& dist_coef_,
                                     const Mat& camera_mat_, int num_points_, int subset_size_,
                                     Mat rot_matrices_, Mat transl_vectors_)
            : object(&object_), image(&image_), dist_coef(&dist_coef_), camera_mat(&camera_mat_),
              num_points(num_points_), subset_size(subset_size_), rot_matrices(rot_matrices_),
              transl_vectors(transl_vectors_) {}

        void operator()(const Range& range) const
        {
            // Input data for generation of the current hypothesis
            std::vector<int> subset_indices(subset_size);
            Mat_<Point3f> object_subset(1, subset_size);
            Mat_<Point2f> image_subset(1, subset_size);

            // Current hypothesis data
            Mat rot_vec(1, 3, CV_64F);
            Mat rot_mat(3, 3, CV_64F);
            Mat transl_vec(1, 3, CV_64F);

            for (int iter = range.start; iter < range.end; ++iter)
            {
                selectRandom(subset_size, num_points, subset_indices);
                for (int i = 0; i < subset_size; ++i)
                {
                    object_subset(0, i) = object->at<Point3f>(subset_indices[i]);
                    image_subset(0, i) = image->at<Point2f>(subset_indices[i]);
                }

                solvePnP(object_subset, image_subset, *camera_mat, *dist_coef, rot_vec, transl_vec);

                // Remember translation vector
                Mat transl_vec_ = transl_vectors.colRange(iter * 3, (iter + 1) * 3);
                transl_vec = transl_vec.reshape(0, 1);
                transl_vec.convertTo(transl_vec_, CV_32F);

                // Remember rotation matrix
                Rodrigues(rot_vec, rot_mat);
                Mat rot_mat_ = rot_matrices.colRange(iter * 9, (iter + 1) * 9).reshape(0, 3);
                rot_mat.convertTo(rot_mat_, CV_32F);
            }
        }

        const Mat* object;
        const Mat* image;
        const Mat* dist_coef;
        const Mat* camera_mat;
        int num_points;
        int subset_size;

        // Hypotheses storage (global)
        Mat rot_matrices;
        Mat transl_vectors;
    };
}
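selectRandom above draws the subset by rejection sampling, which is quadratic in subset_size; for the fixed subset_size of 4 used by solvePnPRansac below that is negligible. For larger subsets an equivalent selection could be written as a partial Fisher-Yates shuffle, trading O(num_points) scratch space for the quadratic scan. A sketch (not part of this commit, using the same rand() source as above):

    void selectRandomAlt(int subset_size, int num_points, std::vector<int>& subset)
    {
        std::vector<int> all(num_points);
        for (int i = 0; i < num_points; ++i)
            all[i] = i;
        // Partial Fisher-Yates: after i swaps the first i entries are a uniform
        // sample of distinct indices from [0, num_points - 1].
        for (int i = 0; i < subset_size; ++i)
            std::swap(all[i], all[i + rand() % (num_points - i)]);
        subset.assign(all.begin(), all.begin() + subset_size);
    }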

void cv::cuda::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
                              const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess,
                              int num_iters, float max_dist, int min_inlier_count,
                              std::vector<int>* inliers)
{
    (void)min_inlier_count;
    CV_Assert(object.rows == 1 && object.cols > 0 && object.type() == CV_32FC3);
    CV_Assert(image.rows == 1 && image.cols > 0 && image.type() == CV_32FC2);
    CV_Assert(object.cols == image.cols);
    CV_Assert(camera_mat.size() == Size(3, 3) && camera_mat.type() == CV_32F);
    CV_Assert(!use_extrinsic_guess); // We don't support initial guess for now
    CV_Assert(num_iters <= solve_pnp_ransac::maxNumIters());

    const int subset_size = 4;
    const int num_points = object.cols;
    CV_Assert(num_points >= subset_size);

    // Unapply distortion and intrinsic camera transformations
    Mat eye_camera_mat = Mat::eye(3, 3, CV_32F);
    Mat empty_dist_coef;
    Mat image_normalized;
    undistortPoints(image, image_normalized, camera_mat, dist_coef, Mat(), eye_camera_mat);

    // Hypotheses storage (global)
    Mat rot_matrices(1, num_iters * 9, CV_32F);
    Mat transl_vectors(1, num_iters * 3, CV_32F);

    // Generate set of hypotheses using small subsets of the input data
    TransformHypothesesGenerator body(object, image_normalized, empty_dist_coef, eye_camera_mat,
                                      num_points, subset_size, rot_matrices, transl_vectors);
    parallel_for_(Range(0, num_iters), body);

    // Compute scores (i.e. number of inliers) for each hypothesis
    GpuMat d_object(object);
    GpuMat d_image_normalized(image_normalized);
    GpuMat d_hypothesis_scores(1, num_iters, CV_32S);
    solve_pnp_ransac::computeHypothesisScores(
        num_iters, num_points, rot_matrices.ptr<float>(), transl_vectors.ptr<float3>(),
        d_object.ptr<float3>(), d_image_normalized.ptr<float2>(), max_dist * max_dist,
        d_hypothesis_scores.ptr<int>());

    // Find the best hypothesis index
    Point best_idx;
    double best_score;
    cuda::minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
    int num_inliers = static_cast<int>(best_score);

    // Extract the best hypothesis data

    Mat rot_mat = rot_matrices.colRange(best_idx.x * 9, (best_idx.x + 1) * 9).reshape(0, 3);
    Rodrigues(rot_mat, rvec);
    rvec = rvec.reshape(0, 1);

    tvec = transl_vectors.colRange(best_idx.x * 3, (best_idx.x + 1) * 3).clone();
    tvec = tvec.reshape(0, 1);

    // Build vector of inlier indices
    if (inliers != NULL)
    {
        inliers->clear();
        inliers->reserve(num_inliers);

        Point3f p, p_transf;
        Point2f p_proj;
        const float* rot = rot_mat.ptr<float>();
        const float* transl = tvec.ptr<float>();

        for (int i = 0; i < num_points; ++i)
        {
            p = object.at<Point3f>(0, i);
            p_transf.x = rot[0] * p.x + rot[1] * p.y + rot[2] * p.z + transl[0];
            p_transf.y = rot[3] * p.x + rot[4] * p.y + rot[5] * p.z + transl[1];
            p_transf.z = rot[6] * p.x + rot[7] * p.y + rot[8] * p.z + transl[2];
            p_proj.x = p_transf.x / p_transf.z;
            p_proj.y = p_transf.y / p_transf.z;
            if (norm(p_proj - image_normalized.at<Point2f>(0, i)) < max_dist)
                inliers->push_back(i);
        }
    }
}

#endif
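A sketch (not part of this commit) of how the wrapper above might be driven; object3d, points2d and K are assumed to be prepared by the caller in the layouts required by the asserts at the top of the function:

    void estimatePoseGpu(const cv::Mat& object3d,  // 1xN CV_32FC3 model points
                         const cv::Mat& points2d,  // 1xN CV_32FC2 image points
                         const cv::Mat& K)         // 3x3 CV_32F camera matrix
    {
        cv::Mat rvec, tvec;
        std::vector<int> inliers;
        cv::cuda::solvePnPRansac(object3d, points2d, K, cv::Mat(), rvec, tvec,
                                 false,   // use_extrinsic_guess: not supported here
                                 200,     // num_iters, capped by solve_pnp_ransac::maxNumIters()
                                 8.0f,    // max_dist: inlier reprojection threshold
                                 100,     // min_inlier_count: currently unused (see the (void) cast above)
                                 &inliers);
        // rvec/tvec now hold the best hypothesis; inliers lists the consistent points.
    }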

modules/cuda/src/cascadeclassifier.cpp (new file, 758 lines)
@@ -0,0 +1,758 @@
#include "precomp.hpp"
|
||||
#include "opencv2/objdetect/objdetect_c.h"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::cuda;
|
||||
|
||||
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
|
||||
|
||||
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_no_cuda(); }
|
||||
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String&) { throw_no_cuda(); }
|
||||
cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_no_cuda(); }
|
||||
bool cv::cuda::CascadeClassifier_GPU::empty() const { throw_no_cuda(); return true; }
|
||||
bool cv::cuda::CascadeClassifier_GPU::load(const String&) { throw_no_cuda(); return true; }
|
||||
Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const { throw_no_cuda(); return Size();}
|
||||
void cv::cuda::CascadeClassifier_GPU::release() { throw_no_cuda(); }
|
||||
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_no_cuda(); return -1;}
|
||||
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_no_cuda(); return -1;}
|
||||
|
||||
#else
|
||||
|
||||
struct cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
|
||||
{
|
||||
public:
|
||||
CascadeClassifierImpl(){}
|
||||
virtual ~CascadeClassifierImpl(){}
|
||||
|
||||
virtual unsigned int process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
|
||||
bool findLargestObject, bool visualizeInPlace, cv::Size ncvMinSize, cv::Size maxObjectSize) = 0;
|
||||
|
||||
virtual cv::Size getClassifierCvSize() const = 0;
|
||||
virtual bool read(const String& classifierAsXml) = 0;
|
||||
};
|
||||
|
||||
#ifndef HAVE_OPENCV_CUDALEGACY
|
||||
|
||||
struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
|
||||
{
|
||||
public:
|
||||
HaarCascade()
|
||||
{
|
||||
throw_no_cuda();
|
||||
}
|
||||
|
||||
unsigned int process(const GpuMat&, GpuMat&, float, int, bool, bool, cv::Size, cv::Size)
|
||||
{
|
||||
throw_no_cuda();
|
||||
return 0;
|
||||
}
|
||||
|
||||
cv::Size getClassifierCvSize() const
|
||||
{
|
||||
throw_no_cuda();
|
||||
return cv::Size();
|
||||
}
|
||||
|
||||
bool read(const String&)
|
||||
{
|
||||
throw_no_cuda();
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
#else
|
||||
|
||||
struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
|
||||
{
|
||||
public:
|
||||
HaarCascade() : lastAllocatedFrameSize(-1, -1)
|
||||
{
|
||||
ncvSetDebugOutputHandler(NCVDebugOutputHandler);
|
||||
}
|
||||
|
||||
bool read(const String& filename)
|
||||
{
|
||||
ncvSafeCall( load(filename) );
|
||||
return true;
|
||||
}
|
||||
|
||||
NCVStatus process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
|
||||
bool findLargestObject, bool visualizeInPlace, cv::Size ncvMinSize,
|
||||
/*out*/unsigned int& numDetections)
|
||||
{
|
||||
calculateMemReqsAndAllocate(src.size());
|
||||
|
||||
NCVMemPtr src_beg;
|
||||
src_beg.ptr = (void*)src.ptr<Ncv8u>();
|
||||
src_beg.memtype = NCVMemoryTypeDevice;
|
||||
|
||||
NCVMemSegment src_seg;
|
||||
src_seg.begin = src_beg;
|
||||
src_seg.size = src.step * src.rows;
|
||||
|
||||
NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<int>(devProp.textureAlignment), src.cols, src.rows, static_cast<int>(src.step), true);
|
||||
ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
|
||||
|
||||
CV_Assert(objects.rows == 1);
|
||||
|
||||
NCVMemPtr objects_beg;
|
||||
objects_beg.ptr = (void*)objects.ptr<NcvRect32u>();
|
||||
objects_beg.memtype = NCVMemoryTypeDevice;
|
||||
|
||||
NCVMemSegment objects_seg;
|
||||
objects_seg.begin = objects_beg;
|
||||
objects_seg.size = objects.step * objects.rows;
|
||||
NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
|
||||
ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
|
||||
|
||||
NcvSize32u roi;
|
||||
roi.width = d_src.width();
|
||||
roi.height = d_src.height();
|
||||
|
||||
NcvSize32u winMinSize(ncvMinSize.width, ncvMinSize.height);
|
||||
|
||||
Ncv32u flags = 0;
|
||||
flags |= findLargestObject? NCVPipeObjDet_FindLargestObject : 0;
|
||||
flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;
|
||||
|
||||
ncvStat = ncvDetectObjectsMultiScale_device(
|
||||
d_src, roi, d_rects, numDetections, haar, *h_haarStages,
|
||||
*d_haarStages, *d_haarNodes, *d_haarFeatures,
|
||||
winMinSize,
|
||||
minNeighbors,
|
||||
scaleStep, 1,
|
||||
flags,
|
||||
*gpuAllocator, *cpuAllocator, devProp, 0);
|
||||
ncvAssertReturnNcvStat(ncvStat);
|
||||
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
|
||||
|
||||
return NCV_SUCCESS;
|
||||
}
|
||||
|
||||
unsigned int process(const GpuMat& image, GpuMat& objectsBuf, float scaleFactor, int minNeighbors,
|
||||
bool findLargestObject, bool visualizeInPlace, cv::Size minSize, cv::Size /*maxObjectSize*/)
|
||||
{
|
||||
CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
|
||||
|
||||
const int defaultObjSearchNum = 100;
|
||||
if (objectsBuf.empty())
|
||||
{
|
||||
objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
|
||||
}
|
||||
|
||||
cv::Size ncvMinSize = this->getClassifierCvSize();
|
||||
|
||||
if (ncvMinSize.width < minSize.width && ncvMinSize.height < minSize.height)
|
||||
{
|
||||
ncvMinSize.width = minSize.width;
|
||||
ncvMinSize.height = minSize.height;
|
||||
}
|
||||
|
||||
unsigned int numDetections;
|
||||
ncvSafeCall(this->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections));
|
||||
|
||||
return numDetections;
|
||||
}
|
||||
|
||||
cv::Size getClassifierCvSize() const { return cv::Size(haar.ClassifierSize.width, haar.ClassifierSize.height); }
|
||||
|
||||
private:
|
||||
static void NCVDebugOutputHandler(const String &msg) { CV_Error(cv::Error::GpuApiCallError, msg.c_str()); }
|
||||
|
||||
NCVStatus load(const String& classifierFile)
|
||||
{
|
||||
int devId = cv::cuda::getDevice();
|
||||
ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
|
||||
|
||||
// Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
|
||||
gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
|
||||
cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
|
||||
|
||||
ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
|
||||
|
||||
Ncv32u haarNumStages, haarNumNodes, haarNumFeatures;
|
||||
ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
|
||||
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
|
||||
|
||||
h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
|
||||
h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
|
||||
h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
|
||||
|
||||
ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(h_haarFeatures->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
|
||||
|
||||
ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
|
||||
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
|
||||
|
||||
d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
|
||||
d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
|
||||
d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
|
||||
|
||||
ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(d_haarFeatures->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
|
||||
|
||||
ncvStat = h_haarStages->copySolid(*d_haarStages, 0);
|
||||
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
||||
ncvStat = h_haarNodes->copySolid(*d_haarNodes, 0);
|
||||
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
||||
ncvStat = h_haarFeatures->copySolid(*d_haarFeatures, 0);
|
||||
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error copying cascade to GPU", NCV_CUDA_ERROR);
|
||||
|
||||
return NCV_SUCCESS;
|
||||
}
|
||||
|
||||
NCVStatus calculateMemReqsAndAllocate(const Size& frameSize)
|
||||
{
|
||||
if (lastAllocatedFrameSize == frameSize)
|
||||
{
|
||||
return NCV_SUCCESS;
|
||||
}
|
||||
|
||||
// Calculate memory requirements and create real allocators
|
||||
NCVMemStackAllocator gpuCounter(static_cast<int>(devProp.textureAlignment));
|
||||
NCVMemStackAllocator cpuCounter(static_cast<int>(devProp.textureAlignment));
|
||||
|
||||
ncvAssertPrintReturn(gpuCounter.isInitialized(), "Error creating GPU memory counter", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(cpuCounter.isInitialized(), "Error creating CPU memory counter", NCV_CUDA_ERROR);
|
||||
|
||||
NCVMatrixAlloc<Ncv8u> d_src(gpuCounter, frameSize.width, frameSize.height);
|
||||
NCVMatrixAlloc<Ncv8u> h_src(cpuCounter, frameSize.width, frameSize.height);
|
||||
|
||||
ncvAssertReturn(d_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
||||
ncvAssertReturn(h_src.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
||||
|
||||
NCVVectorAlloc<NcvRect32u> d_rects(gpuCounter, 100);
|
||||
ncvAssertReturn(d_rects.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
|
||||
|
||||
NcvSize32u roi;
|
||||
roi.width = d_src.width();
|
||||
roi.height = d_src.height();
|
||||
Ncv32u numDetections;
|
||||
ncvStat = ncvDetectObjectsMultiScale_device(d_src, roi, d_rects, numDetections, haar, *h_haarStages,
|
||||
*d_haarStages, *d_haarNodes, *d_haarFeatures, haar.ClassifierSize, 4, 1.2f, 1, 0, gpuCounter, cpuCounter, devProp, 0);
|
||||
|
||||
ncvAssertReturnNcvStat(ncvStat);
|
||||
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
|
||||
|
||||
gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
|
||||
cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
|
||||
|
||||
ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
|
||||
ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
|
||||
|
||||
lastAllocatedFrameSize = frameSize;
|
||||
return NCV_SUCCESS;
|
||||
}
|
||||
|
||||
cudaDeviceProp devProp;
|
||||
NCVStatus ncvStat;
|
||||
|
||||
Ptr<NCVMemNativeAllocator> gpuCascadeAllocator;
|
||||
Ptr<NCVMemNativeAllocator> cpuCascadeAllocator;
|
||||
|
||||
Ptr<NCVVectorAlloc<HaarStage64> > h_haarStages;
|
||||
Ptr<NCVVectorAlloc<HaarClassifierNode128> > h_haarNodes;
|
||||
Ptr<NCVVectorAlloc<HaarFeature64> > h_haarFeatures;
|
||||
|
||||
HaarClassifierCascadeDescriptor haar;
|
||||
|
||||
Ptr<NCVVectorAlloc<HaarStage64> > d_haarStages;
|
||||
Ptr<NCVVectorAlloc<HaarClassifierNode128> > d_haarNodes;
|
||||
Ptr<NCVVectorAlloc<HaarFeature64> > d_haarFeatures;
|
||||
|
||||
Size lastAllocatedFrameSize;
|
||||
|
||||
Ptr<NCVMemStackAllocator> gpuAllocator;
|
||||
Ptr<NCVMemStackAllocator> cpuAllocator;
|
||||
|
||||
virtual ~HaarCascade(){}
|
||||
};
|
||||
|
||||
#endif

cv::Size operator -(const cv::Size& a, const cv::Size& b)
{
    return cv::Size(a.width - b.width, a.height - b.height);
}

cv::Size operator +(const cv::Size& a, const int& i)
{
    return cv::Size(a.width + i, a.height + i);
}

cv::Size operator *(const cv::Size& a, const float& f)
{
    return cv::Size(cvRound(a.width * f), cvRound(a.height * f));
}

cv::Size operator /(const cv::Size& a, const float& f)
{
    return cv::Size(cvRound(a.width / f), cvRound(a.height / f));
}

bool operator <=(const cv::Size& a, const cv::Size& b)
{
    return a.width <= b.width && a.height <= b.height;
}

struct PyrLavel
{
    PyrLavel(int _order, float _scale, cv::Size frame, cv::Size window, cv::Size minObjectSize)
    {
        do
        {
            order = _order;
            scale = pow(_scale, order);
            sFrame = frame / scale;
            workArea = sFrame - window + 1;
            sWindow = window * scale;
            _order++;
        } while (sWindow <= minObjectSize);
    }

    bool isFeasible(cv::Size maxObj)
    {
        return workArea.width > 0 && workArea.height > 0 && sWindow <= maxObj;
    }

    PyrLavel next(float factor, cv::Size frame, cv::Size window, cv::Size minObjectSize)
    {
        return PyrLavel(order + 1, factor, frame, window, minObjectSize);
    }

    int order;
    float scale;
    cv::Size sFrame;
    cv::Size workArea;
    cv::Size sWindow;
};
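For illustration only (not part of this commit), the level enumeration above can be traced on the host; assuming a 640x480 frame, a 24x24 classifier window, a scale step of 1.2 and no size limits, this loop mirrors the walk that process() performs below:

    PyrLavel level(0, 1.2f, cv::Size(640, 480), cv::Size(24, 24), cv::Size());
    while (level.isFeasible(cv::Size(640, 480)))
    {
        // level.scale grows as 1.2^order, level.sWindow is the object size this
        // level detects in the original frame, level.workArea is the search area
        // at this scale; log them here if needed.
        level = level.next(1.2f, cv::Size(640, 480), cv::Size(24, 24), cv::Size());
    }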
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace lbp
|
||||
{
|
||||
void classifyPyramid(int frameW,
|
||||
int frameH,
|
||||
int windowW,
|
||||
int windowH,
|
||||
float initalScale,
|
||||
float factor,
|
||||
int total,
|
||||
const PtrStepSzb& mstages,
|
||||
const int nstages,
|
||||
const PtrStepSzi& mnodes,
|
||||
const PtrStepSzf& mleaves,
|
||||
const PtrStepSzi& msubsets,
|
||||
const PtrStepSzb& mfeatures,
|
||||
const int subsetSize,
|
||||
PtrStepSz<int4> objects,
|
||||
unsigned int* classified,
|
||||
PtrStepSzi integral);
|
||||
|
||||
void connectedConmonents(PtrStepSz<int4> candidates, int ncandidates, PtrStepSz<int4> objects,int groupThreshold, float grouping_eps, unsigned int* nclasses);
|
||||
}
|
||||
}}}
|
||||
|
||||
struct cv::cuda::CascadeClassifier_GPU::LbpCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
|
||||
{
|
||||
public:
|
||||
struct Stage
|
||||
{
|
||||
int first;
|
||||
int ntrees;
|
||||
float threshold;
|
||||
};
|
||||
|
||||
LbpCascade(){}
|
||||
virtual ~LbpCascade(){}
|
||||
|
||||
virtual unsigned int process(const GpuMat& image, GpuMat& objects, float scaleFactor, int groupThreshold, bool /*findLargestObject*/,
|
||||
bool /*visualizeInPlace*/, cv::Size minObjectSize, cv::Size maxObjectSize)
|
||||
{
|
||||
CV_Assert(scaleFactor > 1 && image.depth() == CV_8U);
|
||||
|
||||
// const int defaultObjSearchNum = 100;
|
||||
const float grouping_eps = 0.2f;
|
||||
|
||||
if( !objects.empty() && objects.depth() == CV_32S)
|
||||
objects.reshape(4, 1);
|
||||
else
|
||||
objects.create(1 , image.cols >> 4, CV_32SC4);
|
||||
|
||||
// used for debug
|
||||
// candidates.setTo(cv::Scalar::all(0));
|
||||
// objects.setTo(cv::Scalar::all(0));
|
||||
|
||||
if (maxObjectSize == cv::Size())
|
||||
maxObjectSize = image.size();
|
||||
|
||||
allocateBuffers(image.size());
|
||||
|
||||
unsigned int classified = 0;
|
||||
GpuMat dclassified(1, 1, CV_32S);
|
||||
cudaSafeCall( cudaMemcpy(dclassified.ptr(), &classified, sizeof(int), cudaMemcpyHostToDevice) );
|
||||
|
||||
PyrLavel level(0, scaleFactor, image.size(), NxM, minObjectSize);
|
||||
|
||||
while (level.isFeasible(maxObjectSize))
|
||||
{
|
||||
int acc = level.sFrame.width + 1;
|
||||
float iniScale = level.scale;
|
||||
|
||||
cv::Size area = level.workArea;
|
||||
int step = 1 + (level.scale <= 2.f);
|
||||
|
||||
int total = 0, prev = 0;
|
||||
|
||||
while (acc <= integralFactor * (image.cols + 1) && level.isFeasible(maxObjectSize))
|
||||
{
|
||||
// create suitable matrix headers
|
||||
GpuMat src = resuzeBuffer(cv::Rect(0, 0, level.sFrame.width, level.sFrame.height));
|
||||
GpuMat sint = integral(cv::Rect(prev, 0, level.sFrame.width + 1, level.sFrame.height + 1));
|
||||
GpuMat buff = integralBuffer;
|
||||
|
||||
// generate integral for scale
|
||||
cuda::resize(image, src, level.sFrame, 0, 0, cv::INTER_LINEAR);
|
||||
cuda::integral(src, sint, buff);
|
||||
|
||||
// calculate job
|
||||
int totalWidth = level.workArea.width / step;
|
||||
total += totalWidth * (level.workArea.height / step);
|
||||
|
||||
// go to the next pyramid level
|
||||
level = level.next(scaleFactor, image.size(), NxM, minObjectSize);
|
||||
area = level.workArea;
|
||||
|
||||
step = (1 + (level.scale <= 2.f));
|
||||
prev = acc;
|
||||
acc += level.sFrame.width + 1;
|
||||
}
|
||||
|
||||
device::lbp::classifyPyramid(image.cols, image.rows, NxM.width - 1, NxM.height - 1, iniScale, scaleFactor, total, stage_mat, stage_mat.cols / sizeof(Stage), nodes_mat,
|
||||
leaves_mat, subsets_mat, features_mat, subsetSize, candidates, dclassified.ptr<unsigned int>(), integral);
|
||||
}
|
||||
|
||||
if (groupThreshold <= 0 || objects.empty())
|
||||
return 0;
|
||||
|
||||
cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
|
||||
device::lbp::connectedConmonents(candidates, classified, objects, groupThreshold, grouping_eps, dclassified.ptr<unsigned int>());
|
||||
|
||||
cudaSafeCall( cudaMemcpy(&classified, dclassified.ptr(), sizeof(int), cudaMemcpyDeviceToHost) );
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
return classified;
|
||||
}
|
||||
|
||||
virtual cv::Size getClassifierCvSize() const { return NxM; }
|
||||
|
||||
bool read(const String& classifierAsXml)
|
||||
{
|
||||
FileStorage fs(classifierAsXml, FileStorage::READ);
|
||||
return fs.isOpened() ? read(fs.getFirstTopLevelNode()) : false;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
void allocateBuffers(cv::Size frame)
|
||||
{
|
||||
if (frame == cv::Size())
|
||||
return;
|
||||
|
||||
if (resuzeBuffer.empty() || frame.width > resuzeBuffer.cols || frame.height > resuzeBuffer.rows)
|
||||
{
|
||||
resuzeBuffer.create(frame, CV_8UC1);
|
||||
|
||||
integral.create(frame.height + 1, integralFactor * (frame.width + 1), CV_32SC1);
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDALEGACY
|
||||
NcvSize32u roiSize;
|
||||
roiSize.width = frame.width;
|
||||
roiSize.height = frame.height;
|
||||
|
||||
cudaDeviceProp prop;
|
||||
cudaSafeCall( cudaGetDeviceProperties(&prop, cv::cuda::getDevice()) );
|
||||
|
||||
Ncv32u bufSize;
|
||||
ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
|
||||
integralBuffer.create(1, bufSize, CV_8UC1);
|
||||
#endif
|
||||
|
||||
candidates.create(1 , frame.width >> 1, CV_32SC4);
|
||||
}
|
||||
}
|
||||
|
||||
bool read(const FileNode &root)
|
||||
{
|
||||
const char *GPU_CC_STAGE_TYPE = "stageType";
|
||||
const char *GPU_CC_FEATURE_TYPE = "featureType";
|
||||
const char *GPU_CC_BOOST = "BOOST";
|
||||
const char *GPU_CC_LBP = "LBP";
|
||||
const char *GPU_CC_MAX_CAT_COUNT = "maxCatCount";
|
||||
const char *GPU_CC_HEIGHT = "height";
|
||||
const char *GPU_CC_WIDTH = "width";
|
||||
const char *GPU_CC_STAGE_PARAMS = "stageParams";
|
||||
const char *GPU_CC_MAX_DEPTH = "maxDepth";
|
||||
const char *GPU_CC_FEATURE_PARAMS = "featureParams";
|
||||
const char *GPU_CC_STAGES = "stages";
|
||||
const char *GPU_CC_STAGE_THRESHOLD = "stageThreshold";
|
||||
const float GPU_THRESHOLD_EPS = 1e-5f;
|
||||
const char *GPU_CC_WEAK_CLASSIFIERS = "weakClassifiers";
|
||||
const char *GPU_CC_INTERNAL_NODES = "internalNodes";
|
||||
const char *GPU_CC_LEAF_VALUES = "leafValues";
|
||||
const char *GPU_CC_FEATURES = "features";
|
||||
const char *GPU_CC_RECT = "rect";
|
||||
|
||||
String stageTypeStr = (String)root[GPU_CC_STAGE_TYPE];
|
||||
CV_Assert(stageTypeStr == GPU_CC_BOOST);
|
||||
|
||||
String featureTypeStr = (String)root[GPU_CC_FEATURE_TYPE];
|
||||
CV_Assert(featureTypeStr == GPU_CC_LBP);
|
||||
|
||||
NxM.width = (int)root[GPU_CC_WIDTH];
|
||||
NxM.height = (int)root[GPU_CC_HEIGHT];
|
||||
CV_Assert( NxM.height > 0 && NxM.width > 0 );
|
||||
|
||||
isStumps = ((int)(root[GPU_CC_STAGE_PARAMS][GPU_CC_MAX_DEPTH]) == 1) ? true : false;
|
||||
CV_Assert(isStumps);
|
||||
|
||||
FileNode fn = root[GPU_CC_FEATURE_PARAMS];
|
||||
if (fn.empty())
|
||||
return false;
|
||||
|
||||
ncategories = fn[GPU_CC_MAX_CAT_COUNT];
|
||||
|
||||
subsetSize = (ncategories + 31) / 32;
|
||||
nodeStep = 3 + ( ncategories > 0 ? subsetSize : 1 );
|
||||
|
||||
fn = root[GPU_CC_STAGES];
|
||||
if (fn.empty())
|
||||
return false;
|
||||
|
||||
std::vector<Stage> stages;
|
||||
stages.reserve(fn.size());
|
||||
|
||||
std::vector<int> cl_trees;
|
||||
std::vector<int> cl_nodes;
|
||||
std::vector<float> cl_leaves;
|
||||
std::vector<int> subsets;
|
||||
|
||||
FileNodeIterator it = fn.begin(), it_end = fn.end();
|
||||
for (size_t si = 0; it != it_end; si++, ++it )
|
||||
{
|
||||
FileNode fns = *it;
|
||||
Stage st;
|
||||
st.threshold = (float)fns[GPU_CC_STAGE_THRESHOLD] - GPU_THRESHOLD_EPS;
|
||||
|
||||
fns = fns[GPU_CC_WEAK_CLASSIFIERS];
|
||||
if (fns.empty())
|
||||
return false;
|
||||
|
||||
st.ntrees = (int)fns.size();
|
||||
st.first = (int)cl_trees.size();
|
||||
|
||||
stages.push_back(st);// (int, int, float)
|
||||
|
||||
cl_trees.reserve(stages[si].first + stages[si].ntrees);
|
||||
|
||||
// weak trees
|
||||
FileNodeIterator it1 = fns.begin(), it1_end = fns.end();
|
||||
for ( ; it1 != it1_end; ++it1 )
|
||||
{
|
||||
FileNode fnw = *it1;
|
||||
|
||||
FileNode internalNodes = fnw[GPU_CC_INTERNAL_NODES];
|
||||
FileNode leafValues = fnw[GPU_CC_LEAF_VALUES];
|
||||
if ( internalNodes.empty() || leafValues.empty() )
|
||||
return false;
|
||||
|
||||
int nodeCount = (int)internalNodes.size()/nodeStep;
|
||||
cl_trees.push_back(nodeCount);
|
||||
|
||||
cl_nodes.reserve((cl_nodes.size() + nodeCount) * 3);
|
||||
cl_leaves.reserve(cl_leaves.size() + leafValues.size());
|
||||
|
||||
if( subsetSize > 0 )
|
||||
subsets.reserve(subsets.size() + nodeCount * subsetSize);
|
||||
|
||||
// nodes
|
||||
FileNodeIterator iIt = internalNodes.begin(), iEnd = internalNodes.end();
|
||||
|
||||
for( ; iIt != iEnd; )
|
||||
{
|
||||
cl_nodes.push_back((int)*(iIt++));
|
||||
cl_nodes.push_back((int)*(iIt++));
|
||||
cl_nodes.push_back((int)*(iIt++));
|
||||
|
||||
if( subsetSize > 0 )
|
||||
for( int j = 0; j < subsetSize; j++, ++iIt )
|
||||
subsets.push_back((int)*iIt);
|
||||
}
|
||||
|
||||
// leaves
|
||||
iIt = leafValues.begin(), iEnd = leafValues.end();
|
||||
for( ; iIt != iEnd; ++iIt )
|
||||
cl_leaves.push_back((float)*iIt);
|
||||
}
|
||||
}
|
||||
|
||||
fn = root[GPU_CC_FEATURES];
|
||||
if( fn.empty() )
|
||||
return false;
|
||||
std::vector<uchar> features;
|
||||
features.reserve(fn.size() * 4);
|
||||
FileNodeIterator f_it = fn.begin(), f_end = fn.end();
|
||||
for (; f_it != f_end; ++f_it)
|
||||
{
|
||||
FileNode rect = (*f_it)[GPU_CC_RECT];
|
||||
FileNodeIterator r_it = rect.begin();
|
||||
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
|
||||
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
|
||||
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
|
||||
features.push_back(saturate_cast<uchar>((int)*(r_it++)));
|
||||
}
|
||||
|
||||
// copy data structures on gpu
|
||||
stage_mat.upload(cv::Mat(1, (int) (stages.size() * sizeof(Stage)), CV_8UC1, (uchar*)&(stages[0]) ));
|
||||
trees_mat.upload(cv::Mat(cl_trees).reshape(1,1));
|
||||
nodes_mat.upload(cv::Mat(cl_nodes).reshape(1,1));
|
||||
leaves_mat.upload(cv::Mat(cl_leaves).reshape(1,1));
|
||||
subsets_mat.upload(cv::Mat(subsets).reshape(1,1));
|
||||
features_mat.upload(cv::Mat(features).reshape(4,1));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
enum stage { BOOST = 0 };
|
||||
enum feature { LBP = 1, HAAR = 2 };
|
||||
static const stage stageType = BOOST;
|
||||
static const feature featureType = LBP;
|
||||
|
||||
cv::Size NxM;
|
||||
bool isStumps;
|
||||
int ncategories;
|
||||
int subsetSize;
|
||||
int nodeStep;
|
||||
|
||||
// gpu representation of classifier
|
||||
GpuMat stage_mat;
|
||||
GpuMat trees_mat;
|
||||
GpuMat nodes_mat;
|
||||
GpuMat leaves_mat;
|
||||
GpuMat subsets_mat;
|
||||
GpuMat features_mat;
|
||||
|
||||
GpuMat integral;
|
||||
GpuMat integralBuffer;
|
||||
GpuMat resuzeBuffer;
|
||||
|
||||
GpuMat candidates;
|
||||
static const int integralFactor = 4;
|
||||
};
|
||||
|
||||
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU()
|
||||
: findLargestObject(false), visualizeInPlace(false), impl(0) {}
|
||||
|
||||
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String& filename)
|
||||
: findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
|
||||
|
||||
cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
|
||||
|
||||
void cv::cuda::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }
|
||||
|
||||
bool cv::cuda::CascadeClassifier_GPU::empty() const { return impl == 0; }
|
||||
|
||||
Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const
|
||||
{
|
||||
return this->empty() ? Size() : impl->getClassifierCvSize();
|
||||
}
|
||||
|
||||
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
|
||||
{
|
||||
CV_Assert( !this->empty());
|
||||
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, cv::Size());
|
||||
}
|
||||
|
||||
int cv::cuda::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
|
||||
{
|
||||
CV_Assert( !this->empty());
|
||||
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, maxObjectSize);
|
||||
}
|
||||
|
||||
bool cv::cuda::CascadeClassifier_GPU::load(const String& filename)
|
||||
{
|
||||
release();
|
||||
|
||||
String fext = filename.substr(filename.find_last_of(".") + 1);
|
||||
fext = fext.toLowerCase();
|
||||
|
||||
if (fext == "nvbin")
|
||||
{
|
||||
impl = new HaarCascade();
|
||||
return impl->read(filename);
|
||||
}
|
||||
|
||||
FileStorage fs(filename, FileStorage::READ);
|
||||
|
||||
if (!fs.isOpened())
|
||||
{
|
||||
impl = new HaarCascade();
|
||||
return impl->read(filename);
|
||||
}
|
||||
|
||||
const char *GPU_CC_LBP = "LBP";
|
||||
String featureTypeStr = (String)fs.getFirstTopLevelNode()["featureType"];
|
||||
if (featureTypeStr == GPU_CC_LBP)
|
||||
impl = new LbpCascade();
|
||||
else
|
||||
impl = new HaarCascade();
|
||||
|
||||
impl->read(filename);
|
||||
return !this->empty();
|
||||
}
|
||||
|
||||
#endif
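A minimal usage sketch for the class above (not part of this commit; it assumes a CUDA-capable device and an LBP cascade file such as lbpcascade_frontalface.xml on disk):

    cv::cuda::CascadeClassifier_GPU cascade("lbpcascade_frontalface.xml");
    cv::Mat frame = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);

    cv::cuda::GpuMat d_frame(frame), d_objects;
    int ndetections = cascade.detectMultiScale(d_frame, d_objects, 1.2, 4, cv::Size(24, 24));

    if (ndetections > 0)
    {
        cv::Mat h_objects;
        d_objects.colRange(0, ndetections).download(h_objects); // only the first ndetections entries are valid
        const cv::Rect* faces = h_objects.ptr<cv::Rect>();
        for (int i = 0; i < ndetections; ++i)
            cv::rectangle(frame, faces[i], cv::Scalar(255));
    }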

modules/cuda/src/cuda/calib3d.cu (new file, 193 lines)
@@ -0,0 +1,193 @@
#if !defined CUDA_DISABLER

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"

namespace cv { namespace cuda { namespace device
{
    #define SOLVE_PNP_RANSAC_MAX_NUM_ITERS 200

    namespace transform_points
    {
        __constant__ float3 crot0;
        __constant__ float3 crot1;
        __constant__ float3 crot2;
        __constant__ float3 ctransl;

        struct TransformOp : unary_function<float3, float3>
        {
            __device__ __forceinline__ float3 operator()(const float3& p) const
            {
                return make_float3(
                    crot0.x * p.x + crot0.y * p.y + crot0.z * p.z + ctransl.x,
                    crot1.x * p.x + crot1.y * p.y + crot1.z * p.z + ctransl.y,
                    crot2.x * p.x + crot2.y * p.y + crot2.z * p.z + ctransl.z);
            }
            __host__ __device__ __forceinline__ TransformOp() {}
            __host__ __device__ __forceinline__ TransformOp(const TransformOp&) {}
        };

        void call(const PtrStepSz<float3> src, const float* rot,
                  const float* transl, PtrStepSz<float3> dst,
                  cudaStream_t stream)
        {
            cudaSafeCall(cudaMemcpyToSymbol(crot0, rot, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(crot1, rot + 3, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(crot2, rot + 6, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(ctransl, transl, sizeof(float) * 3));
            cv::cuda::device::transform(src, dst, TransformOp(), WithOutMask(), stream);
        }
    } // namespace transform_points

    namespace project_points
    {
        __constant__ float3 crot0;
        __constant__ float3 crot1;
        __constant__ float3 crot2;
        __constant__ float3 ctransl;
        __constant__ float3 cproj0;
        __constant__ float3 cproj1;

        struct ProjectOp : unary_function<float3, float3>
        {
            __device__ __forceinline__ float2 operator()(const float3& p) const
            {
                // Rotate and translate in 3D
                float3 t = make_float3(
                    crot0.x * p.x + crot0.y * p.y + crot0.z * p.z + ctransl.x,
                    crot1.x * p.x + crot1.y * p.y + crot1.z * p.z + ctransl.y,
                    crot2.x * p.x + crot2.y * p.y + crot2.z * p.z + ctransl.z);
                // Project on 2D plane
                return make_float2(
                    (cproj0.x * t.x + cproj0.y * t.y) / t.z + cproj0.z,
                    (cproj1.x * t.x + cproj1.y * t.y) / t.z + cproj1.z);
            }
            __host__ __device__ __forceinline__ ProjectOp() {}
            __host__ __device__ __forceinline__ ProjectOp(const ProjectOp&) {}
        };

        void call(const PtrStepSz<float3> src, const float* rot,
                  const float* transl, const float* proj, PtrStepSz<float2> dst,
                  cudaStream_t stream)
        {
            cudaSafeCall(cudaMemcpyToSymbol(crot0, rot, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(crot1, rot + 3, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(crot2, rot + 6, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(ctransl, transl, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(cproj0, proj, sizeof(float) * 3));
            cudaSafeCall(cudaMemcpyToSymbol(cproj1, proj + 3, sizeof(float) * 3));
            cv::cuda::device::transform(src, dst, ProjectOp(), WithOutMask(), stream);
        }
    } // namespace project_points
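Note that cproj0 and cproj1 are filled from camera_mat.ptr<float>() on the host side, i.e. they are the first two rows of the 3x3 camera matrix. Assuming the usual pinhole matrix K = [fx 0 cx; 0 fy cy; 0 0 1], ProjectOp therefore computes u = fx*X/Z + cx and v = fy*Y/Z + cy: plain pinhole projection with no distortion model, which is why projectPointsCaller asserts dist_coef.empty().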

    namespace solve_pnp_ransac
    {
        __constant__ float3 crot_matrices[SOLVE_PNP_RANSAC_MAX_NUM_ITERS * 3];
        __constant__ float3 ctransl_vectors[SOLVE_PNP_RANSAC_MAX_NUM_ITERS];

        int maxNumIters()
        {
            return SOLVE_PNP_RANSAC_MAX_NUM_ITERS;
        }

        __device__ __forceinline__ float sqr(float x)
        {
            return x * x;
        }

        template <int BLOCK_SIZE>
        __global__ void computeHypothesisScoresKernel(
            const int num_points, const float3* object, const float2* image,
            const float dist_threshold, int* g_num_inliers)
        {
            const float3* const &rot_mat = crot_matrices + blockIdx.x * 3;
            const float3 &transl_vec = ctransl_vectors[blockIdx.x];
            int num_inliers = 0;

            for (int i = threadIdx.x; i < num_points; i += blockDim.x)
            {
                float3 p = object[i];
                p = make_float3(
                    rot_mat[0].x * p.x + rot_mat[0].y * p.y + rot_mat[0].z * p.z + transl_vec.x,
                    rot_mat[1].x * p.x + rot_mat[1].y * p.y + rot_mat[1].z * p.z + transl_vec.y,
                    rot_mat[2].x * p.x + rot_mat[2].y * p.y + rot_mat[2].z * p.z + transl_vec.z);
                p.x /= p.z;
                p.y /= p.z;
                float2 image_p = image[i];
                if (sqr(p.x - image_p.x) + sqr(p.y - image_p.y) < dist_threshold)
                    ++num_inliers;
            }

            __shared__ int s_num_inliers[BLOCK_SIZE];
            reduce<BLOCK_SIZE>(s_num_inliers, num_inliers, threadIdx.x, plus<int>());

            if (threadIdx.x == 0)
                g_num_inliers[blockIdx.x] = num_inliers;
        }

        void computeHypothesisScores(
            const int num_hypotheses, const int num_points, const float* rot_matrices,
            const float3* transl_vectors, const float3* object, const float2* image,
            const float dist_threshold, int* hypothesis_scores)
        {
            cudaSafeCall(cudaMemcpyToSymbol(crot_matrices, rot_matrices, num_hypotheses * 3 * sizeof(float3)));
            cudaSafeCall(cudaMemcpyToSymbol(ctransl_vectors, transl_vectors, num_hypotheses * sizeof(float3)));

            dim3 threads(256);
            dim3 grid(num_hypotheses);

            computeHypothesisScoresKernel<256><<<grid, threads>>>(
                num_points, object, image, dist_threshold, hypothesis_scores);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }
    } // namespace solve_pnp_ransac
}}} // namespace cv { namespace cuda { namespace device


#endif /* CUDA_DISABLER */
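The hard cap returned by maxNumIters() follows from the __constant__ arrays above: each hypothesis stores three float3 rotation rows plus one float3 translation, i.e. 4 * 12 = 48 bytes, so 200 hypotheses occupy 9,600 bytes of the 64 KB constant-memory bank available on CUDA devices of this generation. One block is launched per hypothesis, and each block reduces its per-thread inlier counts in shared memory; this is also why the host wrapper asserts num_iters <= solve_pnp_ransac::maxNumIters().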

modules/cuda/src/cuda/ccomponetns.cu (new file, 534 lines)
@@ -0,0 +1,534 @@
#if !defined CUDA_DISABLER
|
||||
|
||||
#include <opencv2/core/cuda/common.hpp>
|
||||
#include <opencv2/core/cuda/vec_traits.hpp>
|
||||
#include <opencv2/core/cuda/vec_math.hpp>
|
||||
#include <opencv2/core/cuda/emulation.hpp>
|
||||
|
||||
#include <iostream>
|
||||
#include <stdio.h>
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace ccl
|
||||
{
|
||||
enum
|
||||
{
|
||||
WARP_SIZE = 32,
|
||||
WARP_LOG = 5,
|
||||
|
||||
CTA_SIZE_X = 32,
|
||||
CTA_SIZE_Y = 8,
|
||||
|
||||
STA_SIZE_MERGE_Y = 4,
|
||||
STA_SIZE_MERGE_X = 32,
|
||||
|
||||
TPB_X = 1,
|
||||
TPB_Y = 4,
|
||||
|
||||
TILE_COLS = CTA_SIZE_X * TPB_X,
|
||||
TILE_ROWS = CTA_SIZE_Y * TPB_Y
|
||||
};
|
||||
|
||||
template<typename T> struct IntervalsTraits
|
||||
{
|
||||
typedef T elem_type;
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<unsigned char>
|
||||
{
|
||||
typedef int dist_type;
|
||||
enum {ch = 1};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<uchar3>
|
||||
{
|
||||
typedef int3 dist_type;
|
||||
enum {ch = 3};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<uchar4>
|
||||
{
|
||||
typedef int4 dist_type;
|
||||
enum {ch = 4};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<unsigned short>
|
||||
{
|
||||
typedef int dist_type;
|
||||
enum {ch = 1};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<ushort3>
|
||||
{
|
||||
typedef int3 dist_type;
|
||||
enum {ch = 3};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<ushort4>
|
||||
{
|
||||
typedef int4 dist_type;
|
||||
enum {ch = 4};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<float>
|
||||
{
|
||||
typedef float dist_type;
|
||||
enum {ch = 1};
|
||||
};
|
||||
|
||||
template<> struct IntervalsTraits<int>
|
||||
{
|
||||
typedef int dist_type;
|
||||
enum {ch = 1};
|
||||
};
|
||||
|
||||
typedef unsigned char component;
|
||||
enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 };
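Each pixel's connectivity is encoded in the four low bits of a component byte using the flags above: for example, a pixel whose value lies within the configured interval of both its left and upper neighbours, but not of the right or lower ones, stores LEFT | UP (= 0x5), while an isolated pixel stores 0. The computeConnectivity kernel below fills this map, and the tile-labelling passes that follow only walk edges whose bits are set.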
|
||||
|
||||
template<typename T, int CH> struct InInterval {};
|
||||
|
||||
template<typename T> struct InInterval<T, 1>
|
||||
{
|
||||
typedef typename VecTraits<T>::elem_type E;
|
||||
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo((E)(-_lo.x)), hi((E)_hi.x) {};
|
||||
T lo, hi;
|
||||
|
||||
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
|
||||
{
|
||||
I d = a - b;
|
||||
return lo <= d && d <= hi;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template<typename T> struct InInterval<T, 3>
|
||||
{
|
||||
typedef typename VecTraits<T>::elem_type E;
|
||||
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
|
||||
: lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z)){};
|
||||
T lo, hi;
|
||||
|
||||
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
|
||||
{
|
||||
I d = saturate_cast<I>(a - b);
|
||||
return lo.x <= d.x && d.x <= hi.x &&
|
||||
lo.y <= d.y && d.y <= hi.y &&
|
||||
lo.z <= d.z && d.z <= hi.z;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T> struct InInterval<T, 4>
|
||||
{
|
||||
typedef typename VecTraits<T>::elem_type E;
|
||||
__host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi)
|
||||
: lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z), (E)(-_lo.w))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z, (E)_hi.w)){};
|
||||
T lo, hi;
|
||||
|
||||
template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const
|
||||
{
|
||||
I d = saturate_cast<I>(a - b);
|
||||
return lo.x <= d.x && d.x <= hi.x &&
|
||||
lo.y <= d.y && d.y <= hi.y &&
|
||||
lo.z <= d.z && d.z <= hi.z &&
|
||||
lo.w <= d.w && d.w <= hi.w;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template<typename T, typename F>
|
||||
__global__ void computeConnectivity(const PtrStepSz<T> image, PtrStepSzb components, F connected)
|
||||
{
|
||||
int x = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
int y = threadIdx.y + blockIdx.y * blockDim.y;
|
||||
|
||||
if (x >= image.cols || y >= image.rows) return;
|
||||
|
||||
T intensity = image(y, x);
|
||||
component c = 0;
|
||||
|
||||
if ( x > 0 && connected(intensity, image(y, x - 1)))
|
||||
c |= LEFT;
|
||||
|
||||
if ( y > 0 && connected(intensity, image(y - 1, x)))
|
||||
c |= UP;
|
||||
|
||||
if ( x + 1 < image.cols && connected(intensity, image(y, x + 1)))
|
||||
c |= RIGHT;
|
||||
|
||||
if ( y + 1 < image.rows && connected(intensity, image(y + 1, x)))
|
||||
c |= DOWN;
|
||||
|
||||
components(y, x) = c;
|
||||
}
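
// Illustrative CPU reference for the connectivity mask built by computeConnectivity above
// (a sketch, not part of this module). Assumes a single-channel 8-bit image stored
// row-major and the scalar InInterval<int, 1> predicate; cpuConnectivityMask is a
// hypothetical helper and the bit values mirror the Edges enum (UP=1, DOWN=2, LEFT=4, RIGHT=8).
#include <vector>

static std::vector<unsigned char> cpuConnectivityMask(const unsigned char* img, int rows, int cols,
                                                      int lo, int hi)
{
    std::vector<unsigned char> mask(rows * cols, 0);
    // Two pixels are connected when the signed difference (centre - neighbour) lies in [-lo, hi].
    auto connected = [&](int a, int b) { int d = a - b; return -lo <= d && d <= hi; };

    for (int y = 0; y < rows; ++y)
        for (int x = 0; x < cols; ++x)
        {
            int v = img[y * cols + x];
            unsigned char c = 0;
            if (x > 0        && connected(v, img[y * cols + x - 1]))   c |= 4;  // LEFT
            if (y > 0        && connected(v, img[(y - 1) * cols + x])) c |= 1;  // UP
            if (x + 1 < cols && connected(v, img[y * cols + x + 1]))   c |= 8;  // RIGHT
            if (y + 1 < rows && connected(v, img[(y + 1) * cols + x])) c |= 2;  // DOWN
            mask[y * cols + x] = c;
        }
    return mask;
}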
|
||||
|
||||
template< typename T>
|
||||
void computeEdges(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream)
|
||||
{
|
||||
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
|
||||
dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y));
|
||||
|
||||
typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t;
|
||||
|
||||
Int_t inInt(lo, hi);
|
||||
computeConnectivity<T, Int_t><<<grid, block, 0, stream>>>(static_cast<const PtrStepSz<T> >(image), edges, inInt);
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
template void computeEdges<uchar> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<uchar3> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<uchar4> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<ushort> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<ushort3>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<ushort4>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<int> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
template void computeEdges<float> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
|
||||
__global__ void lableTiles(const PtrStepSzb edges, PtrStepSzi comps)
|
||||
{
|
||||
int x = threadIdx.x + blockIdx.x * TILE_COLS;
|
||||
int y = threadIdx.y + blockIdx.y * TILE_ROWS;
|
||||
|
||||
if (x >= edges.cols || y >= edges.rows) return;
|
||||
|
||||
// TPB_X is currently 1, so only the y bound needs to be checked
|
||||
int bounds = ((y + TPB_Y) < edges.rows);
|
||||
|
||||
__shared__ int labelsTile[TILE_ROWS][TILE_COLS];
|
||||
__shared__ int edgesTile[TILE_ROWS][TILE_COLS];
|
||||
|
||||
int new_labels[TPB_Y][TPB_X];
|
||||
int old_labels[TPB_Y][TPB_X];
|
||||
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
int yloc = threadIdx.y + CTA_SIZE_Y * i;
|
||||
int xloc = threadIdx.x + CTA_SIZE_X * j;
|
||||
component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j);
|
||||
|
||||
if (!xloc) c &= ~LEFT;
|
||||
if (!yloc) c &= ~UP;
|
||||
|
||||
if (xloc == TILE_COLS -1) c &= ~RIGHT;
|
||||
if (yloc == TILE_ROWS -1) c &= ~DOWN;
|
||||
|
||||
new_labels[i][j] = yloc * TILE_COLS + xloc;
|
||||
edgesTile[yloc][xloc] = c;
|
||||
}
|
||||
|
||||
for (int k = 0; ;++k)
|
||||
{
|
||||
//1. backup
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
int yloc = threadIdx.y + CTA_SIZE_Y * i;
|
||||
int xloc = threadIdx.x + CTA_SIZE_X * j;
|
||||
|
||||
old_labels[i][j] = new_labels[i][j];
|
||||
labelsTile[yloc][xloc] = new_labels[i][j];
|
||||
}
|
||||
|
||||
__syncthreads();
|
||||
|
||||
//2. compare local arrays
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
int yloc = threadIdx.y + CTA_SIZE_Y * i;
|
||||
int xloc = threadIdx.x + CTA_SIZE_X * j;
|
||||
|
||||
component c = edgesTile[yloc][xloc];
|
||||
int label = new_labels[i][j];
|
||||
|
||||
if (c & UP)
|
||||
label = ::min(label, labelsTile[yloc - 1][xloc]);
|
||||
|
||||
if (c & DOWN)
|
||||
label = ::min(label, labelsTile[yloc + 1][xloc]);
|
||||
|
||||
if (c & LEFT)
|
||||
label = ::min(label, labelsTile[yloc][xloc - 1]);
|
||||
|
||||
if (c & RIGHT)
|
||||
label = ::min(label, labelsTile[yloc][xloc + 1]);
|
||||
|
||||
new_labels[i][j] = label;
|
||||
}
|
||||
|
||||
__syncthreads();
|
||||
|
||||
//3. determine: Is any value changed?
|
||||
int changed = 0;
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
if (new_labels[i][j] < old_labels[i][j])
|
||||
{
|
||||
changed = 1;
|
||||
Emulation::smem::atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]);
|
||||
}
|
||||
}
|
||||
|
||||
changed = Emulation::syncthreadsOr(changed);
|
||||
|
||||
if (!changed)
|
||||
break;
|
||||
|
||||
//4. Compact paths
|
||||
const int *labels = &labelsTile[0][0];
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
int label = new_labels[i][j];
|
||||
|
||||
while( labels[label] < label ) label = labels[label];
|
||||
|
||||
new_labels[i][j] = label;
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
#pragma unroll
|
||||
for (int i = 0; i < TPB_Y; ++i)
|
||||
#pragma unroll
|
||||
for (int j = 0; j < TPB_X; ++j)
|
||||
{
|
||||
int label = new_labels[i][j];
|
||||
int yloc = label / TILE_COLS;
|
||||
int xloc = label - yloc * TILE_COLS;
|
||||
|
||||
xloc += blockIdx.x * TILE_COLS;
|
||||
yloc += blockIdx.y * TILE_ROWS;
|
||||
|
||||
label = yloc * edges.cols + xloc;
|
||||
// the same bound check would be needed for x once TPB_X > 1
|
||||
if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label;
|
||||
}
|
||||
}
|
||||
|
||||
__device__ __forceinline__ int root(const PtrStepSzi& comps, int label)
|
||||
{
|
||||
while(1)
|
||||
{
|
||||
int y = label / comps.cols;
|
||||
int x = label - y * comps.cols;
|
||||
|
||||
int parent = comps(y, x);
|
||||
|
||||
if (label == parent) break;
|
||||
|
||||
label = parent;
|
||||
}
|
||||
return label;
|
||||
}
|
||||
|
||||
__device__ __forceinline__ void isConnected(PtrStepSzi& comps, int l1, int l2, bool& changed)
|
||||
{
|
||||
int r1 = root(comps, l1);
|
||||
int r2 = root(comps, l2);
|
||||
|
||||
if (r1 == r2) return;
|
||||
|
||||
int mi = ::min(r1, r2);
|
||||
int ma = ::max(r1, r2);
|
||||
|
||||
int y = ma / comps.cols;
|
||||
int x = ma - y * comps.cols;
|
||||
|
||||
atomicMin(&comps.ptr(y)[x], mi);
|
||||
changed = true;
|
||||
}
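
// Illustrative single-threaded analogue of root()/isConnected() above (a sketch, not part
// of this module): labels form a forest stored in a flat array and two components are
// merged by pointing the larger root at the smaller one, just as the atomicMin does.
#include <algorithm>
#include <vector>

static int cpuRoot(const std::vector<int>& comps, int label)
{
    while (comps[label] != label)
        label = comps[label];
    return label;
}

static bool cpuMerge(std::vector<int>& comps, int l1, int l2)
{
    int r1 = cpuRoot(comps, l1);
    int r2 = cpuRoot(comps, l2);
    if (r1 == r2)
        return false;                               // already the same component
    comps[std::max(r1, r2)] = std::min(r1, r2);     // larger root now points at the smaller
    return true;                                    // "changed" on the GPU side
}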
|
||||
|
||||
__global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX,
|
||||
const PtrStepSzb edges, PtrStepSzi comps, const int yIncomplete, int xIncomplete)
|
||||
{
|
||||
int tid = threadIdx.y * blockDim.x + threadIdx.x;
|
||||
int stride = blockDim.y * blockDim.x;
|
||||
|
||||
int ybegin = blockIdx.y * (tilesNumY * tileSizeY);
|
||||
int yend = ybegin + tilesNumY * tileSizeY;
|
||||
|
||||
if (blockIdx.y == gridDim.y - 1)
|
||||
{
|
||||
yend -= yIncomplete * tileSizeY;
|
||||
yend -= tileSizeY;
|
||||
tileSizeY = (edges.rows % tileSizeY);
|
||||
|
||||
yend += tileSizeY;
|
||||
}
|
||||
|
||||
int xbegin = blockIdx.x * tilesNumX * tileSizeX;
|
||||
int xend = xbegin + tilesNumX * tileSizeX;
|
||||
|
||||
if (blockIdx.x == gridDim.x - 1)
|
||||
{
|
||||
if (xIncomplete) yend = ybegin;
|
||||
xend -= xIncomplete * tileSizeX;
|
||||
xend -= tileSizeX;
|
||||
tileSizeX = (edges.cols % tileSizeX);
|
||||
|
||||
xend += tileSizeX;
|
||||
}
|
||||
|
||||
if (blockIdx.y == (gridDim.y - 1) && yIncomplete)
|
||||
{
|
||||
xend = xbegin;
|
||||
}
|
||||
|
||||
int tasksV = (tilesNumX - 1) * (yend - ybegin);
|
||||
int tasksH = (tilesNumY - 1) * (xend - xbegin);
|
||||
|
||||
int total = tasksH + tasksV;
|
||||
|
||||
bool changed;
|
||||
do
|
||||
{
|
||||
changed = false;
|
||||
for (int taskIdx = tid; taskIdx < total; taskIdx += stride)
|
||||
{
|
||||
if (taskIdx < tasksH)
|
||||
{
|
||||
int indexH = taskIdx;
|
||||
|
||||
int row = indexH / (xend - xbegin);
|
||||
int col = indexH - row * (xend - xbegin);
|
||||
|
||||
int y = ybegin + (row + 1) * tileSizeY;
|
||||
int x = xbegin + col;
|
||||
|
||||
component e = edges( x, y);
|
||||
if (e & UP)
|
||||
{
|
||||
int lc = comps(y,x);
|
||||
int lu = comps(y - 1, x);
|
||||
|
||||
isConnected(comps, lc, lu, changed);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
int indexV = taskIdx - tasksH;
|
||||
|
||||
int col = indexV / (yend - ybegin);
|
||||
int row = indexV - col * (yend - ybegin);
|
||||
|
||||
int x = xbegin + (col + 1) * tileSizeX;
|
||||
int y = ybegin + row;
|
||||
|
||||
component e = edges(x, y);
|
||||
if (e & LEFT)
|
||||
{
|
||||
int lc = comps(y, x);
|
||||
int ll = comps(y, x - 1);
|
||||
|
||||
isConnected(comps, lc, ll, changed);
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (Emulation::syncthreadsOr(changed));
|
||||
}
|
||||
|
||||
__global__ void flatten(const PtrStepSzb edges, PtrStepSzi comps)
|
||||
{
|
||||
int x = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
int y = threadIdx.y + blockIdx.y * blockDim.y;
|
||||
|
||||
if( x < comps.cols && y < comps.rows)
|
||||
comps(y, x) = root(comps, comps(y, x));
|
||||
}
|
||||
|
||||
enum {CC_NO_COMPACT = 0, CC_COMPACT_LABELS = 1};
|
||||
|
||||
void labelComponents(const PtrStepSzb& edges, PtrStepSzi comps, int flags, cudaStream_t stream)
|
||||
{
|
||||
(void) flags;
|
||||
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
|
||||
dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS));
|
||||
|
||||
lableTiles<<<grid, block, 0, stream>>>(edges, comps);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS;
|
||||
while (grid.x > 1 || grid.y > 1)
|
||||
{
|
||||
dim3 mergeGrid((int)ceilf(grid.x / 2.f), (int)ceilf(grid.y / 2.f));
|
||||
dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y);
|
||||
// debug log
|
||||
// std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl;
|
||||
crossMerge<<<mergeGrid, mergeBlock, 0, stream>>>(2, 2, tileSizeY, tileSizeX, edges, comps, (int)ceilf(grid.y / 2.f) - grid.y / 2, (int)ceilf(grid.x / 2.f) - grid.x / 2);
|
||||
tileSizeX <<= 1;
|
||||
tileSizeY <<= 1;
|
||||
grid = mergeGrid;
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
}
|
||||
|
||||
grid.x = divUp(edges.cols, block.x);
|
||||
grid.y = divUp(edges.rows, block.y);
|
||||
flatten<<<grid, block, 0, stream>>>(edges, comps);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
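
// Illustrative host-side driver for computeEdges()/labelComponents() above (a sketch, not
// part of this module; the real wrapper lives in the module's host sources). Assumes the
// ccl declarations are visible, an 8-bit single-channel input and default-stream execution.
#include <cuda_runtime.h>
#include "opencv2/core/cuda.hpp"

static void labelImage8U(const cv::cuda::GpuMat& image,    // CV_8UC1 input
                         cv::cuda::GpuMat& mask,           // CV_8UC1 connectivity bits
                         cv::cuda::GpuMat& components,     // CV_32SC1 per-pixel labels
                         float lo, float hi)
{
    mask.create(image.size(), CV_8UC1);
    components.create(image.size(), CV_32SC1);

    // Pixels p and q are connected when -lo <= p - q <= hi (see InInterval above).
    cv::cuda::device::ccl::computeEdges<uchar>(image, mask, make_float4(lo, 0, 0, 0),
                                               make_float4(hi, 0, 0, 0), 0);
    cv::cuda::device::ccl::labelComponents(mask, components, 0, 0);
}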
|
||||
}
|
||||
} } }
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
117
modules/cuda/src/cuda/global_motion.cu
Normal file
@@ -0,0 +1,117 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include <thrust/device_ptr.h>
|
||||
#include <thrust/remove.h>
|
||||
#include <thrust/functional.h>
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device { namespace globmotion {
|
||||
|
||||
__constant__ float cml[9];
|
||||
__constant__ float cmr[9];
|
||||
|
||||
int compactPoints(int N, float *points0, float *points1, const uchar *mask)
|
||||
{
|
||||
thrust::device_ptr<float2> dpoints0((float2*)points0);
|
||||
thrust::device_ptr<float2> dpoints1((float2*)points1);
|
||||
thrust::device_ptr<const uchar> dmask(mask);
|
||||
|
||||
return (int)(thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)),
|
||||
thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)),
|
||||
dmask, thrust::not1(thrust::identity<uchar>()))
|
||||
- thrust::make_zip_iterator(make_tuple(dpoints0, dpoints1)));
|
||||
}
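
// Illustrative CPU analogue of compactPoints() above (a sketch, not part of this module):
// keep the i-th point pair only where mask[i] != 0 and return the new count, which is
// exactly what the thrust::remove_if call does on the device. Names are hypothetical.
#include "opencv2/core.hpp"

static int compactPointsCpu(int N, cv::Point2f* points0, cv::Point2f* points1, const uchar* mask)
{
    int kept = 0;
    for (int i = 0; i < N; ++i)
        if (mask[i])
        {
            points0[kept] = points0[i];
            points1[kept] = points1[i];
            ++kept;
        }
    return kept;
}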
|
||||
|
||||
|
||||
__global__ void calcWobbleSuppressionMapsKernel(
|
||||
const int left, const int idx, const int right, const int width, const int height,
|
||||
PtrStepf mapx, PtrStepf mapy)
|
||||
{
|
||||
const int x = blockDim.x * blockIdx.x + threadIdx.x;
|
||||
const int y = blockDim.y * blockIdx.y + threadIdx.y;
|
||||
|
||||
if (x < width && y < height)
|
||||
{
|
||||
float xl = cml[0]*x + cml[1]*y + cml[2];
|
||||
float yl = cml[3]*x + cml[4]*y + cml[5];
|
||||
float izl = 1.f / (cml[6]*x + cml[7]*y + cml[8]);
|
||||
xl *= izl;
|
||||
yl *= izl;
|
||||
|
||||
float xr = cmr[0]*x + cmr[1]*y + cmr[2];
|
||||
float yr = cmr[3]*x + cmr[4]*y + cmr[5];
|
||||
float izr = 1.f / (cmr[6]*x + cmr[7]*y + cmr[8]);
|
||||
xr *= izr;
|
||||
yr *= izr;
|
||||
|
||||
float wl = idx - left;
|
||||
float wr = right - idx;
|
||||
mapx(y,x) = (wr * xl + wl * xr) / (wl + wr);
|
||||
mapy(y,x) = (wr * yl + wl * yr) / (wl + wr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void calcWobbleSuppressionMaps(
|
||||
int left, int idx, int right, int width, int height,
|
||||
const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy)
|
||||
{
|
||||
cudaSafeCall(cudaMemcpyToSymbol(cml, ml, 9*sizeof(float)));
|
||||
cudaSafeCall(cudaMemcpyToSymbol(cmr, mr, 9*sizeof(float)));
|
||||
|
||||
dim3 threads(32, 8);
|
||||
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
|
||||
|
||||
calcWobbleSuppressionMapsKernel<<<grid, threads>>>(
|
||||
left, idx, right, width, height, mapx, mapy);
|
||||
|
||||
cudaSafeCall(cudaGetLastError());
|
||||
cudaSafeCall(cudaDeviceSynchronize());
|
||||
}
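
// Illustrative per-pixel reference for calcWobbleSuppressionMapsKernel above (a sketch,
// not part of this module): the destination pixel is projected through the "left" and
// "right" 3x3 homographies and the two results are blended by the frame distances.
static void wobbleMapAt(const float ml[9], const float mr[9],
                        int left, int idx, int right, int x, int y,
                        float& mapx, float& mapy)
{
    float xl = ml[0]*x + ml[1]*y + ml[2];
    float yl = ml[3]*x + ml[4]*y + ml[5];
    float zl = ml[6]*x + ml[7]*y + ml[8];

    float xr = mr[0]*x + mr[1]*y + mr[2];
    float yr = mr[3]*x + mr[4]*y + mr[5];
    float zr = mr[6]*x + mr[7]*y + mr[8];

    float wl = float(idx - left);   // weight grows with distance from the left key frame
    float wr = float(right - idx);  // and from the right key frame
    mapx = (wr * (xl / zl) + wl * (xr / zr)) / (wl + wr);
    mapy = (wr * (yl / zl) + wl * (yr / zr)) / (wl + wr);
}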
|
||||
|
||||
}}}}
|
||||
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
814
modules/cuda/src/cuda/hog.cu
Normal file
@@ -0,0 +1,814 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
#include "opencv2/core/cuda/reduce.hpp"
|
||||
#include "opencv2/core/cuda/functional.hpp"
|
||||
#include "opencv2/core/cuda/warp_shuffle.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
// Other values are not supported
|
||||
#define CELL_WIDTH 8
|
||||
#define CELL_HEIGHT 8
|
||||
#define CELLS_PER_BLOCK_X 2
|
||||
#define CELLS_PER_BLOCK_Y 2
|
||||
|
||||
namespace hog
|
||||
{
|
||||
__constant__ int cnbins;
|
||||
__constant__ int cblock_stride_x;
|
||||
__constant__ int cblock_stride_y;
|
||||
__constant__ int cnblocks_win_x;
|
||||
__constant__ int cnblocks_win_y;
|
||||
__constant__ int cblock_hist_size;
|
||||
__constant__ int cblock_hist_size_2up;
|
||||
__constant__ int cdescr_size;
|
||||
__constant__ int cdescr_width;
|
||||
|
||||
|
||||
/* Returns the nearest upper power of two, works only for
|
||||
the typical GPU thread count (per block) values */
|
||||
int power_2up(unsigned int n)
|
||||
{
|
||||
if (n < 1) return 1;
|
||||
else if (n < 2) return 2;
|
||||
else if (n < 4) return 4;
|
||||
else if (n < 8) return 8;
|
||||
else if (n < 16) return 16;
|
||||
else if (n < 32) return 32;
|
||||
else if (n < 64) return 64;
|
||||
else if (n < 128) return 128;
|
||||
else if (n < 256) return 256;
|
||||
else if (n < 512) return 512;
|
||||
else if (n < 1024) return 1024;
|
||||
return -1; // Input is too big
|
||||
}
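
// Equivalent branch-free rounding for the same 0..1023 range handled above (a sketch, not
// part of this module): returns the next power of two strictly greater than n, matching
// the if/else chain of power_2up().
static int power_2up_bits(unsigned int n)
{
    n |= n >> 1; n |= n >> 2; n |= n >> 4;
    n |= n >> 8; n |= n >> 16;
    return (int)(n + 1);
}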
|
||||
|
||||
|
||||
void set_up_constants(int nbins, int block_stride_x, int block_stride_y,
|
||||
int nblocks_win_x, int nblocks_win_y)
|
||||
{
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) );
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) );
|
||||
|
||||
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) );
|
||||
|
||||
int block_hist_size_2up = power_2up(block_hist_size);
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up)) );
|
||||
|
||||
int descr_width = nblocks_win_x * block_hist_size;
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cdescr_width, &descr_width, sizeof(descr_width)) );
|
||||
|
||||
int descr_size = descr_width * nblocks_win_y;
|
||||
cudaSafeCall( cudaMemcpyToSymbol(cdescr_size, &descr_size, sizeof(descr_size)) );
|
||||
}
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Histogram computation
|
||||
|
||||
|
||||
template <int nblocks> // Number of histogram blocks processed by single GPU thread block
|
||||
__global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad,
|
||||
const PtrStepb qangle, float scale, float* block_hists)
|
||||
{
|
||||
const int block_x = threadIdx.z;
|
||||
const int cell_x = threadIdx.x / 16;
|
||||
const int cell_y = threadIdx.y;
|
||||
const int cell_thread_x = threadIdx.x & 0xF;
|
||||
|
||||
if (blockIdx.x * blockDim.z + block_x >= img_block_width)
|
||||
return;
|
||||
|
||||
extern __shared__ float smem[];
|
||||
float* hists = smem;
|
||||
float* final_hist = smem + cnbins * 48 * nblocks;
|
||||
|
||||
const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x +
|
||||
4 * cell_x + cell_thread_x;
|
||||
const int offset_y = blockIdx.y * cblock_stride_y + 4 * cell_y;
|
||||
|
||||
const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2;
|
||||
const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2;
|
||||
|
||||
// 12 means that 12 pixels affect the block's cell (in one row)
|
||||
if (cell_thread_x < 12)
|
||||
{
|
||||
float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y +
|
||||
cell_x + block_x * CELLS_PER_BLOCK_X) +
|
||||
cell_thread_x;
|
||||
for (int bin_id = 0; bin_id < cnbins; ++bin_id)
|
||||
hist[bin_id * 48 * nblocks] = 0.f;
|
||||
|
||||
const int dist_x = -4 + (int)cell_thread_x - 4 * cell_x;
|
||||
|
||||
const int dist_y_begin = -4 - 4 * (int)threadIdx.y;
|
||||
for (int dist_y = dist_y_begin; dist_y < dist_y_begin + 12; ++dist_y)
|
||||
{
|
||||
float2 vote = *(const float2*)grad_ptr;
|
||||
uchar2 bin = *(const uchar2*)qangle_ptr;
|
||||
|
||||
grad_ptr += grad.step/sizeof(float);
|
||||
qangle_ptr += qangle.step;
|
||||
|
||||
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);
|
||||
int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);
|
||||
|
||||
float gaussian = ::expf(-(dist_center_y * dist_center_y +
|
||||
dist_center_x * dist_center_x) * scale);
|
||||
float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) *
|
||||
(8.f - ::fabs(dist_x + 0.5f)) / 64.f;
|
||||
|
||||
hist[bin.x * 48 * nblocks] += gaussian * interp_weight * vote.x;
|
||||
hist[bin.y * 48 * nblocks] += gaussian * interp_weight * vote.y;
|
||||
}
|
||||
|
||||
volatile float* hist_ = hist;
|
||||
for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += 48 * nblocks)
|
||||
{
|
||||
if (cell_thread_x < 6) hist_[0] += hist_[6];
|
||||
if (cell_thread_x < 3) hist_[0] += hist_[3];
|
||||
if (cell_thread_x == 0)
|
||||
final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id]
|
||||
= hist_[0] + hist_[1] + hist_[2];
|
||||
}
|
||||
}
|
||||
|
||||
__syncthreads();
|
||||
|
||||
float* block_hist = block_hists + (blockIdx.y * img_block_width +
|
||||
blockIdx.x * blockDim.z + block_x) *
|
||||
cblock_hist_size;
|
||||
|
||||
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 16 + cell_thread_x;
|
||||
if (tid < cblock_hist_size)
|
||||
block_hist[tid] = final_hist[block_x * cblock_hist_size + tid];
|
||||
}
|
||||
|
||||
|
||||
void compute_hists(int nbins, int block_stride_x, int block_stride_y,
|
||||
int height, int width, const PtrStepSzf& grad,
|
||||
const PtrStepSzb& qangle, float sigma, float* block_hists)
|
||||
{
|
||||
const int nblocks = 1;
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
|
||||
block_stride_x;
|
||||
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) /
|
||||
block_stride_y;
|
||||
|
||||
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
|
||||
dim3 threads(32, 2, nblocks);
|
||||
|
||||
cudaSafeCall(cudaFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>,
|
||||
cudaFuncCachePreferL1));
|
||||
|
||||
// Precompute gaussian spatial window parameter
|
||||
float scale = 1.f / (2.f * sigma * sigma);
|
||||
|
||||
int hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * 12 * nblocks) * sizeof(float);
|
||||
int final_hists_size = (nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y * nblocks) * sizeof(float);
|
||||
int smem = hists_size + final_hists_size;
|
||||
compute_hists_kernel_many_blocks<nblocks><<<grid, threads, smem>>>(
|
||||
img_block_width, grad, qangle, scale, block_hists);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
|
||||
//-------------------------------------------------------------
|
||||
// Normalization of histograms via L2Hys_norm
|
||||
//
|
||||
|
||||
|
||||
template<int size>
|
||||
__device__ float reduce_smem(float* smem, float val)
|
||||
{
|
||||
unsigned int tid = threadIdx.x;
|
||||
float sum = val;
|
||||
|
||||
reduce<size>(smem, sum, tid, plus<float>());
|
||||
|
||||
if (size == 32)
|
||||
{
|
||||
#if __CUDA_ARCH__ >= 300
|
||||
return shfl(sum, 0);
|
||||
#else
|
||||
return smem[0];
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
#if __CUDA_ARCH__ >= 300
|
||||
if (threadIdx.x == 0)
|
||||
smem[0] = sum;
|
||||
#endif
|
||||
|
||||
__syncthreads();
|
||||
|
||||
return smem[0];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <int nthreads, // Number of threads which process one block histogram
|
||||
int nblocks> // Number of block histograms processed by one GPU thread block
|
||||
__global__ void normalize_hists_kernel_many_blocks(const int block_hist_size,
|
||||
const int img_block_width,
|
||||
float* block_hists, float threshold)
|
||||
{
|
||||
if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width)
|
||||
return;
|
||||
|
||||
float* hist = block_hists + (blockIdx.y * img_block_width +
|
||||
blockIdx.x * blockDim.z + threadIdx.z) *
|
||||
block_hist_size + threadIdx.x;
|
||||
|
||||
__shared__ float sh_squares[nthreads * nblocks];
|
||||
float* squares = sh_squares + threadIdx.z * nthreads;
|
||||
|
||||
float elem = 0.f;
|
||||
if (threadIdx.x < block_hist_size)
|
||||
elem = hist[0];
|
||||
|
||||
float sum = reduce_smem<nthreads>(squares, elem * elem);
|
||||
|
||||
float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size);
|
||||
elem = ::min(elem * scale, threshold);
|
||||
|
||||
sum = reduce_smem<nthreads>(squares, elem * elem);
|
||||
|
||||
scale = 1.0f / (::sqrtf(sum) + 1e-3f);
|
||||
|
||||
if (threadIdx.x < block_hist_size)
|
||||
hist[0] = elem * scale;
|
||||
}
|
||||
|
||||
|
||||
void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
|
||||
int height, int width, float* block_hists, float threshold)
|
||||
{
|
||||
const int nblocks = 1;
|
||||
|
||||
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;
|
||||
int nthreads = power_2up(block_hist_size);
|
||||
dim3 threads(nthreads, 1, nblocks);
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
|
||||
int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / block_stride_y;
|
||||
dim3 grid(divUp(img_block_width, nblocks), img_block_height);
|
||||
|
||||
if (nthreads == 32)
|
||||
normalize_hists_kernel_many_blocks<32, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
|
||||
else if (nthreads == 64)
|
||||
normalize_hists_kernel_many_blocks<64, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
|
||||
else if (nthreads == 128)
|
||||
normalize_hists_kernel_many_blocks<128, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
|
||||
else if (nthreads == 256)
|
||||
normalize_hists_kernel_many_blocks<256, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
|
||||
else if (nthreads == 512)
|
||||
normalize_hists_kernel_many_blocks<512, nblocks><<<grid, threads>>>(block_hist_size, img_block_width, block_hists, threshold);
|
||||
else
|
||||
CV_Error(cv::Error::StsBadArg, "normalize_hists: histogram's size is too big, try to decrease number of bins");
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
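
// Illustrative CPU reference of the per-block L2-Hys normalization performed by the kernel
// above (a sketch, not part of this module): L2-normalize, clip at `threshold`, then
// L2-normalize again. The two epsilons mirror the kernel's constants.
#include <algorithm>
#include <cmath>

static void l2hysNormalizeCpu(float* hist, int size, float threshold)
{
    float sum = 0.f;
    for (int i = 0; i < size; ++i) sum += hist[i] * hist[i];
    float scale = 1.f / (std::sqrt(sum) + 0.1f * size);

    sum = 0.f;
    for (int i = 0; i < size; ++i)
    {
        hist[i] = std::min(hist[i] * scale, threshold);   // clip after the first normalization
        sum += hist[i] * hist[i];
    }

    scale = 1.f / (std::sqrt(sum) + 1e-3f);
    for (int i = 0; i < size; ++i) hist[i] *= scale;      // final re-normalization
}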
|
||||
|
||||
|
||||
//---------------------------------------------------------------------
|
||||
// Linear SVM based classification
|
||||
//
|
||||
|
||||
// return confidence values, not just a positive/negative label per window
|
||||
template <int nthreads, // Number of threads per one histogram block
|
||||
int nblocks> // Number of histogram blocks processed by single GPU thread block
|
||||
__global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
|
||||
const int win_block_stride_x, const int win_block_stride_y,
|
||||
const float* block_hists, const float* coefs,
|
||||
float free_coef, float threshold, float* confidences)
|
||||
{
|
||||
const int win_x = threadIdx.z;
|
||||
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
|
||||
return;
|
||||
|
||||
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
|
||||
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
|
||||
cblock_hist_size;
|
||||
|
||||
float product = 0.f;
|
||||
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
|
||||
{
|
||||
int offset_y = i / cdescr_width;
|
||||
int offset_x = i - offset_y * cdescr_width;
|
||||
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
|
||||
}
|
||||
|
||||
__shared__ float products[nthreads * nblocks];
|
||||
|
||||
const int tid = threadIdx.z * nthreads + threadIdx.x;
|
||||
|
||||
reduce<nthreads>(products, product, tid, plus<float>());
|
||||
|
||||
if (threadIdx.x == 0)
|
||||
confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef;
|
||||
|
||||
}
|
||||
|
||||
void compute_confidence_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
|
||||
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
|
||||
float* coefs, float free_coef, float threshold, float *confidences)
|
||||
{
|
||||
const int nthreads = 256;
|
||||
const int nblocks = 1;
|
||||
|
||||
int win_block_stride_x = win_stride_x / block_stride_x;
|
||||
int win_block_stride_y = win_stride_y / block_stride_y;
|
||||
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
|
||||
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
|
||||
|
||||
dim3 threads(nthreads, 1, nblocks);
|
||||
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
|
||||
|
||||
cudaSafeCall(cudaFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>,
|
||||
cudaFuncCachePreferL1));
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /
|
||||
block_stride_x;
|
||||
compute_confidence_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
|
||||
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
|
||||
block_hists, coefs, free_coef, threshold, confidences);
|
||||
cudaSafeCall(cudaDeviceSynchronize());
|
||||
}
|
||||
|
||||
|
||||
|
||||
template <int nthreads, // Number of threads per one histogram block
|
||||
int nblocks> // Number of histogram blocks processed by single GPU thread block
|
||||
__global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,
|
||||
const int win_block_stride_x, const int win_block_stride_y,
|
||||
const float* block_hists, const float* coefs,
|
||||
float free_coef, float threshold, unsigned char* labels)
|
||||
{
|
||||
const int win_x = threadIdx.z;
|
||||
if (blockIdx.x * blockDim.z + win_x >= img_win_width)
|
||||
return;
|
||||
|
||||
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
|
||||
blockIdx.x * win_block_stride_x * blockDim.z + win_x) *
|
||||
cblock_hist_size;
|
||||
|
||||
float product = 0.f;
|
||||
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
|
||||
{
|
||||
int offset_y = i / cdescr_width;
|
||||
int offset_x = i - offset_y * cdescr_width;
|
||||
product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x];
|
||||
}
|
||||
|
||||
__shared__ float products[nthreads * nblocks];
|
||||
|
||||
const int tid = threadIdx.z * nthreads + threadIdx.x;
|
||||
|
||||
reduce<nthreads>(products, product, tid, plus<float>());
|
||||
|
||||
if (threadIdx.x == 0)
|
||||
labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold);
|
||||
}
|
||||
|
||||
|
||||
void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,
|
||||
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
|
||||
float* coefs, float free_coef, float threshold, unsigned char* labels)
|
||||
{
|
||||
const int nthreads = 256;
|
||||
const int nblocks = 1;
|
||||
|
||||
int win_block_stride_x = win_stride_x / block_stride_x;
|
||||
int win_block_stride_y = win_stride_y / block_stride_y;
|
||||
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
|
||||
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
|
||||
|
||||
dim3 threads(nthreads, 1, nblocks);
|
||||
dim3 grid(divUp(img_win_width, nblocks), img_win_height);
|
||||
|
||||
cudaSafeCall(cudaFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, cudaFuncCachePreferL1));
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
|
||||
classify_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(
|
||||
img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,
|
||||
block_hists, coefs, free_coef, threshold, labels);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
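
// Illustrative CPU reference of the per-window linear SVM evaluation used by the two kernels
// above (a sketch, not part of this module). win_block_x/win_block_y are the coordinates of
// the window's top-left block in the block-histogram grid; the other names mirror the kernel
// parameters.
static bool classifyWindowCpu(const float* block_hists, int img_block_width, int block_hist_size,
                              int descr_size, int descr_width,
                              int win_block_x, int win_block_y,
                              const float* coefs, float free_coef, float threshold)
{
    const float* hist = block_hists +
        (win_block_y * img_block_width + win_block_x) * block_hist_size;

    float product = 0.f;
    for (int i = 0; i < descr_size; ++i)
    {
        int offset_y = i / descr_width;                 // block row inside the window
        int offset_x = i - offset_y * descr_width;      // element inside that row of blocks
        product += coefs[i] * hist[offset_y * img_block_width * block_hist_size + offset_x];
    }
    return product + free_coef >= threshold;            // SVM decision for this window
}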
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Extract descriptors
|
||||
|
||||
|
||||
template <int nthreads>
|
||||
__global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y,
|
||||
const float* block_hists, PtrStepf descriptors)
|
||||
{
|
||||
// Get left top corner of the window in src
|
||||
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
|
||||
blockIdx.x * win_block_stride_x) * cblock_hist_size;
|
||||
|
||||
// Get left top corner of the window in dst
|
||||
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
|
||||
|
||||
// Copy elements from src to dst
|
||||
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
|
||||
{
|
||||
int offset_y = i / cdescr_width;
|
||||
int offset_x = i - offset_y * cdescr_width;
|
||||
descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x,
|
||||
int height, int width, float* block_hists, PtrStepSzf descriptors)
|
||||
{
|
||||
const int nthreads = 256;
|
||||
|
||||
int win_block_stride_x = win_stride_x / block_stride_x;
|
||||
int win_block_stride_y = win_stride_y / block_stride_y;
|
||||
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
|
||||
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
|
||||
dim3 threads(nthreads, 1);
|
||||
dim3 grid(img_win_width, img_win_height);
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
|
||||
extract_descrs_by_rows_kernel<nthreads><<<grid, threads>>>(
|
||||
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
|
||||
template <int nthreads>
|
||||
__global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x,
|
||||
const int win_block_stride_y, const float* block_hists,
|
||||
PtrStepf descriptors)
|
||||
{
|
||||
// Get left top corner of the window in src
|
||||
const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +
|
||||
blockIdx.x * win_block_stride_x) * cblock_hist_size;
|
||||
|
||||
// Get left top corner of the window in dst
|
||||
float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x);
|
||||
|
||||
// Copy elements from src to dst
|
||||
for (int i = threadIdx.x; i < cdescr_size; i += nthreads)
|
||||
{
|
||||
int block_idx = i / cblock_hist_size;
|
||||
int idx_in_block = i - block_idx * cblock_hist_size;
|
||||
|
||||
int y = block_idx / cnblocks_win_x;
|
||||
int x = block_idx - y * cnblocks_win_x;
|
||||
|
||||
descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block]
|
||||
= hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,
|
||||
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
|
||||
PtrStepSzf descriptors)
|
||||
{
|
||||
const int nthreads = 256;
|
||||
|
||||
int win_block_stride_x = win_stride_x / block_stride_x;
|
||||
int win_block_stride_y = win_stride_y / block_stride_y;
|
||||
int img_win_width = (width - win_width + win_stride_x) / win_stride_x;
|
||||
int img_win_height = (height - win_height + win_stride_y) / win_stride_y;
|
||||
dim3 threads(nthreads, 1);
|
||||
dim3 grid(img_win_width, img_win_height);
|
||||
|
||||
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;
|
||||
extract_descrs_by_cols_kernel<nthreads><<<grid, threads>>>(
|
||||
img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// Gradients computation
|
||||
|
||||
|
||||
template <int nthreads, int correct_gamma>
|
||||
__global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img,
|
||||
float angle_scale, PtrStepf grad, PtrStepb qangle)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
|
||||
const uchar4* row = (const uchar4*)img.ptr(blockIdx.y);
|
||||
|
||||
__shared__ float sh_row[(nthreads + 2) * 3];
|
||||
|
||||
uchar4 val;
|
||||
if (x < width)
|
||||
val = row[x];
|
||||
else
|
||||
val = row[width - 2];
|
||||
|
||||
sh_row[threadIdx.x + 1] = val.x;
|
||||
sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y;
|
||||
sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z;
|
||||
|
||||
if (threadIdx.x == 0)
|
||||
{
|
||||
val = row[::max(x - 1, 1)];
|
||||
sh_row[0] = val.x;
|
||||
sh_row[(nthreads + 2)] = val.y;
|
||||
sh_row[2 * (nthreads + 2)] = val.z;
|
||||
}
|
||||
|
||||
if (threadIdx.x == blockDim.x - 1)
|
||||
{
|
||||
val = row[::min(x + 1, width - 2)];
|
||||
sh_row[blockDim.x + 1] = val.x;
|
||||
sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y;
|
||||
sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z;
|
||||
}
|
||||
|
||||
__syncthreads();
|
||||
if (x < width)
|
||||
{
|
||||
float3 a, b;
|
||||
|
||||
b.x = sh_row[threadIdx.x + 2];
|
||||
b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)];
|
||||
b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)];
|
||||
a.x = sh_row[threadIdx.x];
|
||||
a.y = sh_row[threadIdx.x + (nthreads + 2)];
|
||||
a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)];
|
||||
|
||||
float3 dx;
|
||||
if (correct_gamma)
|
||||
dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
|
||||
else
|
||||
dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
|
||||
|
||||
float3 dy = make_float3(0.f, 0.f, 0.f);
|
||||
|
||||
if (blockIdx.y > 0 && blockIdx.y < height - 1)
|
||||
{
|
||||
val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x];
|
||||
a = make_float3(val.x, val.y, val.z);
|
||||
|
||||
val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x];
|
||||
b = make_float3(val.x, val.y, val.z);
|
||||
|
||||
if (correct_gamma)
|
||||
dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));
|
||||
else
|
||||
dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);
|
||||
}
|
||||
|
||||
float best_dx = dx.x;
|
||||
float best_dy = dy.x;
|
||||
|
||||
float mag0 = dx.x * dx.x + dy.x * dy.x;
|
||||
float mag1 = dx.y * dx.y + dy.y * dy.y;
|
||||
if (mag0 < mag1)
|
||||
{
|
||||
best_dx = dx.y;
|
||||
best_dy = dy.y;
|
||||
mag0 = mag1;
|
||||
}
|
||||
|
||||
mag1 = dx.z * dx.z + dy.z * dy.z;
|
||||
if (mag0 < mag1)
|
||||
{
|
||||
best_dx = dx.z;
|
||||
best_dy = dy.z;
|
||||
mag0 = mag1;
|
||||
}
|
||||
|
||||
mag0 = ::sqrtf(mag0);
|
||||
|
||||
float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f;
|
||||
int hidx = (int)::floorf(ang);
|
||||
ang -= hidx;
|
||||
hidx = (hidx + cnbins) % cnbins;
|
||||
|
||||
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
|
||||
((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img,
|
||||
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
|
||||
{
|
||||
(void)nbins;
|
||||
const int nthreads = 256;
|
||||
|
||||
dim3 bdim(nthreads, 1);
|
||||
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
|
||||
|
||||
if (correct_gamma)
|
||||
compute_gradients_8UC4_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
|
||||
else
|
||||
compute_gradients_8UC4_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
template <int nthreads, int correct_gamma>
|
||||
__global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img,
|
||||
float angle_scale, PtrStepf grad, PtrStepb qangle)
|
||||
{
|
||||
const int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
|
||||
const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y);
|
||||
|
||||
__shared__ float sh_row[nthreads + 2];
|
||||
|
||||
if (x < width)
|
||||
sh_row[threadIdx.x + 1] = row[x];
|
||||
else
|
||||
sh_row[threadIdx.x + 1] = row[width - 2];
|
||||
|
||||
if (threadIdx.x == 0)
|
||||
sh_row[0] = row[::max(x - 1, 1)];
|
||||
|
||||
if (threadIdx.x == blockDim.x - 1)
|
||||
sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)];
|
||||
|
||||
__syncthreads();
|
||||
if (x < width)
|
||||
{
|
||||
float dx;
|
||||
|
||||
if (correct_gamma)
|
||||
dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]);
|
||||
else
|
||||
dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x];
|
||||
|
||||
float dy = 0.f;
|
||||
if (blockIdx.y > 0 && blockIdx.y < height - 1)
|
||||
{
|
||||
float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x];
|
||||
float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x];
|
||||
if (correct_gamma)
|
||||
dy = ::sqrtf(a) - ::sqrtf(b);
|
||||
else
|
||||
dy = a - b;
|
||||
}
|
||||
float mag = ::sqrtf(dx * dx + dy * dy);
|
||||
|
||||
float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f;
|
||||
int hidx = (int)::floorf(ang);
|
||||
ang -= hidx;
|
||||
hidx = (hidx + cnbins) % cnbins;
|
||||
|
||||
((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins);
|
||||
((float2*) grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img,
|
||||
float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma)
|
||||
{
|
||||
(void)nbins;
|
||||
const int nthreads = 256;
|
||||
|
||||
dim3 bdim(nthreads, 1);
|
||||
dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y));
|
||||
|
||||
if (correct_gamma)
|
||||
compute_gradients_8UC1_kernel<nthreads, 1><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
|
||||
else
|
||||
compute_gradients_8UC1_kernel<nthreads, 0><<<gdim, bdim>>>(height, width, img, angle_scale, grad, qangle);
|
||||
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
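
// Illustrative CPU reference of the per-pixel gradient and orientation-bin interpolation
// computed by the 8UC1 kernel above (a sketch, not part of this module): border handling
// and gamma correction are omitted. angle_scale is nbins / (2 * pi), as configured by the
// host code; names are hypothetical.
#include <cmath>

static void gradientAtCpu(const unsigned char* img, int width, int height, int x, int y,
                          int nbins, float angle_scale, float vote[2], int bin[2])
{
    const float PI = 3.14159265f;
    auto px = [&](int yy, int xx) { return (float)img[yy * width + xx]; };

    float dx = (x > 0 && x + 1 < width)  ? px(y, x + 1) - px(y, x - 1) : 0.f;
    float dy = (y > 0 && y + 1 < height) ? px(y + 1, x) - px(y - 1, x) : 0.f;

    float mag = std::sqrt(dx * dx + dy * dy);
    float ang = (std::atan2(dy, dx) + PI) * angle_scale - 0.5f;
    int hidx = (int)std::floor(ang);
    ang -= hidx;
    hidx = (hidx + nbins) % nbins;

    bin[0] = hidx;               bin[1] = (hidx + 1) % nbins;  // two nearest orientation bins
    vote[0] = mag * (1.f - ang); vote[1] = mag * ang;          // split the magnitude linearly
}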
|
||||
|
||||
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// Resize
|
||||
|
||||
texture<uchar4, 2, cudaReadModeNormalizedFloat> resize8UC4_tex;
|
||||
texture<uchar, 2, cudaReadModeNormalizedFloat> resize8UC1_tex;
|
||||
|
||||
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs)
|
||||
{
|
||||
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x < dst.cols && y < dst.rows)
|
||||
dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255;
|
||||
}
|
||||
|
||||
__global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs)
|
||||
{
|
||||
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
|
||||
|
||||
if (x < dst.cols && y < dst.rows)
|
||||
{
|
||||
float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy);
|
||||
dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255);
|
||||
}
|
||||
}
|
||||
|
||||
template<class T, class TEX>
|
||||
static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex)
|
||||
{
|
||||
tex.filterMode = cudaFilterModeLinear;
|
||||
|
||||
size_t texOfs = 0;
|
||||
int colOfs = 0;
|
||||
|
||||
cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
|
||||
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
|
||||
|
||||
if (texOfs != 0)
|
||||
{
|
||||
colOfs = static_cast<int>( texOfs/sizeof(T) );
|
||||
cudaSafeCall( cudaUnbindTexture(tex) );
|
||||
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );
|
||||
}
|
||||
|
||||
dim3 threads(32, 8);
|
||||
dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y));
|
||||
|
||||
float sx = static_cast<float>(src.cols) / dst.cols;
|
||||
float sy = static_cast<float>(src.rows) / dst.rows;
|
||||
|
||||
resize_for_hog_kernel<<<grid, threads>>>(sx, sy, (PtrStepSz<T>)dst, colOfs);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
|
||||
cudaSafeCall( cudaUnbindTexture(tex) );
|
||||
}
|
||||
|
||||
void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
|
||||
void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
|
||||
} // namespace hog
|
||||
}}} // namespace cv { namespace cuda { namespace device
|
||||
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
303
modules/cuda/src/cuda/lbp.cu
Normal file
@@ -0,0 +1,303 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if !defined CUDA_DISABLER
|
||||
|
||||
#include "lbp.hpp"
|
||||
#include "opencv2/core/cuda/vec_traits.hpp"
|
||||
#include "opencv2/core/cuda/saturate_cast.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace lbp
|
||||
{
|
||||
struct LBP
|
||||
{
|
||||
__host__ __device__ __forceinline__ LBP() {}
|
||||
|
||||
__device__ __forceinline__ int operator() (const int* integral, int ty, int fh, int fw, int& shift) const
|
||||
{
|
||||
int anchors[9];
|
||||
|
||||
anchors[0] = integral[ty];
|
||||
anchors[1] = integral[ty + fw];
|
||||
anchors[0] -= anchors[1];
|
||||
anchors[2] = integral[ty + fw * 2];
|
||||
anchors[1] -= anchors[2];
|
||||
anchors[2] -= integral[ty + fw * 3];
|
||||
|
||||
ty += fh;
|
||||
anchors[3] = integral[ty];
|
||||
anchors[4] = integral[ty + fw];
|
||||
anchors[3] -= anchors[4];
|
||||
anchors[5] = integral[ty + fw * 2];
|
||||
anchors[4] -= anchors[5];
|
||||
anchors[5] -= integral[ty + fw * 3];
|
||||
|
||||
anchors[0] -= anchors[3];
|
||||
anchors[1] -= anchors[4];
|
||||
anchors[2] -= anchors[5];
|
||||
// 0 - 2 contains s0 - s2
|
||||
|
||||
ty += fh;
|
||||
anchors[6] = integral[ty];
|
||||
anchors[7] = integral[ty + fw];
|
||||
anchors[6] -= anchors[7];
|
||||
anchors[8] = integral[ty + fw * 2];
|
||||
anchors[7] -= anchors[8];
|
||||
anchors[8] -= integral[ty + fw * 3];
|
||||
|
||||
anchors[3] -= anchors[6];
|
||||
anchors[4] -= anchors[7];
|
||||
anchors[5] -= anchors[8];
|
||||
// 3 - 5 contains s3 - s5
|
||||
|
||||
anchors[0] -= anchors[4];
|
||||
anchors[1] -= anchors[4];
|
||||
anchors[2] -= anchors[4];
|
||||
anchors[3] -= anchors[4];
|
||||
anchors[5] -= anchors[4];
|
||||
|
||||
int response = (~(anchors[0] >> 31)) & 4;
|
||||
response |= (~(anchors[1] >> 31)) & 2;
|
||||
response |= (~(anchors[2] >> 31)) & 1;
|
||||
|
||||
shift = (~(anchors[5] >> 31)) & 16;
|
||||
shift |= (~(anchors[3] >> 31)) & 1;
|
||||
|
||||
ty += fh;
|
||||
anchors[0] = integral[ty];
|
||||
anchors[1] = integral[ty + fw];
|
||||
anchors[0] -= anchors[1];
|
||||
anchors[2] = integral[ty + fw * 2];
|
||||
anchors[1] -= anchors[2];
|
||||
anchors[2] -= integral[ty + fw * 3];
|
||||
|
||||
anchors[6] -= anchors[0];
|
||||
anchors[7] -= anchors[1];
|
||||
anchors[8] -= anchors[2];
|
||||
// 0 -2 contains s6 - s8
|
||||
|
||||
anchors[6] -= anchors[4];
|
||||
anchors[7] -= anchors[4];
|
||||
anchors[8] -= anchors[4];
|
||||
|
||||
shift |= (~(anchors[6] >> 31)) & 2;
|
||||
shift |= (~(anchors[7] >> 31)) & 4;
|
||||
shift |= (~(anchors[8] >> 31)) & 8;
|
||||
return response;
|
||||
}
|
||||
};
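
// Illustrative CPU computation of a multi-block LBP code (a sketch, not part of this module,
// and not the exact response/shift bit split produced by the functor above): each of the
// eight cells of a 3x3 block grid is summed via the integral image and compared with the
// centre cell. `integral` is (rows+1) x (cols+1) with the usual one-pixel zero border.
static int mbLbpCodeCpu(const int* integral, int stride, int tx, int ty, int fw, int fh)
{
    auto cellSum = [&](int cx, int cy)
    {
        int x0 = tx + cx * fw, y0 = ty + cy * fh;
        int x1 = x0 + fw,      y1 = y0 + fh;
        return integral[y1 * stride + x1] - integral[y0 * stride + x1]
             - integral[y1 * stride + x0] + integral[y0 * stride + x0];
    };

    const int centre = cellSum(1, 1);
    static const int nx[8] = {0, 1, 2, 2, 2, 1, 0, 0};   // clockwise, starting at top-left
    static const int ny[8] = {0, 0, 0, 1, 2, 2, 2, 1};

    int code = 0;
    for (int i = 0; i < 8; ++i)
        code = (code << 1) | (cellSum(nx[i], ny[i]) >= centre ? 1 : 0);
    return code;
}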
|
||||
|
||||
template<typename Pr>
|
||||
__global__ void disjoin(int4* candidates, int4* objects, unsigned int n, int groupThreshold, float grouping_eps, unsigned int* nclasses)
|
||||
{
|
||||
unsigned int tid = threadIdx.x;
|
||||
extern __shared__ int sbuff[];
|
||||
|
||||
int* labels = sbuff;
|
||||
int* rrects = sbuff + n;
|
||||
|
||||
Pr predicate(grouping_eps);
|
||||
partition(candidates, n, labels, predicate);
|
||||
|
||||
rrects[tid * 4 + 0] = 0;
|
||||
rrects[tid * 4 + 1] = 0;
|
||||
rrects[tid * 4 + 2] = 0;
|
||||
rrects[tid * 4 + 3] = 0;
|
||||
__syncthreads();
|
||||
|
||||
int cls = labels[tid];
|
||||
Emulation::smem::atomicAdd((rrects + cls * 4 + 0), candidates[tid].x);
|
||||
Emulation::smem::atomicAdd((rrects + cls * 4 + 1), candidates[tid].y);
|
||||
Emulation::smem::atomicAdd((rrects + cls * 4 + 2), candidates[tid].z);
|
||||
Emulation::smem::atomicAdd((rrects + cls * 4 + 3), candidates[tid].w);
|
||||
|
||||
__syncthreads();
|
||||
labels[tid] = 0;
|
||||
|
||||
__syncthreads();
|
||||
Emulation::smem::atomicInc((unsigned int*)labels + cls, n);
|
||||
|
||||
__syncthreads();
|
||||
*nclasses = 0;
|
||||
|
||||
int active = labels[tid];
|
||||
if (active)
|
||||
{
|
||||
int* r1 = rrects + tid * 4;
|
||||
float s = 1.f / active;
|
||||
r1[0] = saturate_cast<int>(r1[0] * s);
|
||||
r1[1] = saturate_cast<int>(r1[1] * s);
|
||||
r1[2] = saturate_cast<int>(r1[2] * s);
|
||||
r1[3] = saturate_cast<int>(r1[3] * s);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
if (active && active >= groupThreshold)
|
||||
{
|
||||
int* r1 = rrects + tid * 4;
|
||||
int4 r_out = make_int4(r1[0], r1[1], r1[2], r1[3]);
|
||||
|
||||
int aidx = Emulation::smem::atomicInc(nclasses, n);
|
||||
objects[aidx] = r_out;
|
||||
}
|
||||
}
|
||||
|
||||
void connectedConmonents(PtrStepSz<int4> candidates, int ncandidates, PtrStepSz<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses)
|
||||
{
|
||||
if (!ncandidates) return;
|
||||
int block = ncandidates;
|
||||
int smem = block * ( sizeof(int) + sizeof(int4) );
|
||||
disjoin<InSameComponint><<<1, block, smem>>>(candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses);
|
||||
cudaSafeCall( cudaGetLastError() );
|
||||
}
|
||||
|
||||
struct Cascade
|
||||
{
|
||||
__host__ __device__ __forceinline__ Cascade(const Stage* _stages, int _nstages, const ClNode* _nodes, const float* _leaves,
|
||||
const int* _subsets, const uchar4* _features, int _subsetSize)
|
||||
|
||||
: stages(_stages), nstages(_nstages), nodes(_nodes), leaves(_leaves), subsets(_subsets), features(_features), subsetSize(_subsetSize){}
|
||||
|
||||
__device__ __forceinline__ bool operator() (int y, int x, int* integral, const int pitch) const
|
||||
{
|
||||
int current_node = 0;
|
||||
int current_leave = 0;
|
||||
|
||||
for (int s = 0; s < nstages; ++s)
|
||||
{
|
||||
float sum = 0;
|
||||
Stage stage = stages[s];
|
||||
for (int t = 0; t < stage.ntrees; t++)
|
||||
{
|
||||
ClNode node = nodes[current_node];
|
||||
uchar4 feature = features[node.featureIdx];
|
||||
|
||||
int shift;
|
||||
int c = evaluator(integral, (y + feature.y) * pitch + x + feature.x, feature.w * pitch, feature.z, shift);
|
||||
int idx = (subsets[ current_node * subsetSize + c] & ( 1 << shift)) ? current_leave : current_leave + 1;
|
||||
sum += leaves[idx];
|
||||
|
||||
current_node += 1;
|
||||
current_leave += 2;
|
||||
}
|
||||
|
||||
if (sum < stage.threshold)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const Stage* stages;
|
||||
const int nstages;
|
||||
|
||||
const ClNode* nodes;
|
||||
const float* leaves;
|
||||
const int* subsets;
|
||||
const uchar4* features;
|
||||
|
||||
const int subsetSize;
|
||||
const LBP evaluator;
|
||||
};
|
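Inside the stage loop above each weak classifier is a one-split categorical tree: the evaluator returns a bucket index c plus a bit position shift, and the bit subsets[current_node * subsetSize + c] & (1 << shift) selects which of the node's two leaves contributes to the stage sum. The scalar sketch below shows the same lookup for a single flat LBP code; flattening (c, shift) into one 0..255 code and all names here are assumptions for illustration:

// One weak classifier: a categorical split stored as a bitmask over LBP codes.
// With subsetSize == 8, the eight 32-bit words cover the 256 possible codes.
static inline float weakResponse(const int* subset, const float* leaves, int code)
{
    int word = code >> 5;                          // which 32-bit word of the bitmask
    int bit  = code & 31;                          // which bit inside that word
    bool firstLeaf = (subset[word] & (1 << bit)) != 0;
    return firstLeaf ? leaves[0] : leaves[1];
}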
||||
|
||||
// flat thread id -> window within a scale: y = tid_k / width_k, x = tid_k - y * width_k, where width_k is the number of windows per row at that scale
|
||||
__global__ void lbp_cascade(const Cascade cascade, int frameW, int frameH, int windowW, int windowH, float scale, const float factor,
|
||||
const int total, int* integral, const int pitch, PtrStepSz<int4> objects, unsigned int* classified)
|
||||
{
|
||||
int ftid = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
if (ftid >= total) return;
|
||||
|
||||
int step = (scale <= 2.f);
|
||||
|
||||
int windowsForLine = (__float2int_rn( __fdividef(frameW, scale)) - windowW) >> step;
|
||||
int stotal = windowsForLine * ( (__float2int_rn( __fdividef(frameH, scale)) - windowH) >> step);
|
||||
int wshift = 0;
|
||||
|
||||
int scaleTid = ftid;
|
||||
|
||||
while (scaleTid >= stotal)
|
||||
{
|
||||
scaleTid -= stotal;
|
||||
wshift += __float2int_rn(__fdividef(frameW, scale)) + 1;
|
||||
scale *= factor;
|
||||
step = (scale <= 2.f);
|
||||
windowsForLine = ( ((__float2int_rn(__fdividef(frameW, scale)) - windowW) >> step));
|
||||
stotal = windowsForLine * ( (__float2int_rn(__fdividef(frameH, scale)) - windowH) >> step);
|
||||
}
|
||||
|
||||
int y = __fdividef(scaleTid, windowsForLine);
|
||||
int x = scaleTid - y * windowsForLine;
|
||||
|
||||
x <<= step;
|
||||
y <<= step;
|
||||
|
||||
if (cascade(y, x + wshift, integral, pitch))
|
||||
{
|
||||
if(x >= __float2int_rn(__fdividef(frameW, scale)) - windowW) return;
|
||||
|
||||
int4 rect;
|
||||
rect.x = __float2int_rn(x * scale);
|
||||
rect.y = __float2int_rn(y * scale);
|
||||
rect.z = __float2int_rn(windowW * scale);
|
||||
rect.w = __float2int_rn(windowH * scale);
|
||||
|
||||
int res = atomicInc(classified, (unsigned int)objects.cols);
|
||||
objects(0, res) = rect;
|
||||
}
|
||||
}
|
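The kernel above flattens every scanning window at every pyramid scale into one 1-D index: the while loop peels whole scales off the flat thread id, scales at or below 2x use a 2-pixel stride (step == 1), and wshift tracks where each rescaled frame starts inside the packed integral image. A host-side sketch of the same bookkeeping, for instance to size the workAmount passed to classifyPyramid below; the stopping rule and the rounding here are assumptions rather than code taken from this file:

// Rough count of scanning windows over the whole scale pyramid.
#include <cmath>

static int totalWindows(int frameW, int frameH, int windowW, int windowH,
                        float initialScale, float factor)
{
    int total = 0;
    for (float scale = initialScale;
         frameW / scale >= windowW && frameH / scale >= windowH;
         scale *= factor)
    {
        int step = (scale <= 2.f) ? 1 : 0;         // 2-pixel stride at small scales
        int perRow = ((int)std::lround(frameW / scale) - windowW) >> step;
        int rows   = ((int)std::lround(frameH / scale) - windowH) >> step;
        total += perRow * rows;
    }
    return total;
}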
||||
|
||||
void classifyPyramid(int frameW, int frameH, int windowW, int windowH, float initialScale, float factor, int workAmount,
|
||||
const PtrStepSzb& mstages, const int nstages, const PtrStepSzi& mnodes, const PtrStepSzf& mleaves, const PtrStepSzi& msubsets, const PtrStepSzb& mfeatures,
|
||||
const int subsetSize, PtrStepSz<int4> objects, unsigned int* classified, PtrStepSzi integral)
|
||||
{
|
||||
const int block = 128;
|
||||
int grid = divUp(workAmount, block);
|
||||
cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1);
|
||||
Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
|
||||
lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), (int)integral.step / sizeof(int), objects, classified);
|
||||
}
|
||||
}
|
||||
}}}
|
||||
|
||||
#endif /* CUDA_DISABLER */
|
112
modules/cuda/src/cuda/lbp.hpp
Normal file
@@ -0,0 +1,112 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_GPU_DEVICE_LBP_HPP_
|
||||
#define __OPENCV_GPU_DEVICE_LBP_HPP_
|
||||
|
||||
#include "opencv2/core/cuda/common.hpp"
|
||||
#include "opencv2/core/cuda/emulation.hpp"
|
||||
|
||||
namespace cv { namespace cuda { namespace device {
|
||||
|
||||
namespace lbp {
|
||||
|
||||
struct Stage
|
||||
{
|
||||
int first;
|
||||
int ntrees;
|
||||
float threshold;
|
||||
};
|
||||
|
||||
struct ClNode
|
||||
{
|
||||
int left;
|
||||
int right;
|
||||
int featureIdx;
|
||||
};
|
||||
|
||||
struct InSameComponint
|
||||
{
|
||||
public:
|
||||
__device__ __forceinline__ InSameComponint(float _eps) : eps(_eps) {}
|
||||
__device__ __forceinline__ InSameComponint(const InSameComponint& other) : eps(other.eps) {}
|
||||
|
||||
__device__ __forceinline__ bool operator()(const int4& r1, const int4& r2) const
|
||||
{
|
||||
float delta = eps * (::min(r1.z, r2.z) + ::min(r1.w, r2.w)) * 0.5f;
|
||||
|
||||
return ::abs(r1.x - r2.x) <= delta && ::abs(r1.y - r2.y) <= delta
|
||||
&& ::abs(r1.x + r1.z - r2.x - r2.z) <= delta && ::abs(r1.y + r1.w - r2.y - r2.w) <= delta;
|
||||
}
|
||||
float eps;
|
||||
};
|
||||
|
||||
template<typename Pr>
|
||||
__device__ __forceinline__ void partition(int4* vec, unsigned int n, int* labels, Pr predicate)
|
||||
{
|
||||
unsigned tid = threadIdx.x;
|
||||
labels[tid] = tid;
|
||||
__syncthreads();
|
||||
for (unsigned int id = 0; id < n; id++)
|
||||
{
|
||||
if (tid != id && predicate(vec[tid], vec[id]))
|
||||
{
|
||||
int p = labels[tid];
|
||||
int q = labels[id];
|
||||
if (p < q)
|
||||
{
|
||||
Emulation::smem::atomicMin(labels + id, p);
|
||||
}
|
||||
else if (p > q)
|
||||
{
|
||||
Emulation::smem::atomicMin(labels + tid, q);
|
||||
}
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
} // lbp
|
||||
|
||||
} } }// namespaces
|
||||
|
||||
#endif
|
96
modules/cuda/src/global_motion.cpp
Normal file
@@ -0,0 +1,96 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
using namespace cv;
|
||||
using namespace cv::cuda;
|
||||
|
||||
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
|
||||
|
||||
void cv::cuda::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
|
||||
void cv::cuda::calcWobbleSuppressionMaps(
|
||||
int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
|
||||
|
||||
#else
|
||||
|
||||
namespace cv { namespace cuda { namespace device { namespace globmotion {
|
||||
|
||||
int compactPoints(int N, float *points0, float *points1, const uchar *mask);
|
||||
|
||||
void calcWobbleSuppressionMaps(
|
||||
int left, int idx, int right, int width, int height,
|
||||
const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy);
|
||||
|
||||
}}}}
|
||||
|
||||
void cv::cuda::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
|
||||
{
|
||||
CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
|
||||
CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
|
||||
CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);
|
||||
|
||||
int npoints = points0.cols;
|
||||
int remaining = cv::cuda::device::globmotion::compactPoints(
|
||||
npoints, (float*)points0.data, (float*)points1.data, mask.data);
|
||||
|
||||
points0 = points0.colRange(0, remaining);
|
||||
points1 = points1.colRange(0, remaining);
|
||||
}
|
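compactPoints keeps the columns of two synchronized 1xN CV_32FC2 point rows whose 8-bit mask entry is non-zero (typically the inlier mask from a robust motion estimate) and shrinks both headers to the surviving columns. A small usage sketch, with made-up data and assuming the module header opencv2/cuda.hpp introduced by this commit:

// Usage sketch for cv::cuda::compactPoints.
#include <opencv2/core.hpp>
#include <opencv2/cuda.hpp>

void compactExample()
{
    cv::Mat p0(1, 3, CV_32FC2, cv::Scalar(0, 0));
    cv::Mat p1(1, 3, CV_32FC2, cv::Scalar(1, 1));
    cv::Mat mask(1, 3, CV_8U, cv::Scalar(1));
    mask.at<uchar>(0, 1) = 0;                      // drop the middle pair

    cv::cuda::GpuMat d_p0(p0), d_p1(p1), d_mask(mask);
    cv::cuda::compactPoints(d_p0, d_p1, d_mask);   // both GpuMats now have 2 columns

    cv::Mat kept;
    d_p0.download(kept);                           // the two surviving points
}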
||||
|
||||
|
||||
void cv::cuda::calcWobbleSuppressionMaps(
|
||||
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
|
||||
GpuMat &mapx, GpuMat &mapy)
|
||||
{
|
||||
CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous());
|
||||
CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous());
|
||||
|
||||
mapx.create(size, CV_32F);
|
||||
mapy.create(size, CV_32F);
|
||||
|
||||
cv::cuda::device::globmotion::calcWobbleSuppressionMaps(
|
||||
left, idx, right, size.width, size.height,
|
||||
ml.ptr<float>(), mr.ptr<float>(), mapx, mapy);
|
||||
}
|
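The wrapper above only validates the two 3x3 CV_32F homographies, allocates the float map pair and forwards to the device code; the resulting mapx/mapy are intended to be applied with a remap. A usage sketch with made-up frame indices, assuming cv::cuda::remap from the cudawarping module (already a dependency in precomp.hpp) is available:

// Usage sketch for cv::cuda::calcWobbleSuppressionMaps.
#include <opencv2/core.hpp>
#include <opencv2/cuda.hpp>
#include <opencv2/cudawarping.hpp>

void wobbleExample(const cv::cuda::GpuMat& frame,          // frame to unwarp
                   const cv::Mat& ml, const cv::Mat& mr)   // 3x3 CV_32F, continuous
{
    cv::cuda::GpuMat mapx, mapy, result;
    // left / idx / right are neighbouring frame indices in the stabilization pipeline.
    cv::cuda::calcWobbleSuppressionMaps(0, 1, 2, frame.size(), ml, mr, mapx, mapy);
    cv::cuda::remap(frame, result, mapx, mapy, cv::INTER_LINEAR);
}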
||||
|
||||
#endif
|
282
modules/cuda/src/graphcuts.cpp
Normal file
@@ -0,0 +1,282 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
|
||||
|
||||
void cv::cuda::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
|
||||
void cv::cuda::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
|
||||
|
||||
void cv::cuda::connectivityMask(const GpuMat&, GpuMat&, const cv::Scalar&, const cv::Scalar&, Stream&) { throw_no_cuda(); }
|
||||
void cv::cuda::labelComponents(const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
|
||||
|
||||
#else /* !defined (HAVE_CUDA) */
|
||||
|
||||
namespace cv { namespace cuda { namespace device
|
||||
{
|
||||
namespace ccl
|
||||
{
|
||||
void labelComponents(const PtrStepSzb& edges, PtrStepSzi comps, int flags, cudaStream_t stream);
|
||||
|
||||
template<typename T>
|
||||
void computeEdges(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
static float4 scalarToCudaType(const cv::Scalar& in)
|
||||
{
|
||||
return make_float4((float)in[0], (float)in[1], (float)in[2], (float)in[3]);
|
||||
}
|
||||
|
||||
void cv::cuda::connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& s)
|
||||
{
|
||||
CV_Assert(!image.empty());
|
||||
|
||||
int ch = image.channels();
|
||||
CV_Assert(ch <= 4);
|
||||
|
||||
int depth = image.depth();
|
||||
|
||||
typedef void (*func_t)(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream);
|
||||
|
||||
static const func_t supportLookup[8][4] =
|
||||
{ // 1, 2, 3, 4
|
||||
{ device::ccl::computeEdges<uchar>, 0, device::ccl::computeEdges<uchar3>, device::ccl::computeEdges<uchar4> },// CV_8U
|
||||
{ 0, 0, 0, 0 },// CV_8S
|
||||
{ device::ccl::computeEdges<ushort>, 0, device::ccl::computeEdges<ushort3>, device::ccl::computeEdges<ushort4> },// CV_16U
|
||||
{ 0, 0, 0, 0 },// CV_16S
|
||||
{ device::ccl::computeEdges<int>, 0, 0, 0 },// CV_32S
|
||||
{ device::ccl::computeEdges<float>, 0, 0, 0 },// CV_32F
|
||||
{ 0, 0, 0, 0 },// CV_64F
|
||||
{ 0, 0, 0, 0 } // CV_USRTYPE1
|
||||
};
|
||||
|
||||
func_t f = supportLookup[depth][ch - 1];
|
||||
CV_Assert(f);
|
||||
|
||||
if (image.size() != mask.size() || mask.type() != CV_8UC1)
|
||||
mask.create(image.size(), CV_8UC1);
|
||||
|
||||
cudaStream_t stream = StreamAccessor::getStream(s);
|
||||
float4 culo = scalarToCudaType(lo), cuhi = scalarToCudaType(hi);
|
||||
f(image, mask, culo, cuhi, stream);
|
||||
}
|
||||
|
||||
void cv::cuda::labelComponents(const GpuMat& mask, GpuMat& components, int flags, Stream& s)
|
||||
{
|
||||
CV_Assert(!mask.empty() && mask.type() == CV_8U);
|
||||
|
||||
if (!deviceSupports(SHARED_ATOMICS))
|
||||
CV_Error(cv::Error::StsNotImplemented, "The device doesn't support shared atomics and communicative synchronization!");
|
||||
|
||||
components.create(mask.size(), CV_32SC1);
|
||||
|
||||
cudaStream_t stream = StreamAccessor::getStream(s);
|
||||
device::ccl::labelComponents(mask, components, flags, stream);
|
||||
}
|
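connectivityMask builds a per-pixel connectivity/edge mask from the input image (roughly, neighbours whose componentwise difference lies between lo and hi are treated as connected) and labelComponents then writes a CV_32SC1 component-id image; the device must support shared-memory atomics, as asserted above. A usage sketch, with threshold values chosen only for illustration:

// Usage sketch for GPU connected-component labelling.
#include <opencv2/core.hpp>
#include <opencv2/cuda.hpp>

void cclExample(const cv::cuda::GpuMat& image)             // e.g. a CV_8UC1 image
{
    cv::cuda::GpuMat mask, components;

    // Treat neighbours whose absolute difference is at most 10 as connected.
    cv::cuda::connectivityMask(image, mask, cv::Scalar::all(0), cv::Scalar::all(10),
                               cv::cuda::Stream::Null());

    cv::cuda::labelComponents(mask, components, 0, cv::cuda::Stream::Null());
}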
||||
|
||||
namespace
|
||||
{
|
||||
typedef NppStatus (*init_func_t)(NppiSize oSize, NppiGraphcutState** ppState, Npp8u* pDeviceMem);
|
||||
|
||||
class NppiGraphcutStateHandler
|
||||
{
|
||||
public:
|
||||
NppiGraphcutStateHandler(NppiSize sznpp, Npp8u* pDeviceMem, const init_func_t func)
|
||||
{
|
||||
nppSafeCall( func(sznpp, &pState, pDeviceMem) );
|
||||
}
|
||||
|
||||
~NppiGraphcutStateHandler()
|
||||
{
|
||||
nppSafeCall( nppiGraphcutFree(pState) );
|
||||
}
|
||||
|
||||
operator NppiGraphcutState*()
|
||||
{
|
||||
return pState;
|
||||
}
|
||||
|
||||
private:
|
||||
NppiGraphcutState* pState;
|
||||
};
|
||||
}
|
||||
|
||||
void cv::cuda::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf, Stream& s)
|
||||
{
|
||||
#if (CUDA_VERSION < 5000)
|
||||
CV_Assert(terminals.type() == CV_32S);
|
||||
#else
|
||||
CV_Assert(terminals.type() == CV_32S || terminals.type() == CV_32F);
|
||||
#endif
|
||||
|
||||
Size src_size = terminals.size();
|
||||
|
||||
CV_Assert(leftTransp.size() == Size(src_size.height, src_size.width));
|
||||
CV_Assert(leftTransp.type() == terminals.type());
|
||||
|
||||
CV_Assert(rightTransp.size() == Size(src_size.height, src_size.width));
|
||||
CV_Assert(rightTransp.type() == terminals.type());
|
||||
|
||||
CV_Assert(top.size() == src_size);
|
||||
CV_Assert(top.type() == terminals.type());
|
||||
|
||||
CV_Assert(bottom.size() == src_size);
|
||||
CV_Assert(bottom.type() == terminals.type());
|
||||
|
||||
labels.create(src_size, CV_8U);
|
||||
|
||||
NppiSize sznpp;
|
||||
sznpp.width = src_size.width;
|
||||
sznpp.height = src_size.height;
|
||||
|
||||
int bufsz;
|
||||
nppSafeCall( nppiGraphcutGetSize(sznpp, &bufsz) );
|
||||
|
||||
ensureSizeIsEnough(1, bufsz, CV_8U, buf);
|
||||
|
||||
cudaStream_t stream = StreamAccessor::getStream(s);
|
||||
|
||||
NppStreamHandler h(stream);
|
||||
|
||||
NppiGraphcutStateHandler state(sznpp, buf.ptr<Npp8u>(), nppiGraphcutInitAlloc);
|
||||
|
||||
#if (CUDA_VERSION < 5000)
|
||||
nppSafeCall( nppiGraphcut_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(), top.ptr<Npp32s>(), bottom.ptr<Npp32s>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
#else
|
||||
if (terminals.type() == CV_32S)
|
||||
{
|
||||
nppSafeCall( nppiGraphcut_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(), top.ptr<Npp32s>(), bottom.ptr<Npp32s>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
}
|
||||
else
|
||||
{
|
||||
nppSafeCall( nppiGraphcut_32f8u(terminals.ptr<Npp32f>(), leftTransp.ptr<Npp32f>(), rightTransp.ptr<Npp32f>(), top.ptr<Npp32f>(), bottom.ptr<Npp32f>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
}
|
||||
#endif
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
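The 4-connected graphcut overload above expects the capacity matrices laid out exactly as asserted: terminals, top and bottom have the image size, the left/right capacities are passed transposed, all share one type, and labels comes back as CV_8U; buf is scratch space that is grown on demand. A sketch of the expected shapes, leaving the capacity values to the application:

// Shape sketch for the 4-connected cv::cuda::graphcut overload.
#include <opencv2/core.hpp>
#include <opencv2/cuda.hpp>

void graphcutExample(int rows, int cols)
{
    const int type = CV_32S;                       // CV_32F is also accepted with CUDA >= 5.0

    cv::cuda::GpuMat terminals(rows, cols, type);  // source/sink capacities
    cv::cuda::GpuMat top(rows, cols, type), bottom(rows, cols, type);
    cv::cuda::GpuMat leftTransp(cols, rows, type), rightTransp(cols, rows, type);

    // ... fill the capacity matrices before running the cut ...

    cv::cuda::GpuMat labels, buf;
    cv::cuda::graphcut(terminals, leftTransp, rightTransp, top, bottom, labels, buf,
                       cv::cuda::Stream::Null());
}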
||||
|
||||
void cv::cuda::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
|
||||
GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, GpuMat& labels, GpuMat& buf, Stream& s)
|
||||
{
|
||||
#if (CUDA_VERSION < 5000)
|
||||
CV_Assert(terminals.type() == CV_32S);
|
||||
#else
|
||||
CV_Assert(terminals.type() == CV_32S || terminals.type() == CV_32F);
|
||||
#endif
|
||||
|
||||
Size src_size = terminals.size();
|
||||
|
||||
CV_Assert(leftTransp.size() == Size(src_size.height, src_size.width));
|
||||
CV_Assert(leftTransp.type() == terminals.type());
|
||||
|
||||
CV_Assert(rightTransp.size() == Size(src_size.height, src_size.width));
|
||||
CV_Assert(rightTransp.type() == terminals.type());
|
||||
|
||||
CV_Assert(top.size() == src_size);
|
||||
CV_Assert(top.type() == terminals.type());
|
||||
|
||||
CV_Assert(topLeft.size() == src_size);
|
||||
CV_Assert(topLeft.type() == terminals.type());
|
||||
|
||||
CV_Assert(topRight.size() == src_size);
|
||||
CV_Assert(topRight.type() == terminals.type());
|
||||
|
||||
CV_Assert(bottom.size() == src_size);
|
||||
CV_Assert(bottom.type() == terminals.type());
|
||||
|
||||
CV_Assert(bottomLeft.size() == src_size);
|
||||
CV_Assert(bottomLeft.type() == terminals.type());
|
||||
|
||||
CV_Assert(bottomRight.size() == src_size);
|
||||
CV_Assert(bottomRight.type() == terminals.type());
|
||||
|
||||
labels.create(src_size, CV_8U);
|
||||
|
||||
NppiSize sznpp;
|
||||
sznpp.width = src_size.width;
|
||||
sznpp.height = src_size.height;
|
||||
|
||||
int bufsz;
|
||||
nppSafeCall( nppiGraphcut8GetSize(sznpp, &bufsz) );
|
||||
|
||||
ensureSizeIsEnough(1, bufsz, CV_8U, buf);
|
||||
|
||||
cudaStream_t stream = StreamAccessor::getStream(s);
|
||||
|
||||
NppStreamHandler h(stream);
|
||||
|
||||
NppiGraphcutStateHandler state(sznpp, buf.ptr<Npp8u>(), nppiGraphcut8InitAlloc);
|
||||
|
||||
#if (CUDA_VERSION < 5000)
|
||||
nppSafeCall( nppiGraphcut8_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(),
|
||||
top.ptr<Npp32s>(), topLeft.ptr<Npp32s>(), topRight.ptr<Npp32s>(),
|
||||
bottom.ptr<Npp32s>(), bottomLeft.ptr<Npp32s>(), bottomRight.ptr<Npp32s>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
#else
|
||||
if (terminals.type() == CV_32S)
|
||||
{
|
||||
nppSafeCall( nppiGraphcut8_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(),
|
||||
top.ptr<Npp32s>(), topLeft.ptr<Npp32s>(), topRight.ptr<Npp32s>(),
|
||||
bottom.ptr<Npp32s>(), bottomLeft.ptr<Npp32s>(), bottomRight.ptr<Npp32s>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
}
|
||||
else
|
||||
{
|
||||
nppSafeCall( nppiGraphcut8_32f8u(terminals.ptr<Npp32f>(), leftTransp.ptr<Npp32f>(), rightTransp.ptr<Npp32f>(),
|
||||
top.ptr<Npp32f>(), topLeft.ptr<Npp32f>(), topRight.ptr<Npp32f>(),
|
||||
bottom.ptr<Npp32f>(), bottomLeft.ptr<Npp32f>(), bottomRight.ptr<Npp32f>(),
|
||||
static_cast<int>(terminals.step), static_cast<int>(leftTransp.step), sznpp, labels.ptr<Npp8u>(), static_cast<int>(labels.step), state) );
|
||||
}
|
||||
#endif
|
||||
|
||||
if (stream == 0)
|
||||
cudaSafeCall( cudaDeviceSynchronize() );
|
||||
}
|
||||
|
||||
#endif /* !defined (HAVE_CUDA) */
|
1619
modules/cuda/src/hog.cpp
Normal file
File diff suppressed because it is too large
43
modules/cuda/src/precomp.cpp
Normal file
@@ -0,0 +1,43 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
60
modules/cuda/src/precomp.hpp
Normal file
@@ -0,0 +1,60 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_PRECOMP_H__
|
||||
#define __OPENCV_PRECOMP_H__
|
||||
|
||||
#include "opencv2/cuda.hpp"
|
||||
#include "opencv2/cudaarithm.hpp"
|
||||
#include "opencv2/cudawarping.hpp"
|
||||
#include "opencv2/calib3d.hpp"
|
||||
#include "opencv2/objdetect.hpp"
|
||||
|
||||
#include "opencv2/core/private.cuda.hpp"
|
||||
|
||||
#include "opencv2/opencv_modules.hpp"
|
||||
|
||||
#ifdef HAVE_OPENCV_CUDALEGACY
|
||||
# include "opencv2/cudalegacy/private.hpp"
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCV_PRECOMP_H__ */
|