Merge pull request #656 from jet47:gpu-nonfree

This commit is contained in:
cuda-geek 2013-03-21 08:57:08 +04:00 committed by OpenCV Buildbot
commit ecdc7da6b1
80 changed files with 2266 additions and 2011 deletions

View File

@ -3,7 +3,7 @@ if(ANDROID OR IOS)
endif()
set(the_description "GPU-accelerated Computer Vision")
ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video opencv_nonfree opencv_photo opencv_legacy)
ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video opencv_photo opencv_legacy)
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")

View File

@ -5,109 +5,6 @@ Feature Detection and Description
gpu::SURF_GPU
-------------
.. ocv:class:: gpu::SURF_GPU
Class used for extracting Speeded Up Robust Features (SURF) from an image. ::
class SURF_GPU
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_GPU();
//! the full constructor taking all the necessary parameters
explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
void uploadKeypoints(const vector<KeyPoint>& keypoints,
GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU,
vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU,
vector<float>& descriptors);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = keypointsRatio * img.size().area()
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
The class ``SURF_GPU`` implements the Speeded Up Robust Features (SURF) descriptor. A fast multi-scale Hessian keypoint detector can be used to find the keypoints (the default option), but descriptors can also be computed for user-specified keypoints. Only 8-bit grayscale images are supported.
The class ``SURF_GPU`` can store results in GPU and CPU memory. It provides functions to convert results between the CPU and GPU representations ( ``uploadKeypoints``, ``downloadKeypoints``, ``downloadDescriptors`` ). The format of CPU results is the same as that of ``SURF`` results. GPU results are stored in ``GpuMat``. The ``keypoints`` matrix is an :math:`\texttt{nFeatures} \times 7` matrix of the ``CV_32FC1`` type.
* ``keypoints.ptr<float>(X_ROW)[i]`` contains the x coordinate of the i-th feature.
* ``keypoints.ptr<float>(Y_ROW)[i]`` contains the y coordinate of the i-th feature.
* ``keypoints.ptr<float>(LAPLACIAN_ROW)[i]`` contains the Laplacian sign of the i-th feature.
* ``keypoints.ptr<float>(OCTAVE_ROW)[i]`` contains the octave of the i-th feature.
* ``keypoints.ptr<float>(SIZE_ROW)[i]`` contains the size of the i-th feature.
* ``keypoints.ptr<float>(ANGLE_ROW)[i]`` contains the orientation of the i-th feature.
* ``keypoints.ptr<float>(HESSIAN_ROW)[i]`` contains the response of the i-th feature.
The ``descriptors`` matrix is an :math:`\texttt{nFeatures} \times \texttt{descriptorSize}` matrix of the ``CV_32FC1`` type.
The class ``SURF_GPU`` uses internal buffers and provides access to them. All buffers can be safely released between function calls. A brief usage sketch is given below.
.. seealso:: :ocv:class:`SURF`
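
A minimal usage sketch of the interface described above (the ``opencv2/nonfree/gpu.hpp`` include reflects the move of ``SURF_GPU`` into the *nonfree* module performed by this change and is an assumption; the input file name is a placeholder): ::

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/gpu/gpu.hpp"
    #include "opencv2/nonfree/gpu.hpp" // assumed new location of SURF_GPU after this change

    int main()
    {
        // only 8-bit grayscale images are supported
        cv::Mat img = cv::imread("aloe.png", cv::IMREAD_GRAYSCALE);
        if (img.empty())
            return -1;

        cv::gpu::SURF_GPU surf(400.0); // hessianThreshold = 400, remaining parameters default

        cv::gpu::GpuMat d_img(img);
        cv::gpu::GpuMat d_keypoints, d_descriptors;

        // detect keypoints and compute descriptors on the GPU
        surf(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);

        // convert results to the usual CPU representation
        std::vector<cv::KeyPoint> keypoints;
        std::vector<float> descriptors;
        surf.downloadKeypoints(d_keypoints, keypoints);
        surf.downloadDescriptors(d_descriptors, descriptors);

        // the raw keypoint matrix can also be read row by row (see the layout above)
        cv::Mat h_keypoints(d_keypoints);
        if (!keypoints.empty())
        {
            float x = h_keypoints.ptr<float>(cv::gpu::SURF_GPU::X_ROW)[0];
            float y = h_keypoints.ptr<float>(cv::gpu::SURF_GPU::Y_ROW)[0];
            (void)x; (void)y;
        }

        surf.releaseMemory();
        return 0;
    }
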
gpu::FAST_GPU
-------------
.. ocv:class:: gpu::FAST_GPU

View File

@ -579,76 +579,6 @@ Releases all inner buffer's memory.
gpu::VIBE_GPU
-------------
.. ocv:class:: gpu::VIBE_GPU
Class used for background/foreground segmentation. ::
class VIBE_GPU
{
public:
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
void release();
...
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed to be foreground. The class implements the algorithm described in [VIBE2011]_; a brief usage sketch follows the method descriptions below.
gpu::VIBE_GPU::VIBE_GPU
-----------------------
The constructor.
.. ocv:function:: gpu::VIBE_GPU::VIBE_GPU(unsigned long rngSeed = 1234567)
:param rngSeed: Value used to initialize the random sequence.
The default constructor sets all parameters to default values.
gpu::VIBE_GPU::initialize
-------------------------
Initializes the background model and allocates all inner buffers.
.. ocv:function:: void gpu::VIBE_GPU::initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null())
:param firstFrame: First frame from video sequence.
:param stream: Stream for the asynchronous version.
gpu::VIBE_GPU::operator()
-------------------------
Updates the background model and returns the foreground mask.
.. ocv:function:: void gpu::VIBE_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null())
:param frame: Next video frame.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
gpu::VIBE_GPU::release
----------------------
Releases all inner buffers' memory.
.. ocv:function:: void gpu::VIBE_GPU::release()
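
A brief sketch of how the class documented above is driven, using its location in the ``gpu`` module prior to this change (the video file name is a placeholder): ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/gpu/gpu.hpp" // location of VIBE_GPU before this change

    int main()
    {
        cv::VideoCapture cap("video.avi"); // placeholder input
        if (!cap.isOpened())
            return -1;

        cv::Mat frame;
        cap >> frame;
        if (frame.empty())
            return -1;

        cv::gpu::VIBE_GPU vibe; // default RNG seed
        cv::gpu::GpuMat d_frame(frame);
        cv::gpu::GpuMat d_fgmask;

        vibe.initialize(d_frame); // build the background model from the first frame

        for (;;)
        {
            cap >> frame;
            if (frame.empty())
                break;

            d_frame.upload(frame);
            vibe(d_frame, d_fgmask); // update the model and produce the 8-bit foreground mask

            cv::Mat fgmask(d_fgmask); // download the mask for display or further processing
        }

        vibe.release();
        return 0;
    }
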
gpu::GMG_GPU
------------
.. ocv:class:: gpu::GMG_GPU
@ -1209,5 +1139,4 @@ Parse next video frame. Implementation must call this method after new frame was
.. [MOG2001] P. KaewTraKulPong and R. Bowden. *An improved adaptive background mixture model for real-time tracking with shadow detection*. Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001
.. [MOG2004] Z. Zivkovic. *Improved adaptive Gaussian mixture model for background subtraction*. International Conference Pattern Recognition, UK, August, 2004
.. [ShadowDetect2003] Prati, Mikic, Trivedi and Cucchiara. *Detecting Moving Shadows...*. IEEE PAMI, 2003
.. [VIBE2011] O. Barnich and M. Van Droogenbroeck. *ViBe: A universal background subtraction algorithm for video sequences*. IEEE Transactions on Image Processing, 20(6):1709-1724, June 2011
.. [GMG2012] A. Godbehere, A. Matsukawa and K. Goldberg. *Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive Audio Art Installation*. American Control Conference, Montreal, June 2012

View File

@ -1557,82 +1557,6 @@ private:
friend class CascadeClassifier_GPU_LBP;
};
////////////////////////////////// SURF //////////////////////////////////////////
class CV_EXPORTS SURF_GPU
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_GPU();
//! the full constructor taking all the necessary parameters
explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
void uploadKeypoints(const vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU, vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU, vector<float>& descriptors);
//! finds the keypoints using fast hessian detector used in SURF
//! supports CV_8UC1 images
//! keypoints will have nFeatures cols and 7 rows
//! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
//! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
//! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
//! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
//! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
//! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
//! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
//! finds the keypoints and computes their descriptors.
//! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
bool useProvidedKeypoints = false);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
////////////////////////////////// FAST //////////////////////////////////////////
class CV_EXPORTS FAST_GPU
@ -2307,41 +2231,6 @@ private:
GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel
};
/*!
* The class implements the following algorithm:
* "ViBe: A universal background subtraction algorithm for video sequences"
* O. Barnich and M. Van Droogenbroeck
* IEEE Transactions on Image Processing, 20(6) :1709-1724, June 2011
*/
class CV_EXPORTS VIBE_GPU
{
public:
//! the default constructor
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
//! re-initialization method
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
//! the update operator
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
//! releases all inner buffers
void release();
int nbSamples; // number of samples per pixel
int reqMatches; // #_min
int radius; // R
int subsamplingFactor; // amount of random subsampling
private:
Size frameSize_;
unsigned long rngSeed_;
GpuMat randStates_;
GpuMat samples_;
};
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// StereoBM

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
#define GPU_DENOISING_IMAGE_SIZES testing::Values(perf::szVGA, perf::sz720p)

View File

@ -2,105 +2,7 @@
using namespace std;
using namespace testing;
struct KeypointIdxCompare
{
std::vector<cv::KeyPoint>* keypoints;
explicit KeypointIdxCompare(std::vector<cv::KeyPoint>* _keypoints) : keypoints(_keypoints) {}
bool operator ()(size_t i1, size_t i2) const
{
cv::KeyPoint kp1 = (*keypoints)[i1];
cv::KeyPoint kp2 = (*keypoints)[i2];
if (kp1.pt.x != kp2.pt.x)
return kp1.pt.x < kp2.pt.x;
if (kp1.pt.y != kp2.pt.y)
return kp1.pt.y < kp2.pt.y;
if (kp1.response != kp2.response)
return kp1.response < kp2.response;
return kp1.octave < kp2.octave;
}
};
static void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors = cv::noArray())
{
std::vector<size_t> indexies(keypoints.size());
for (size_t i = 0; i < indexies.size(); ++i)
indexies[i] = i;
std::sort(indexies.begin(), indexies.end(), KeypointIdxCompare(&keypoints));
std::vector<cv::KeyPoint> new_keypoints;
cv::Mat new_descriptors;
new_keypoints.resize(keypoints.size());
cv::Mat descriptors;
if (_descriptors.needed())
{
descriptors = _descriptors.getMat();
new_descriptors.create(descriptors.size(), descriptors.type());
}
for (size_t i = 0; i < indexies.size(); ++i)
{
size_t new_idx = indexies[i];
new_keypoints[i] = keypoints[new_idx];
if (!new_descriptors.empty())
descriptors.row((int) new_idx).copyTo(new_descriptors.row((int) i));
}
keypoints.swap(new_keypoints);
if (_descriptors.needed())
new_descriptors.copyTo(_descriptors);
}
//////////////////////////////////////////////////////////////////////
// SURF
DEF_PARAM_TEST_1(Image, string);
PERF_TEST_P(Image, Features2D_SURF,
Values<string>("gpu/perf/aloe.png"))
{
declare.time(50.0);
const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
if (PERF_RUN_GPU())
{
cv::gpu::SURF_GPU d_surf;
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat d_keypoints, d_descriptors;
TEST_CYCLE() d_surf(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);
std::vector<cv::KeyPoint> gpu_keypoints;
d_surf.downloadKeypoints(d_keypoints, gpu_keypoints);
cv::Mat gpu_descriptors(d_descriptors);
sortKeyPoints(gpu_keypoints, gpu_descriptors);
SANITY_CHECK_KEYPOINTS(gpu_keypoints);
SANITY_CHECK(gpu_descriptors, 1e-3);
}
else
{
cv::SURF surf;
std::vector<cv::KeyPoint> cpu_keypoints;
cv::Mat cpu_descriptors;
TEST_CYCLE() surf(img, cv::noArray(), cpu_keypoints, cpu_descriptors);
SANITY_CHECK_KEYPOINTS(cpu_keypoints);
SANITY_CHECK(cpu_descriptors);
}
}
using namespace perf;
//////////////////////////////////////////////////////////////////////
// FAST

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// Blur

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
DEF_PARAM_TEST_1(Image, string);

View File

@ -1,70 +1,5 @@
#include "perf_precomp.hpp"
static void printOsInfo()
{
#if defined _WIN32
# if defined _WIN64
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined linux
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined __APPLE__
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x32.\n[----------]\n"), fflush(stdout);
# endif
#endif
}
static void printCudaInfo()
{
printOsInfo();
#ifndef HAVE_CUDA
printf("[----------]\n[ GPU INFO ] \tOpenCV was built without CUDA support.\n[----------]\n"), fflush(stdout);
#else
int driver;
cudaDriverGetVersion(&driver);
printf("[----------]\n"), fflush(stdout);
printf("[ GPU INFO ] \tCUDA Driver version: %d.\n", driver), fflush(stdout);
printf("[ GPU INFO ] \tCUDA Runtime version: %d.\n", CUDART_VERSION), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[ GPU INFO ] \tGPU module was compiled for the following GPU archs.\n"), fflush(stdout);
printf("[ BIN ] \t%s.\n", CUDA_ARCH_BIN), fflush(stdout);
printf("[ PTX ] \t%s.\n", CUDA_ARCH_PTX), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
int deviceCount = cv::gpu::getCudaEnabledDeviceCount();
printf("[ GPU INFO ] \tCUDA device count:: %d.\n", deviceCount), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
for (int i = 0; i < deviceCount; ++i)
{
cv::gpu::DeviceInfo info(i);
printf("[----------]\n"), fflush(stdout);
printf("[ DEVICE ] \t# %d %s.\n", i, info.name().c_str()), fflush(stdout);
printf("[ ] \tCompute capability: %d.%d\n", (int)info.majorVersion(), (int)info.minorVersion()), fflush(stdout);
printf("[ ] \tMulti Processor Count: %d\n", info.multiProcessorCount()), fflush(stdout);
printf("[ ] \tTotal memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0)), fflush(stdout);
printf("[ ] \tFree memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0)), fflush(stdout);
if (!info.isCompatible())
printf("[ GPU INFO ] \tThis device is NOT compatible with current GPU module build\n");
printf("[----------]\n"), fflush(stdout);
}
#endif
}
using namespace perf;
CV_PERF_TEST_MAIN(gpu, printCudaInfo())

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// SetTo

View File

@ -2,6 +2,7 @@
using namespace std;
using namespace testing;
using namespace perf;
///////////////////////////////////////////////////////////////
// HOG

View File

@ -20,6 +20,7 @@
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
@ -27,12 +28,9 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/photo/photo.hpp"
#include "utility.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -818,71 +818,6 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
}
}
//////////////////////////////////////////////////////
// VIBE
PERF_TEST_P(Video_Cn, Video_VIBE,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat foreground;
vibe(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
vibe(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////
// GMG

View File

@ -1,184 +0,0 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace cv;
Mat readImage(const string& fileName, int flags)
{
return imread(perf::TestBase::getDataPath(fileName), flags);
}
void PrintTo(const CvtColorInfo& info, ostream* os)
{
static const char* str[] =
{
"BGR2BGRA",
"BGRA2BGR",
"BGR2RGBA",
"RGBA2BGR",
"BGR2RGB",
"BGRA2RGBA",
"BGR2GRAY",
"RGB2GRAY",
"GRAY2BGR",
"GRAY2BGRA",
"BGRA2GRAY",
"RGBA2GRAY",
"BGR2BGR565",
"RGB2BGR565",
"BGR5652BGR",
"BGR5652RGB",
"BGRA2BGR565",
"RGBA2BGR565",
"BGR5652BGRA",
"BGR5652RGBA",
"GRAY2BGR565",
"BGR5652GRAY",
"BGR2BGR555",
"RGB2BGR555",
"BGR5552BGR",
"BGR5552RGB",
"BGRA2BGR555",
"RGBA2BGR555",
"BGR5552BGRA",
"BGR5552RGBA",
"GRAY2BGR555",
"BGR5552GRAY",
"BGR2XYZ",
"RGB2XYZ",
"XYZ2BGR",
"XYZ2RGB",
"BGR2YCrCb",
"RGB2YCrCb",
"YCrCb2BGR",
"YCrCb2RGB",
"BGR2HSV",
"RGB2HSV",
"",
"",
"BGR2Lab",
"RGB2Lab",
"BayerBG2BGR",
"BayerGB2BGR",
"BayerRG2BGR",
"BayerGR2BGR",
"BGR2Luv",
"RGB2Luv",
"BGR2HLS",
"RGB2HLS",
"HSV2BGR",
"HSV2RGB",
"Lab2BGR",
"Lab2RGB",
"Luv2BGR",
"Luv2RGB",
"HLS2BGR",
"HLS2RGB",
"BayerBG2BGR_VNG",
"BayerGB2BGR_VNG",
"BayerRG2BGR_VNG",
"BayerGR2BGR_VNG",
"BGR2HSV_FULL",
"RGB2HSV_FULL",
"BGR2HLS_FULL",
"RGB2HLS_FULL",
"HSV2BGR_FULL",
"HSV2RGB_FULL",
"HLS2BGR_FULL",
"HLS2RGB_FULL",
"LBGR2Lab",
"LRGB2Lab",
"LBGR2Luv",
"LRGB2Luv",
"Lab2LBGR",
"Lab2LRGB",
"Luv2LBGR",
"Luv2LRGB",
"BGR2YUV",
"RGB2YUV",
"YUV2BGR",
"YUV2RGB",
"BayerBG2GRAY",
"BayerGB2GRAY",
"BayerRG2GRAY",
"BayerGR2GRAY",
//YUV 4:2:0 formats family
"YUV2RGB_NV12",
"YUV2BGR_NV12",
"YUV2RGB_NV21",
"YUV2BGR_NV21",
"YUV2RGBA_NV12",
"YUV2BGRA_NV12",
"YUV2RGBA_NV21",
"YUV2BGRA_NV21",
"YUV2RGB_YV12",
"YUV2BGR_YV12",
"YUV2RGB_IYUV",
"YUV2BGR_IYUV",
"YUV2RGBA_YV12",
"YUV2BGRA_YV12",
"YUV2RGBA_IYUV",
"YUV2BGRA_IYUV",
"YUV2GRAY_420",
//YUV 4:2:2 formats family
"YUV2RGB_UYVY",
"YUV2BGR_UYVY",
"YUV2RGB_VYUY",
"YUV2BGR_VYUY",
"YUV2RGBA_UYVY",
"YUV2BGRA_UYVY",
"YUV2RGBA_VYUY",
"YUV2BGRA_VYUY",
"YUV2RGB_YUY2",
"YUV2BGR_YUY2",
"YUV2RGB_YVYU",
"YUV2BGR_YVYU",
"YUV2RGBA_YUY2",
"YUV2BGRA_YUY2",
"YUV2RGBA_YVYU",
"YUV2BGRA_YVYU",
"YUV2GRAY_UYVY",
"YUV2GRAY_YUY2",
// alpha premultiplication
"RGBA2mRGBA",
"mRGBA2RGBA",
"COLORCVT_MAX"
};
*os << str[info.code];
}

View File

@ -1,63 +0,0 @@
#ifndef __OPENCV_PERF_GPU_UTILITY_HPP__
#define __OPENCV_PERF_GPU_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts/ts_perf.hpp"
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
using perf::MatType;
using perf::MatDepth;
CV_ENUM(BorderMode, cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT, cv::BORDER_REFLECT, cv::BORDER_WRAP)
#define ALL_BORDER_MODES testing::ValuesIn(BorderMode::all())
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
#define ALL_INTERPOLATIONS testing::ValuesIn(Interpolation::all())
CV_ENUM(NormType, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_HAMMING, cv::NORM_MINMAX)
enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
#define GPU_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
#define GPU_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
struct CvtColorInfo
{
int scn;
int dcn;
int code;
CvtColorInfo() {}
explicit CvtColorInfo(int scn_, int dcn_, int code_) : scn(scn_), dcn(dcn_), code(code_) {}
};
void PrintTo(const CvtColorInfo& info, std::ostream* os);
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< std::tr1::tuple< __VA_ARGS__ > > name
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
DEF_PARAM_TEST_1(Sz, cv::Size);
typedef perf::Size_MatType Sz_Type;
DEF_PARAM_TEST(Sz_Depth, cv::Size, MatDepth);
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, MatDepth, MatCn);
#define GPU_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
#define FAIL_NO_CPU() FAIL() << "No such CPU implementation analogy"
#define GPU_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
} while(0)
#define CPU_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat cpu_##mat(mat); \
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
} while(0)
#endif // __OPENCV_PERF_GPU_UTILITY_HPP__

View File

@ -9,69 +9,19 @@
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
static void printOsInfo()
{
#if defined _WIN32
# if defined _WIN64
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x64.\n[----------]\n"); fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x32.\n[----------]\n"); fflush(stdout);
# endif
#elif defined linux
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x64.\n[----------]\n"); fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x32.\n[----------]\n"); fflush(stdout);
# endif
#elif defined __APPLE__
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x64.\n[----------]\n"); fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x32.\n[----------]\n"); fflush(stdout);
# endif
#endif
}
static void printCudaInfo()
{
const int deviceCount = cv::gpu::getCudaEnabledDeviceCount();
printf("[----------]\n"); fflush(stdout);
printf("[ GPU INFO ] \tCUDA device count:: %d.\n", deviceCount); fflush(stdout);
printf("[----------]\n"); fflush(stdout);
for (int i = 0; i < deviceCount; ++i)
{
cv::gpu::DeviceInfo info(i);
printf("[----------]\n"); fflush(stdout);
printf("[ DEVICE ] \t# %d %s.\n", i, info.name().c_str()); fflush(stdout);
printf("[ ] \tCompute capability: %d.%d\n", info.majorVersion(), info.minorVersion()); fflush(stdout);
printf("[ ] \tMulti Processor Count: %d\n", info.multiProcessorCount()); fflush(stdout);
printf("[ ] \tTotal memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0)); fflush(stdout);
printf("[ ] \tFree memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0)); fflush(stdout);
if (!info.isCompatible())
printf("[ GPU INFO ] \tThis device is NOT compatible with current GPU module build\n");
printf("[----------]\n"); fflush(stdout);
}
}
#include "opencv2/ts/gpu_perf.hpp"
int main(int argc, char* argv[])
{
printOsInfo();
printCudaInfo();
perf::printCudaInfo();
perf::Regression::Init("nv_perf_test");
perf::Regression::Init("gpu_perf4au");
perf::TestBase::Init(argc, argv);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< std::tr1::tuple< __VA_ARGS__ > > name
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
//////////////////////////////////////////////////////////
// HoughLinesP

View File

@ -49,71 +49,6 @@ using namespace cv::gpu;
using namespace cvtest;
using namespace testing;
void printOsInfo()
{
#if defined _WIN32
# if defined _WIN64
cout << "OS: Windows x64 \n" << endl;
# else
cout << "OS: Windows x32 \n" << endl;
# endif
#elif defined linux
# if defined _LP64
cout << "OS: Linux x64 \n" << endl;
# else
cout << "OS: Linux x32 \n" << endl;
# endif
#elif defined __APPLE__
# if defined _LP64
cout << "OS: Apple x64 \n" << endl;
# else
cout << "OS: Apple x32 \n" << endl;
# endif
#endif
}
void printCudaInfo()
{
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
cout << "OpenCV was built without CUDA support \n" << endl;
#else
int driver;
cudaDriverGetVersion(&driver);
cout << "CUDA Driver version: " << driver << '\n';
cout << "CUDA Runtime version: " << CUDART_VERSION << '\n';
cout << endl;
cout << "GPU module was compiled for the following GPU archs:" << endl;
cout << " BIN: " << CUDA_ARCH_BIN << '\n';
cout << " PTX: " << CUDA_ARCH_PTX << '\n';
cout << endl;
int deviceCount = getCudaEnabledDeviceCount();
cout << "CUDA device count: " << deviceCount << '\n';
cout << endl;
for (int i = 0; i < deviceCount; ++i)
{
DeviceInfo info(i);
cout << "Device [" << i << "] \n";
cout << "\t Name: " << info.name() << '\n';
cout << "\t Compute capability: " << info.majorVersion() << '.' << info.minorVersion()<< '\n';
cout << "\t Multi Processor Count: " << info.multiProcessorCount() << '\n';
cout << "\t Total memory: " << static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0) << " Mb \n";
cout << "\t Free memory: " << static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0) << " Mb \n";
if (!info.isCompatible())
cout << "\t !!! This device is NOT compatible with current GPU module build \n";
cout << endl;
}
#endif
}
int main(int argc, char** argv)
{
try
@ -133,7 +68,6 @@ int main(int argc, char** argv)
return 0;
}
printOsInfo();
printCudaInfo();
if (cmd.get<bool>("info"))

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
//////////////////////////////////////////////////////
// FGDStatModel
@ -320,47 +322,6 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, MOG2, testing::Combine(
testing::Values(DetectShadow(true), DetectShadow(false)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// VIBE
PARAM_TEST_CASE(VIBE, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
};
GPU_TEST_P(VIBE, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int type = GET_PARAM(2);
const bool useRoi = GET_PARAM(3);
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0.0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
vibe.initialize(d_frame);
for (int i = 0; i < 20; ++i)
vibe(d_frame, d_fgmask);
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
vibe(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, VIBE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// GMG

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
//////////////////////////////////////////////////////////////////////////
// StereoBM

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cvtColor

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
namespace
{
IMPLEMENT_PARAM_CLASS(Border, int)

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////////////////////////////
// Merge

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////
// BilateralFilter

View File

@ -43,306 +43,7 @@
#ifdef HAVE_CUDA
namespace
{
bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
{
const double maxPtDif = 1.0;
const double maxSizeDif = 1.0;
const double maxAngleDif = 2.0;
const double maxResponseDif = 0.1;
double dist = cv::norm(p1.pt - p2.pt);
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
abs(p1.angle - p2.angle) < maxAngleDif &&
abs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
{
return true;
}
return false;
}
struct KeyPointLess : std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return kp1.pt.y < kp2.pt.y || (kp1.pt.y == kp2.pt.y && kp1.pt.x < kp2.pt.x);
}
};
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
if (gold.size() != actual.size())
{
return testing::AssertionFailure() << "KeyPoints size mistmach\n"
<< "\"" << gold_expr << "\" : " << gold.size() << "\n"
<< "\"" << actual_expr << "\" : " << actual.size();
}
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (!keyPointsEquals(p1, p2))
{
return testing::AssertionFailure() << "KeyPoints differ at " << i << "\n"
<< "\"" << gold_expr << "\" vs \"" << actual_expr << "\" : \n"
<< "pt : " << testing::PrintToString(p1.pt) << " vs " << testing::PrintToString(p2.pt) << "\n"
<< "size : " << p1.size << " vs " << p2.size << "\n"
<< "angle : " << p1.angle << " vs " << p2.angle << "\n"
<< "response : " << p1.response << " vs " << p2.response << "\n"
<< "octave : " << p1.octave << " vs " << p2.octave << "\n"
<< "class_id : " << p1.class_id << " vs " << p2.class_id;
}
}
return ::testing::AssertionSuccess();
}
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual);
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// SURF
namespace
{
IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
}
PARAM_TEST_CASE(SURF, cv::gpu::DeviceInfo, SURF_HessianThreshold, SURF_Octaves, SURF_OctaveLayers, SURF_Extended, SURF_Upright)
{
cv::gpu::DeviceInfo devInfo;
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
hessianThreshold = GET_PARAM(1);
nOctaves = GET_PARAM(2);
nOctaveLayers = GET_PARAM(3);
extended = GET_PARAM(4);
upright = GET_PARAM(5);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(SURF, Detector)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, cv::noArray(), keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
}
GPU_TEST_P(SURF, Detector_Masked)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), loadMat(mask), keypoints);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), loadMat(mask), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, mask, keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
}
GPU_TEST_P(SURF, Descriptor)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
cv::gpu::GpuMat descriptors;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints, descriptors);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf_gold(image, cv::noArray(), keypoints);
cv::gpu::GpuMat descriptors;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints, descriptors, true);
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
cv::BFMatcher matcher(cv::NORM_L2);
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
EXPECT_GT(matchedRatio, 0.6);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Features2D, SURF, testing::Combine(
ALL_DEVICES,
testing::Values(SURF_HessianThreshold(100.0), SURF_HessianThreshold(500.0), SURF_HessianThreshold(1000.0)),
testing::Values(SURF_Octaves(3), SURF_Octaves(4)),
testing::Values(SURF_OctaveLayers(2), SURF_OctaveLayers(3)),
testing::Values(SURF_Extended(false), SURF_Extended(true)),
testing::Values(SURF_Upright(false), SURF_Upright(true))));
using namespace cvtest;
/////////////////////////////////////////////////////////////////////////////////////////////////
// FAST

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
namespace
{
IMPLEMENT_PARAM_CLASS(KSize, cv::Size)

View File

@ -44,6 +44,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////////////////////////////
// SetTo

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// HoughLines

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Integral

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
//#define DUMP
struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor

View File

@ -43,6 +43,8 @@
#if defined(HAVE_CUDA) && defined(HAVE_OPENGL)
using namespace cvtest;
/////////////////////////////////////////////
// Buffer

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
//////////////////////////////////////////////////////
// BroxOpticalFlow

View File

@ -69,19 +69,19 @@
#include <cuda.h>
#include <cuda_runtime.h>
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/ts/gpu_test.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "utility.hpp"
#include "interpolation.hpp"
#include "main_test_nvidia.h"
#endif

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////
// pyrDown

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////
// Gold implementation

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////
// Gold implementation

View File

@ -44,6 +44,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
#if CUDA_VERSION >= 5000
struct Async : testing::TestWithParam<cv::gpu::DeviceInfo>

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
CV_ENUM(ThreshOp, cv::THRESH_BINARY, cv::THRESH_BINARY_INV, cv::THRESH_TRUNC, cv::THRESH_TOZERO, cv::THRESH_TOZERO_INV)
#define ALL_THRESH_OPS testing::Values(ThreshOp(cv::THRESH_BINARY), ThreshOp(cv::THRESH_BINARY_INV), ThreshOp(cv::THRESH_TRUNC), ThreshOp(cv::THRESH_TOZERO), ThreshOp(cv::THRESH_TOZERO_INV))

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
namespace
{
cv::Mat createTransfomMatrix(cv::Size srcSize, double angle)

View File

@ -43,6 +43,8 @@
#ifdef HAVE_CUDA
using namespace cvtest;
namespace
{
cv::Mat createTransfomMatrix(cv::Size srcSize, double angle)

View File

@ -1,407 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace std;
using namespace cv;
using namespace cv::gpu;
using namespace cvtest;
using namespace testing;
using namespace testing::internal;
//////////////////////////////////////////////////////////////////////
// random generators
int randomInt(int minVal, int maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
double randomDouble(double minVal, double maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
Size randomSize(int minVal, int maxVal)
{
return Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
}
Scalar randomScalar(double minVal, double maxVal)
{
return Scalar(randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal));
}
Mat randomMat(Size size, int type, double minVal, double maxVal)
{
return randomMat(TS::ptr()->get_rng(), size, type, minVal, maxVal, false);
}
//////////////////////////////////////////////////////////////////////
// GpuMat create
GpuMat createMat(Size size, int type, bool useRoi)
{
Size size0 = size;
if (useRoi)
{
size0.width += randomInt(5, 15);
size0.height += randomInt(5, 15);
}
GpuMat d_m(size0, type);
if (size0 != size)
d_m = d_m(Rect((size0.width - size.width) / 2, (size0.height - size.height) / 2, size.width, size.height));
return d_m;
}
GpuMat loadMat(const Mat& m, bool useRoi)
{
GpuMat d_m = createMat(m.size(), m.type(), useRoi);
d_m.upload(m);
return d_m;
}
//////////////////////////////////////////////////////////////////////
// Image load
Mat readImage(const std::string& fileName, int flags)
{
return imread(TS::ptr()->get_data_path() + fileName, flags);
}
Mat readImageType(const std::string& fname, int type)
{
Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cvtColor(src, temp, COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type), CV_MAT_DEPTH(type) == CV_32F ? 1.0 / 255.0 : 1.0);
return src;
}
//////////////////////////////////////////////////////////////////////
// Gpu devices
bool supportFeature(const DeviceInfo& info, FeatureSet feature)
{
return TargetArchs::builtWith(feature) && info.supports(feature);
}
DeviceManager& DeviceManager::instance()
{
static DeviceManager obj;
return obj;
}
void DeviceManager::load(int i)
{
devices_.clear();
devices_.reserve(1);
std::ostringstream msg;
if (i < 0 || i >= getCudaEnabledDeviceCount())
{
msg << "Incorrect device number - " << i;
throw runtime_error(msg.str());
}
DeviceInfo info(i);
if (!info.isCompatible())
{
msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";
throw runtime_error(msg.str());
}
devices_.push_back(info);
}
void DeviceManager::loadAll()
{
int deviceCount = getCudaEnabledDeviceCount();
devices_.clear();
devices_.reserve(deviceCount);
for (int i = 0; i < deviceCount; ++i)
{
DeviceInfo info(i);
if (info.isCompatible())
{
devices_.push_back(info);
}
}
}
//////////////////////////////////////////////////////////////////////
// Additional assertion
namespace
{
template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)
{
const int cn = m.channels();
std::ostringstream ostr;
ostr << "(";
p.x /= cn;
ostr << static_cast<OutT>(m.at<T>(p.y, p.x * cn));
for (int c = 1; c < m.channels(); ++c)
{
ostr << ", " << static_cast<OutT>(m.at<T>(p.y, p.x * cn + c));
}
ostr << ")";
return ostr.str();
}
std::string printMatVal(const Mat& m, Point p)
{
typedef std::string (*func_t)(const Mat& m, Point p);
static const func_t funcs[] =
{
printMatValImpl<uchar, int>, printMatValImpl<schar, int>, printMatValImpl<ushort, int>, printMatValImpl<short, int>,
printMatValImpl<int, int>, printMatValImpl<float, float>, printMatValImpl<double, double>
};
return funcs[m.depth()](m, p);
}
}
void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minLoc_, Point* maxLoc_, const Mat& mask)
{
if (src.depth() != CV_8S)
{
minMaxLoc(src, minVal_, maxVal_, minLoc_, maxLoc_, mask);
return;
}
// OpenCV's minMaxLoc doesn't support CV_8S type
double minVal = numeric_limits<double>::max();
Point minLoc(-1, -1);
double maxVal = -numeric_limits<double>::max();
Point maxLoc(-1, -1);
for (int y = 0; y < src.rows; ++y)
{
const schar* src_row = src.ptr<schar>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<uchar>(y);
for (int x = 0; x < src.cols; ++x)
{
if (!mask_row || mask_row[x])
{
schar val = src_row[x];
if (val < minVal)
{
minVal = val;
minLoc = cv::Point(x, y);
}
if (val > maxVal)
{
maxVal = val;
maxLoc = cv::Point(x, y);
}
}
}
}
if (minVal_) *minVal_ = minVal;
if (maxVal_) *maxVal_ = maxVal;
if (minLoc_) *minLoc_ = minLoc;
if (maxLoc_) *maxLoc_ = maxLoc;
}
Mat getMat(InputArray arr)
{
if (arr.kind() == _InputArray::GPU_MAT)
{
Mat m;
arr.getGpuMat().download(m);
return m;
}
return arr.getMat();
}
AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, InputArray m1_, InputArray m2_, double eps)
{
Mat m1 = getMat(m1_);
Mat m2 = getMat(m2_);
if (m1.size() != m2.size())
{
return AssertionFailure() << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different sizes : \""
<< expr1 << "\" [" << PrintToString(m1.size()) << "] vs \""
<< expr2 << "\" [" << PrintToString(m2.size()) << "]";
}
if (m1.type() != m2.type())
{
return AssertionFailure() << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different types : \""
<< expr1 << "\" [" << PrintToString(MatType(m1.type())) << "] vs \""
<< expr2 << "\" [" << PrintToString(MatType(m2.type())) << "]";
}
Mat diff;
absdiff(m1.reshape(1), m2.reshape(1), diff);
double maxVal = 0.0;
Point maxLoc;
minMaxLocGold(diff, 0, &maxVal, 0, &maxLoc);
if (maxVal > eps)
{
return AssertionFailure() << "The max difference between matrices \"" << expr1 << "\" and \"" << expr2
<< "\" is " << maxVal << " at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ")"
<< ", which exceeds \"" << eps_expr << "\", where \""
<< expr1 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m1, maxLoc) << ", \""
<< expr2 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m2, maxLoc) << ", \""
<< eps_expr << "\" evaluates to " << eps;
}
return AssertionSuccess();
}
double checkSimilarity(InputArray m1, InputArray m2)
{
Mat diff;
matchTemplate(getMat(m1), getMat(m2), diff, CV_TM_CCORR_NORMED);
return std::abs(diff.at<float>(0, 0) - 1.f);
}
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
vector<MatType> v;
v.reserve((depth_end - depth_start + 1) * (cn_end - cn_start + 1));
for (int depth = depth_start; depth <= depth_end; ++depth)
{
for (int cn = cn_start; cn <= cn_end; ++cn)
{
v.push_back(MatType(CV_MAKE_TYPE(depth, cn)));
}
}
return v;
}
const vector<MatType>& all_types()
{
static vector<MatType> v = types(CV_8U, CV_64F, 1, 4);
return v;
}
void cv::gpu::PrintTo(const DeviceInfo& info, ostream* os)
{
(*os) << info.name();
}
void PrintTo(const UseRoi& useRoi, std::ostream* os)
{
if (useRoi)
(*os) << "sub matrix";
else
(*os) << "whole matrix";
}
void PrintTo(const Inverse& inverse, std::ostream* os)
{
if (inverse)
(*os) << "inverse";
else
(*os) << "direct";
}
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const Mat& image)
{
imwrite(TS::ptr()->get_data_path() + fileName, image);
}
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
Mat gold = getMat(gold_);
Mat actual = getMat(actual_);
Mat diff;
absdiff(gold, actual, diff);
threshold(diff, diff, eps, 255.0, cv::THRESH_BINARY);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
waitKey();
}
#endif // HAVE_CUDA

View File

@ -1,332 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_TEST_UTILITY_HPP__
#define __OPENCV_GPU_TEST_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
//////////////////////////////////////////////////////////////////////
// random generators
int randomInt(int minVal, int maxVal);
double randomDouble(double minVal, double maxVal);
cv::Size randomSize(int minVal, int maxVal);
cv::Scalar randomScalar(double minVal, double maxVal);
cv::Mat randomMat(cv::Size size, int type, double minVal = 0.0, double maxVal = 255.0);
//////////////////////////////////////////////////////////////////////
// GpuMat create
cv::gpu::GpuMat createMat(cv::Size size, int type, bool useRoi = false);
cv::gpu::GpuMat loadMat(const cv::Mat& m, bool useRoi = false);
//////////////////////////////////////////////////////////////////////
// Image load
//! read image from testdata folder
cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
//! read image from testdata folder and convert it to specified type
cv::Mat readImageType(const std::string& fname, int type);
//////////////////////////////////////////////////////////////////////
// Gpu devices
//! return true if the device supports the specified feature and the gpu module was built with support for that feature.
bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature);
class DeviceManager
{
public:
static DeviceManager& instance();
void load(int i);
void loadAll();
const std::vector<cv::gpu::DeviceInfo>& values() const { return devices_; }
private:
std::vector<cv::gpu::DeviceInfo> devices_;
};
#define ALL_DEVICES testing::ValuesIn(DeviceManager::instance().values())
//////////////////////////////////////////////////////////////////////
// Additional assertion
void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
cv::Mat getMat(cv::InputArray arr);
testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
#define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(assertMatNear, m1, m2, eps)
#define ASSERT_MAT_NEAR(m1, m2, eps) ASSERT_PRED_FORMAT3(assertMatNear, m1, m2, eps)
#define EXPECT_SCALAR_NEAR(s1, s2, eps) \
{ \
EXPECT_NEAR(s1[0], s2[0], eps); \
EXPECT_NEAR(s1[1], s2[1], eps); \
EXPECT_NEAR(s1[2], s2[2], eps); \
EXPECT_NEAR(s1[3], s2[3], eps); \
}
#define ASSERT_SCALAR_NEAR(s1, s2, eps) \
{ \
ASSERT_NEAR(s1[0], s2[0], eps); \
ASSERT_NEAR(s1[1], s2[1], eps); \
ASSERT_NEAR(s1[2], s2[2], eps); \
ASSERT_NEAR(s1[3], s2[3], eps); \
}
#define EXPECT_POINT2_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
}
#define ASSERT_POINT2_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
}
#define EXPECT_POINT3_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
EXPECT_NEAR(p1.z, p2.z, eps); \
}
#define ASSERT_POINT3_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
ASSERT_NEAR(p1.z, p2.z, eps); \
}
double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
}
#define ASSERT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
ASSERT_LE(checkSimilarity(mat1, mat2), eps); \
}
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
#define GPU_TEST_P(test_case_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
virtual void TestBody(); \
private: \
void UnsafeTestBody(); \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
GetTestCasePatternHolder<test_case_name>(\
#test_case_name, __FILE__, __LINE__)->AddTestPattern(\
#test_case_name, \
#test_name, \
new ::testing::internal::TestMetaFactory< \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
{ \
try \
{ \
UnsafeTestBody(); \
} \
catch (...) \
{ \
cv::gpu::resetDevice(); \
throw; \
} \
} \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
#define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
namespace cv { namespace gpu
{
void PrintTo(const DeviceInfo& info, std::ostream* os);
}}
#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
// Depth
using perf::MatDepth;
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
// Type
using perf::MatType;
//! return vector with types from specified range.
std::vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end);
//! return vector with all types (depth: CV_8U-CV_64F, channels: 1-4).
const std::vector<MatType>& all_types();
#define ALL_TYPES testing::ValuesIn(all_types())
#define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
// ROI
class UseRoi
{
public:
inline UseRoi(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
void PrintTo(const UseRoi& useRoi, std::ostream* os);
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
// Direct/Inverse
class Inverse
{
public:
inline Inverse(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
void PrintTo(const Inverse& useRoi, std::ostream* os);
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
// Param class
#define IMPLEMENT_PARAM_CLASS(name, type) \
class name \
{ \
public: \
name ( type arg = type ()) : val_(arg) {} \
operator type () const {return val_;} \
private: \
type val_; \
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
IMPLEMENT_PARAM_CLASS(Channels, int)
#define ALL_CHANNELS testing::Values(Channels(1), Channels(2), Channels(3), Channels(4))
#define IMAGE_CHANNELS testing::Values(Channels(1), Channels(3), Channels(4))
// Flags and enums
CV_ENUM(NormCode, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_TYPE_MASK, cv::NORM_RELATIVE, cv::NORM_MINMAX)
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
CV_ENUM(BorderType, cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT, cv::BORDER_REFLECT, cv::BORDER_WRAP)
#define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT), BorderType(cv::BORDER_WRAP))
CV_FLAGS(WarpFlags, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::WARP_INVERSE_MAP)
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const cv::Mat& image);
void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
#endif // __OPENCV_GPU_TEST_UTILITY_HPP__

View File

@ -3,4 +3,28 @@ if(BUILD_ANDROID_PACKAGE)
endif()
set(the_description "Functionality with possible limitations on the use")
ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d)
ocv_add_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_gpu)
ocv_module_include_directories()
if(HAVE_CUDA AND HAVE_opencv_gpu)
ocv_source_group("Src\\Cuda" GLOB "src/cuda/*.cu")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include" ${CUDA_INCLUDE_DIRS})
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
file(GLOB lib_cuda "src/cuda/*.cu")
ocv_cuda_compile(cuda_objs ${lib_cuda})
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
else()
set(lib_cuda "")
set(cuda_objs "")
set(cuda_link_libs "")
endif()
ocv_glob_module_sources(SOURCES ${lib_cuda} ${cuda_objs})
ocv_create_module(${cuda_link_libs})
ocv_add_precompiled_headers(${the_module})
ocv_add_accuracy_tests()
ocv_add_perf_tests()

View File

@ -0,0 +1,79 @@
Background Subtraction
======================
.. highlight:: cpp
gpu::VIBE_GPU
-------------
.. ocv:class:: gpu::VIBE_GPU
Class used for background/foreground segmentation. ::
class VIBE_GPU
{
public:
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
void release();
...
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel which does not fit this model is then deemed to be foreground. The class implements the algorithm described in [VIBE2011]_.
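A minimal usage sketch (synchronous, frame by frame; ``cap`` is assumed to be an already opened ``cv::VideoCapture`` and is not part of the class)::

    cv::Mat frame;
    cap >> frame;                          // the first frame initializes the model

    cv::gpu::VIBE_GPU vibe;
    cv::gpu::GpuMat d_frame(frame), d_fgmask;
    vibe.initialize(d_frame);

    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;

        d_frame.upload(frame);
        vibe(d_frame, d_fgmask);           // update the model, get the foreground mask

        cv::Mat fgmask(d_fgmask);          // download the 8-bit binary mask when needed
    }

    vibe.release();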
gpu::VIBE_GPU::VIBE_GPU
-----------------------
The constructor.
.. ocv:function:: gpu::VIBE_GPU::VIBE_GPU(unsigned long rngSeed = 1234567)
:param rngSeed: Value used to seed the internal random sequence.
Default constructor sets all parameters to default values.
gpu::VIBE_GPU::initialize
-------------------------
Initializes the background model and allocates all inner buffers.
.. ocv:function:: void gpu::VIBE_GPU::initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null())
:param firstFrame: First frame from video sequence.
:param stream: Stream for the asynchronous version.
gpu::VIBE_GPU::operator()
-------------------------
Updates the background model and returns the foreground mask.
.. ocv:function:: void gpu::VIBE_GPU::operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null())
:param frame: Next video frame.
:param fgmask: The output foreground mask as an 8-bit binary image.
:param stream: Stream for the asynchronous version.
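For example, the update can be queued on a CUDA stream so it overlaps with other work (a sketch; ``d_frame`` and ``d_fgmask`` are assumed to be already allocated ``GpuMat`` objects)::

    cv::gpu::Stream stream;
    vibe(d_frame, d_fgmask, stream);   // asynchronous update
    stream.waitForCompletion();        // synchronize before reading the mask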
gpu::VIBE_GPU::release
----------------------
Releases the memory of all inner buffers.
.. ocv:function:: void gpu::VIBE_GPU::release()
.. [VIBE2011] O. Barnich and M. Van Droogenbroeck. *ViBe: A universal background subtraction algorithm for video sequences*. IEEE Transactions on Image Processing, 20(6):1709-1724, June 2011

View File

@ -127,3 +127,106 @@ Detects keypoints and computes SURF descriptors for them.
The function is parallelized with the TBB library.
If you are using the C version, make sure you call ``cv::initModule_nonfree()`` from ``nonfree/nonfree.hpp``.
gpu::SURF_GPU
-------------
.. ocv:class:: gpu::SURF_GPU
Class used for extracting Speeded Up Robust Features (SURF) from an image. ::
class SURF_GPU
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_GPU();
//! the full constructor taking all the necessary parameters
explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
void uploadKeypoints(const vector<KeyPoint>& keypoints,
GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU,
vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU,
vector<float>& descriptors);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = keypointsRatio * img.size().area()
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
The class ``SURF_GPU`` implements the Speeded Up Robust Features descriptor. A fast multi-scale Hessian keypoint detector can be used to find the keypoints (the default option), but descriptors can also be computed for user-specified keypoints. Only 8-bit grayscale images are supported.
The class ``SURF_GPU`` can store results in GPU and CPU memory. It provides functions to convert results between the CPU and GPU versions (``uploadKeypoints``, ``downloadKeypoints``, ``downloadDescriptors``). The format of CPU results is the same as ``SURF`` results. GPU results are stored in ``GpuMat``. The ``keypoints`` matrix is a :math:`\texttt{nFeatures} \times 7` matrix of the ``CV_32FC1`` type.
* ``keypoints.ptr<float>(X_ROW)[i]`` contains x coordinate of the i-th feature.
* ``keypoints.ptr<float>(Y_ROW)[i]`` contains y coordinate of the i-th feature.
* ``keypoints.ptr<float>(LAPLACIAN_ROW)[i]`` contains the laplacian sign of the i-th feature.
* ``keypoints.ptr<float>(OCTAVE_ROW)[i]`` contains the octave of the i-th feature.
* ``keypoints.ptr<float>(SIZE_ROW)[i]`` contains the size of the i-th feature.
* ``keypoints.ptr<float>(ANGLE_ROW)[i]`` contains the orientation of the i-th feature.
* ``keypoints.ptr<float>(HESSIAN_ROW)[i]`` contains the response of the i-th feature.
The ``descriptors`` matrix is a :math:`\texttt{nFeatures} \times \texttt{descriptorSize}` matrix of the ``CV_32FC1`` type.
The class ``SURF_GPU`` uses some buffers and provides access to them. All buffers can be safely released between function calls.
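A minimal sketch of detecting keypoints and downloading the results to the host (``image`` is assumed to be an 8-bit grayscale ``cv::Mat``)::

    cv::gpu::SURF_GPU surf(400.0);

    cv::gpu::GpuMat d_img(image);
    cv::gpu::GpuMat d_keypoints, d_descriptors;

    // detect keypoints and compute their descriptors on the GPU
    surf(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);

    // convert the results to the usual CPU representation
    std::vector<cv::KeyPoint> keypoints;
    std::vector<float> descriptors;
    surf.downloadKeypoints(d_keypoints, keypoints);
    surf.downloadDescriptors(d_descriptors, descriptors);

    // the raw GPU layout can also be inspected directly, e.g.
    // cv::Mat h_keypoints(d_keypoints);
    // float x = h_keypoints.ptr<float>(cv::gpu::SURF_GPU::X_ROW)[i];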
.. seealso:: :ocv:class:`SURF`

View File

@ -8,3 +8,4 @@ The module contains algorithms that may be patented in some countries or have so
:maxdepth: 2
feature_detection
background_subtraction

View File

@ -0,0 +1,169 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_NONFREE_GPU_HPP__
#define __OPENCV_NONFREE_GPU_HPP__
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU)
#include "opencv2/gpu/gpu.hpp"
namespace cv { namespace gpu {
class CV_EXPORTS SURF_GPU
{
public:
enum KeypointLayout
{
X_ROW = 0,
Y_ROW,
LAPLACIAN_ROW,
OCTAVE_ROW,
SIZE_ROW,
ANGLE_ROW,
HESSIAN_ROW,
ROWS_COUNT
};
//! the default constructor
SURF_GPU();
//! the full constructor taking all the necessary parameters
explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
//! download keypoints from device to host memory
void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);
//! finds the keypoints using fast hessian detector used in SURF
//! supports CV_8UC1 images
//! keypoints will have nFeatures cols and 7 rows (one per KeypointLayout entry)
//! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
//! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
//! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
//! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
//! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
//! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
//! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
//! finds the keypoints and computes their descriptors.
//! Optionally it can compute descriptors for user-provided keypoints and recompute the keypoints' orientation
void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false);
void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
bool useProvidedKeypoints = false);
void releaseMemory();
// SURF parameters
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
//! max keypoints = min(keypointsRatio * img.size().area(), 65535)
float keypointsRatio;
GpuMat sum, mask1, maskSum, intBuffer;
GpuMat det, trace;
GpuMat maxPosBuffer;
};
/*!
* The class implements the following algorithm:
* "ViBe: A universal background subtraction algorithm for video sequences"
* O. Barnich and M. Van Droogenbroeck
* IEEE Transactions on Image Processing, 20(6):1709-1724, June 2011
*/
class CV_EXPORTS VIBE_GPU
{
public:
//! the default constructor
explicit VIBE_GPU(unsigned long rngSeed = 1234567);
//! re-initialization method
void initialize(const GpuMat& firstFrame, Stream& stream = Stream::Null());
//! the update operator
void operator()(const GpuMat& frame, GpuMat& fgmask, Stream& stream = Stream::Null());
//! releases all inner buffers
void release();
int nbSamples; // number of samples per pixel
int reqMatches; // #_min
int radius; // R
int subsamplingFactor; // amount of random subsampling
private:
Size frameSize_;
unsigned long rngSeed_;
GpuMat randStates_;
GpuMat samples_;
};
} // namespace gpu
} // namespace cv
#endif // defined(HAVE_OPENCV_GPU)
#endif // __OPENCV_NONFREE_GPU_HPP__

View File

@ -0,0 +1,122 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
//////////////////////////////////////////////////////////////////////
// SURF
DEF_PARAM_TEST_1(Image, string);
PERF_TEST_P(Image, GPU_SURF,
Values<string>("gpu/perf/aloe.png"))
{
declare.time(50.0);
const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
if (PERF_RUN_GPU())
{
cv::gpu::SURF_GPU d_surf;
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat d_keypoints, d_descriptors;
TEST_CYCLE() d_surf(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);
std::vector<cv::KeyPoint> gpu_keypoints;
d_surf.downloadKeypoints(d_keypoints, gpu_keypoints);
cv::Mat gpu_descriptors(d_descriptors);
sortKeyPoints(gpu_keypoints, gpu_descriptors);
SANITY_CHECK_KEYPOINTS(gpu_keypoints);
SANITY_CHECK(gpu_descriptors, 1e-3);
}
else
{
cv::SURF surf;
std::vector<cv::KeyPoint> cpu_keypoints;
cv::Mat cpu_descriptors;
TEST_CYCLE() surf(img, cv::noArray(), cpu_keypoints, cpu_descriptors);
SANITY_CHECK_KEYPOINTS(cpu_keypoints);
SANITY_CHECK(cpu_descriptors);
}
}
//////////////////////////////////////////////////////
// VIBE
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, GPU_VIBE,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat foreground;
vibe(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
vibe(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
FAIL_NO_CPU();
}
}
#endif

View File

@ -1,3 +1,3 @@
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(nonfree)
CV_PERF_TEST_MAIN(nonfree, perf::printCudaInfo())

View File

@ -9,10 +9,18 @@
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "cvconfig.h"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/highgui/highgui.hpp"
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
#include "opencv2/nonfree/gpu.hpp"
#endif
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

View File

@ -55,6 +55,33 @@
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
namespace cv { namespace gpu { namespace device
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace surf
@ -717,6 +744,9 @@ namespace cv { namespace gpu { namespace device
int height;
};
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{

View File

@ -44,6 +44,18 @@
#include "opencv2/gpu/device/common.hpp"
namespace cv { namespace gpu { namespace device
{
namespace vibe
{
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor);
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
}
}}}
namespace cv { namespace gpu { namespace device
{
namespace vibe
@ -255,4 +267,4 @@ namespace cv { namespace gpu { namespace device
}}}
#endif /* CUDA_DISABLER */
#endif /* CUDA_DISABLER */

View File

@ -47,8 +47,23 @@
#include "cvconfig.h"
#endif
#include "opencv2/opencv_modules.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/internal.hpp"
#if defined(HAVE_OPENCV_GPU)
#include "opencv2/nonfree/gpu.hpp"
#if defined(HAVE_CUDA)
#include "opencv2/gpu/stream_accessor.hpp"
#include "opencv2/gpu/device/common.hpp"
static inline void throw_nogpu() { CV_Error(CV_StsNotImplemented, "The called functionality is disabled for current build or platform"); }
#else
static inline void throw_nogpu() { CV_Error(CV_GpuNotSupported, "The library is compiled without GPU support"); }
#endif
#endif
#endif

View File

@ -42,11 +42,12 @@
#include "precomp.hpp"
#if defined(HAVE_OPENCV_GPU)
using namespace cv;
using namespace cv::gpu;
using namespace std;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
#if !defined (HAVE_CUDA)
cv::gpu::SURF_GPU::SURF_GPU() { throw_nogpu(); }
cv::gpu::SURF_GPU::SURF_GPU(double, int, int, bool, float, bool) { throw_nogpu(); }
@ -61,7 +62,7 @@ void cv::gpu::SURF_GPU::operator()(const GpuMat&, const GpuMat&, vector<KeyPoint
void cv::gpu::SURF_GPU::operator()(const GpuMat&, const GpuMat&, vector<KeyPoint>&, vector<float>&, bool) { throw_nogpu(); }
void cv::gpu::SURF_GPU::releaseMemory() { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
#else // !defined (HAVE_CUDA)
namespace cv { namespace gpu { namespace device
{
@ -416,4 +417,6 @@ void cv::gpu::SURF_GPU::releaseMemory()
maxPosBuffer.release();
}
#endif /* !defined (HAVE_CUDA) */
#endif // !defined (HAVE_CUDA)
#endif // defined(HAVE_OPENCV_GPU)

View File

@ -42,6 +42,8 @@
#include "precomp.hpp"
#if defined(HAVE_OPENCV_GPU)
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
cv::gpu::VIBE_GPU::VIBE_GPU(unsigned long) { throw_nogpu(); }
@ -135,3 +137,5 @@ void cv::gpu::VIBE_GPU::release()
}
#endif
#endif // defined(HAVE_OPENCV_GPU)

View File

@ -0,0 +1,285 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
using namespace cvtest;
/////////////////////////////////////////////////////////////////////////////////////////////////
// SURF
namespace
{
IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
}
PARAM_TEST_CASE(SURF, cv::gpu::DeviceInfo, SURF_HessianThreshold, SURF_Octaves, SURF_OctaveLayers, SURF_Extended, SURF_Upright)
{
cv::gpu::DeviceInfo devInfo;
double hessianThreshold;
int nOctaves;
int nOctaveLayers;
bool extended;
bool upright;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
hessianThreshold = GET_PARAM(1);
nOctaves = GET_PARAM(2);
nOctaveLayers = GET_PARAM(3);
extended = GET_PARAM(4);
upright = GET_PARAM(5);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(SURF, Detector)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, cv::noArray(), keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
}
GPU_TEST_P(SURF, Detector_Masked)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), loadMat(mask), keypoints);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf(loadMat(image), loadMat(mask), keypoints);
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
std::vector<cv::KeyPoint> keypoints_gold;
surf_gold(image, mask, keypoints_gold);
ASSERT_EQ(keypoints_gold.size(), keypoints.size());
int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
EXPECT_GT(matchedRatio, 0.95);
}
}
GPU_TEST_P(SURF, Descriptor)
{
cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::gpu::SURF_GPU surf;
surf.hessianThreshold = hessianThreshold;
surf.nOctaves = nOctaves;
surf.nOctaveLayers = nOctaveLayers;
surf.extended = extended;
surf.upright = upright;
surf.keypointsRatio = 0.05f;
cv::SURF surf_gold;
surf_gold.hessianThreshold = hessianThreshold;
surf_gold.nOctaves = nOctaves;
surf_gold.nOctaveLayers = nOctaveLayers;
surf_gold.extended = extended;
surf_gold.upright = upright;
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
std::vector<cv::KeyPoint> keypoints;
cv::gpu::GpuMat descriptors;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints, descriptors);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
std::vector<cv::KeyPoint> keypoints;
surf_gold(image, cv::noArray(), keypoints);
cv::gpu::GpuMat descriptors;
surf(loadMat(image), cv::gpu::GpuMat(), keypoints, descriptors, true);
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
cv::BFMatcher matcher(cv::NORM_L2);
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
EXPECT_GT(matchedRatio, 0.6);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Features2D, SURF, testing::Combine(
ALL_DEVICES,
testing::Values(SURF_HessianThreshold(100.0), SURF_HessianThreshold(500.0), SURF_HessianThreshold(1000.0)),
testing::Values(SURF_Octaves(3), SURF_Octaves(4)),
testing::Values(SURF_OctaveLayers(2), SURF_OctaveLayers(3)),
testing::Values(SURF_Extended(false), SURF_Extended(true)),
testing::Values(SURF_Upright(false), SURF_Upright(true))));
//////////////////////////////////////////////////////
// VIBE
PARAM_TEST_CASE(VIBE, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
};
GPU_TEST_P(VIBE, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int type = GET_PARAM(2);
const bool useRoi = GET_PARAM(3);
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0.0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
vibe.initialize(d_frame);
for (int i = 0; i < 20; ++i)
vibe(d_frame, d_fgmask);
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
vibe(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, VIBE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4)),
WHOLE_SUBMAT));
#endif

View File

@ -1,3 +1,73 @@
#include "test_precomp.hpp"
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
using namespace cv;
using namespace cv::gpu;
using namespace cvtest;
using namespace testing;
int main(int argc, char** argv)
{
try
{
const char* keys =
"{ h | help ? | false | Print help}"
"{ i | info | false | Print information about system and exit }"
"{ d | device | -1 | Device on which tests will be executed (-1 means all devices) }"
;
CommandLineParser cmd(argc, (const char**)argv, keys);
if (cmd.get<bool>("help"))
{
cmd.printParams();
return 0;
}
printCudaInfo();
if (cmd.get<bool>("info"))
{
return 0;
}
int device = cmd.get<int>("device");
if (device < 0)
{
DeviceManager::instance().loadAll();
std::cout << "Run tests on all supported devices \n" << std::endl;
}
else
{
DeviceManager::instance().load(device);
DeviceInfo info(device);
std::cout << "Run tests on device " << device << " [" << info.name() << "] \n" << std::endl;
}
TS::ptr()->init("cv");
InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return -1;
}
catch (...)
{
std::cerr << "Unknown error" << std::endl;
return -1;
}
return 0;
}
#else // HAVE_CUDA
CV_TEST_MAIN("cv")
#endif // HAVE_CUDA

View File

@ -9,10 +9,19 @@
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <iostream>
#include "cvconfig.h"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <iostream>
#if defined(HAVE_OPENCV_GPU) && defined(HAVE_CUDA)
#include "opencv2/ts/gpu_test.hpp"
#include "opencv2/nonfree/gpu.hpp"
#endif
#endif

View File

@ -47,8 +47,9 @@
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/gpu/gpu.hpp"
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
#include "opencv2/nonfree/gpu.hpp"
#endif
namespace cv {
@ -103,7 +104,7 @@ private:
};
#ifdef HAVE_OPENCV_GPU
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder
{
public:

View File

@ -429,7 +429,7 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
}
}
#ifdef HAVE_OPENCV_GPU
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves, int num_layers,
int num_octaves_descr, int num_layers_descr)
{

View File

@ -71,7 +71,11 @@
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#ifdef HAVE_OPENCV_GPU
# include "opencv2/gpu/gpu.hpp"
#include "opencv2/gpu/gpu.hpp"
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/nonfree/gpu.hpp"
#endif
#endif
#include "../../imgproc/src/gcgraph.hpp"

View File

@ -61,7 +61,7 @@ Stitcher Stitcher::createDefault(bool try_use_gpu)
#ifdef HAVE_OPENCV_GPU
if (try_use_gpu && gpu::getCudaEnabledDeviceCount() > 0)
{
#ifdef HAVE_OPENCV_NONFREE
#if defined(HAVE_OPENCV_NONFREE)
stitcher.setFeaturesFinder(new detail::SurfFeaturesFinderGpu());
#else
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());

View File

@ -1,3 +1,5 @@
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(superres)
using namespace perf;
CV_PERF_TEST_MAIN(superres, printCudaInfo())

View File

@ -16,6 +16,7 @@
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/superres/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"

View File

@ -8,18 +8,6 @@ using namespace cv;
using namespace cv::superres;
using namespace cv::gpu;
#define GPU_SANITY_CHECK(mat, ...) \
do{ \
Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
} while(0)
#define CPU_SANITY_CHECK(mat, ...) \
do{ \
Mat cpu_##mat(mat); \
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
} while(0)
namespace
{
class OneFrameSource_CPU : public FrameSource

View File

@ -51,7 +51,7 @@ using namespace cv::gpu;
using namespace cv::superres;
using namespace cv::superres::detail;
#ifndef HAVE_CUDA
#if !defined(HAVE_CUDA) || !defined(HAVE_OPENCV_GPU)
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_GPU()
{

View File

@ -10,6 +10,12 @@ endif()
set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
if(HAVE_CUDA)
ocv_include_directories(${CUDA_INCLUDE_DIRS})
endif()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
ocv_add_module(ts opencv_core opencv_features2d)
ocv_glob_module_sources()

View File

@ -0,0 +1,68 @@
#ifndef __OPENCV_GPU_PERF_UTILITY_HPP__
#define __OPENCV_GPU_PERF_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts/ts_perf.hpp"
namespace perf
{
CV_ENUM(BorderMode, cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT, cv::BORDER_REFLECT, cv::BORDER_WRAP)
#define ALL_BORDER_MODES testing::ValuesIn(BorderMode::all())
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
#define ALL_INTERPOLATIONS testing::ValuesIn(Interpolation::all())
CV_ENUM(NormType, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_HAMMING, cv::NORM_MINMAX)
enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
#define GPU_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
#define GPU_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
#define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< std::tr1::tuple< __VA_ARGS__ > > name
#define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
DEF_PARAM_TEST_1(Sz, cv::Size);
typedef perf::Size_MatType Sz_Type;
DEF_PARAM_TEST(Sz_Depth, cv::Size, perf::MatDepth);
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, perf::MatDepth, MatCn);
#define GPU_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
#define FAIL_NO_CPU() FAIL() << "No CPU analogue for this GPU implementation"
#define GPU_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat gpu_##mat(mat); \
SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
} while(0)
#define CPU_SANITY_CHECK(mat, ...) \
do{ \
cv::Mat cpu_##mat(mat); \
SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
} while(0)
CV_EXPORTS cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
struct CvtColorInfo
{
int scn;
int dcn;
int code;
CvtColorInfo() {}
explicit CvtColorInfo(int scn_, int dcn_, int code_) : scn(scn_), dcn(dcn_), code(code_) {}
};
CV_EXPORTS void PrintTo(const CvtColorInfo& info, std::ostream* os);
CV_EXPORTS void printCudaInfo();
CV_EXPORTS void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors = cv::noArray());
}
#endif // __OPENCV_GPU_PERF_UTILITY_HPP__

View File

@ -0,0 +1,307 @@
#ifndef __OPENCV_GPU_TEST_UTILITY_HPP__
#define __OPENCV_GPU_TEST_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
namespace cvtest
{
//////////////////////////////////////////////////////////////////////
// random generators
CV_EXPORTS int randomInt(int minVal, int maxVal);
CV_EXPORTS double randomDouble(double minVal, double maxVal);
CV_EXPORTS cv::Size randomSize(int minVal, int maxVal);
CV_EXPORTS cv::Scalar randomScalar(double minVal, double maxVal);
CV_EXPORTS cv::Mat randomMat(cv::Size size, int type, double minVal = 0.0, double maxVal = 255.0);
//////////////////////////////////////////////////////////////////////
// GpuMat create
CV_EXPORTS cv::gpu::GpuMat createMat(cv::Size size, int type, bool useRoi = false);
CV_EXPORTS cv::gpu::GpuMat loadMat(const cv::Mat& m, bool useRoi = false);
//////////////////////////////////////////////////////////////////////
// Image load
//! read image from testdata folder
CV_EXPORTS cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
//! read image from testdata folder and convert it to specified type
CV_EXPORTS cv::Mat readImageType(const std::string& fname, int type);
//////////////////////////////////////////////////////////////////////
// Gpu devices
//! return true if the device supports the specified feature and the gpu module was built with support for that feature.
CV_EXPORTS bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature);
class CV_EXPORTS DeviceManager
{
public:
static DeviceManager& instance();
void load(int i);
void loadAll();
const std::vector<cv::gpu::DeviceInfo>& values() const { return devices_; }
private:
std::vector<cv::gpu::DeviceInfo> devices_;
};
#define ALL_DEVICES testing::ValuesIn(cvtest::DeviceManager::instance().values())
//////////////////////////////////////////////////////////////////////
// Additional assertion
CV_EXPORTS void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
CV_EXPORTS cv::Mat getMat(cv::InputArray arr);
CV_EXPORTS testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
#define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
#define ASSERT_MAT_NEAR(m1, m2, eps) ASSERT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
#define EXPECT_SCALAR_NEAR(s1, s2, eps) \
{ \
EXPECT_NEAR(s1[0], s2[0], eps); \
EXPECT_NEAR(s1[1], s2[1], eps); \
EXPECT_NEAR(s1[2], s2[2], eps); \
EXPECT_NEAR(s1[3], s2[3], eps); \
}
#define ASSERT_SCALAR_NEAR(s1, s2, eps) \
{ \
ASSERT_NEAR(s1[0], s2[0], eps); \
ASSERT_NEAR(s1[1], s2[1], eps); \
ASSERT_NEAR(s1[2], s2[2], eps); \
ASSERT_NEAR(s1[3], s2[3], eps); \
}
#define EXPECT_POINT2_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
}
#define ASSERT_POINT2_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
}
#define EXPECT_POINT3_NEAR(p1, p2, eps) \
{ \
EXPECT_NEAR(p1.x, p2.x, eps); \
EXPECT_NEAR(p1.y, p2.y, eps); \
EXPECT_NEAR(p1.z, p2.z, eps); \
}
#define ASSERT_POINT3_NEAR(p1, p2, eps) \
{ \
ASSERT_NEAR(p1.x, p2.x, eps); \
ASSERT_NEAR(p1.y, p2.y, eps); \
ASSERT_NEAR(p1.z, p2.z, eps); \
}
CV_EXPORTS double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
#define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
}
#define ASSERT_MAT_SIMILAR(mat1, mat2, eps) \
{ \
ASSERT_EQ(mat1.type(), mat2.type()); \
ASSERT_EQ(mat1.size(), mat2.size()); \
ASSERT_LE(checkSimilarity(mat1, mat2), eps); \
}
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
#define GPU_TEST_P(test_case_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
virtual void TestBody(); \
private: \
void UnsafeTestBody(); \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
GetTestCasePatternHolder<test_case_name>(\
#test_case_name, __FILE__, __LINE__)->AddTestPattern(\
#test_case_name, \
#test_name, \
new ::testing::internal::TestMetaFactory< \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
{ \
try \
{ \
UnsafeTestBody(); \
} \
catch (...) \
{ \
cv::gpu::resetDevice(); \
throw; \
} \
} \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
#define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
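// A hypothetical usage sketch (the Foo test below is illustrative only, not part of this module):
//
//     PARAM_TEST_CASE(Foo, cv::gpu::DeviceInfo, UseRoi)
//     {
//         cv::gpu::DeviceInfo devInfo;
//         bool useRoi;
//
//         virtual void SetUp()
//         {
//             devInfo = GET_PARAM(0);
//             useRoi  = GET_PARAM(1);
//             cv::gpu::setDevice(devInfo.deviceID());
//         }
//     };
//
//     GPU_TEST_P(Foo, Accuracy)
//     {
//         cv::Mat src = randomMat(randomSize(100, 200), CV_8UC1);
//         // run the GPU routine on loadMat(src, useRoi) and compare against the CPU gold result
//     }
//
//     INSTANTIATE_TEST_CASE_P(GPU_Misc, Foo, testing::Combine(ALL_DEVICES, WHOLE_SUBMAT));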
#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
// Depth
using perf::MatDepth;
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
\
std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
// Type
using perf::MatType;
//! return vector with types from specified range.
CV_EXPORTS std::vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end);
//! return vector with all types (depth: CV_8U-CV_64F, channels: 1-4).
CV_EXPORTS const std::vector<MatType>& all_types();
#define ALL_TYPES testing::ValuesIn(all_types())
#define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
// ROI
class UseRoi
{
public:
inline UseRoi(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
CV_EXPORTS void PrintTo(const UseRoi& useRoi, std::ostream* os);
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
// Direct/Inverse
class Inverse
{
public:
inline Inverse(bool val = false) : val_(val) {}
inline operator bool() const { return val_; }
private:
bool val_;
};
CV_EXPORTS void PrintTo(const Inverse& useRoi, std::ostream* os);
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
// Param class
#define IMPLEMENT_PARAM_CLASS(name, type) \
class name \
{ \
public: \
name ( type arg = type ()) : val_(arg) {} \
operator type () const {return val_;} \
private: \
type val_; \
}; \
inline void PrintTo( name param, std::ostream* os) \
{ \
*os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
}
IMPLEMENT_PARAM_CLASS(Channels, int)
#define ALL_CHANNELS testing::Values(Channels(1), Channels(2), Channels(3), Channels(4))
#define IMAGE_CHANNELS testing::Values(Channels(1), Channels(3), Channels(4))
// Flags and enums
CV_ENUM(NormCode, cv::NORM_INF, cv::NORM_L1, cv::NORM_L2, cv::NORM_TYPE_MASK, cv::NORM_RELATIVE, cv::NORM_MINMAX)
CV_ENUM(Interpolation, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::INTER_AREA)
CV_ENUM(BorderType, cv::BORDER_REFLECT101, cv::BORDER_REPLICATE, cv::BORDER_CONSTANT, cv::BORDER_REFLECT, cv::BORDER_WRAP)
#define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT), BorderType(cv::BORDER_WRAP))
CV_FLAGS(WarpFlags, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::WARP_INVERSE_MAP)
//////////////////////////////////////////////////////////////////////
// Features2D
CV_EXPORTS testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual)
CV_EXPORTS int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
CV_EXPORTS int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches);
//////////////////////////////////////////////////////////////////////
// Other
CV_EXPORTS void dumpImage(const std::string& fileName, const cv::Mat& image);
CV_EXPORTS void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
CV_EXPORTS void printCudaInfo();
}
namespace cv { namespace gpu
{
CV_EXPORTS void PrintTo(const DeviceInfo& info, std::ostream* os);
}}
#endif // __OPENCV_GPU_TEST_UTILITY_HPP__

modules/ts/src/gpu_perf.cpp Normal file
View File

@ -0,0 +1,313 @@
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/core/gpumat.hpp"
#include "cvconfig.h"
#ifdef HAVE_CUDA
#include <cuda_runtime.h>
#endif
using namespace cv;
namespace perf
{
Mat readImage(const string& fileName, int flags)
{
return imread(perf::TestBase::getDataPath(fileName), flags);
}
void PrintTo(const CvtColorInfo& info, std::ostream* os)
{
static const char* str[] =
{
"BGR2BGRA",
"BGRA2BGR",
"BGR2RGBA",
"RGBA2BGR",
"BGR2RGB",
"BGRA2RGBA",
"BGR2GRAY",
"RGB2GRAY",
"GRAY2BGR",
"GRAY2BGRA",
"BGRA2GRAY",
"RGBA2GRAY",
"BGR2BGR565",
"RGB2BGR565",
"BGR5652BGR",
"BGR5652RGB",
"BGRA2BGR565",
"RGBA2BGR565",
"BGR5652BGRA",
"BGR5652RGBA",
"GRAY2BGR565",
"BGR5652GRAY",
"BGR2BGR555",
"RGB2BGR555",
"BGR5552BGR",
"BGR5552RGB",
"BGRA2BGR555",
"RGBA2BGR555",
"BGR5552BGRA",
"BGR5552RGBA",
"GRAY2BGR555",
"BGR5552GRAY",
"BGR2XYZ",
"RGB2XYZ",
"XYZ2BGR",
"XYZ2RGB",
"BGR2YCrCb",
"RGB2YCrCb",
"YCrCb2BGR",
"YCrCb2RGB",
"BGR2HSV",
"RGB2HSV",
"",
"",
"BGR2Lab",
"RGB2Lab",
"BayerBG2BGR",
"BayerGB2BGR",
"BayerRG2BGR",
"BayerGR2BGR",
"BGR2Luv",
"RGB2Luv",
"BGR2HLS",
"RGB2HLS",
"HSV2BGR",
"HSV2RGB",
"Lab2BGR",
"Lab2RGB",
"Luv2BGR",
"Luv2RGB",
"HLS2BGR",
"HLS2RGB",
"BayerBG2BGR_VNG",
"BayerGB2BGR_VNG",
"BayerRG2BGR_VNG",
"BayerGR2BGR_VNG",
"BGR2HSV_FULL",
"RGB2HSV_FULL",
"BGR2HLS_FULL",
"RGB2HLS_FULL",
"HSV2BGR_FULL",
"HSV2RGB_FULL",
"HLS2BGR_FULL",
"HLS2RGB_FULL",
"LBGR2Lab",
"LRGB2Lab",
"LBGR2Luv",
"LRGB2Luv",
"Lab2LBGR",
"Lab2LRGB",
"Luv2LBGR",
"Luv2LRGB",
"BGR2YUV",
"RGB2YUV",
"YUV2BGR",
"YUV2RGB",
"BayerBG2GRAY",
"BayerGB2GRAY",
"BayerRG2GRAY",
"BayerGR2GRAY",
//YUV 4:2:0 formats family
"YUV2RGB_NV12",
"YUV2BGR_NV12",
"YUV2RGB_NV21",
"YUV2BGR_NV21",
"YUV2RGBA_NV12",
"YUV2BGRA_NV12",
"YUV2RGBA_NV21",
"YUV2BGRA_NV21",
"YUV2RGB_YV12",
"YUV2BGR_YV12",
"YUV2RGB_IYUV",
"YUV2BGR_IYUV",
"YUV2RGBA_YV12",
"YUV2BGRA_YV12",
"YUV2RGBA_IYUV",
"YUV2BGRA_IYUV",
"YUV2GRAY_420",
//YUV 4:2:2 formats family
"YUV2RGB_UYVY",
"YUV2BGR_UYVY",
"YUV2RGB_VYUY",
"YUV2BGR_VYUY",
"YUV2RGBA_UYVY",
"YUV2BGRA_UYVY",
"YUV2RGBA_VYUY",
"YUV2BGRA_VYUY",
"YUV2RGB_YUY2",
"YUV2BGR_YUY2",
"YUV2RGB_YVYU",
"YUV2BGR_YVYU",
"YUV2RGBA_YUY2",
"YUV2BGRA_YUY2",
"YUV2RGBA_YVYU",
"YUV2BGRA_YVYU",
"YUV2GRAY_UYVY",
"YUV2GRAY_YUY2",
// alpha premultiplication
"RGBA2mRGBA",
"mRGBA2RGBA",
"COLORCVT_MAX"
};
*os << str[info.code];
}
static void printOsInfo()
{
#if defined _WIN32
# if defined _WIN64
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Windows x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined linux
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Linux x32.\n[----------]\n"), fflush(stdout);
# endif
#elif defined __APPLE__
# if defined _LP64
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x64.\n[----------]\n"), fflush(stdout);
# else
printf("[----------]\n[ GPU INFO ] \tRun on OS Apple x32.\n[----------]\n"), fflush(stdout);
# endif
#endif
}
void printCudaInfo()
{
printOsInfo();
#ifndef HAVE_CUDA
printf("[----------]\n[ GPU INFO ] \tOpenCV was built without CUDA support.\n[----------]\n"), fflush(stdout);
#else
int driver;
cudaDriverGetVersion(&driver);
printf("[----------]\n"), fflush(stdout);
printf("[ GPU INFO ] \tCUDA Driver version: %d.\n", driver), fflush(stdout);
printf("[ GPU INFO ] \tCUDA Runtime version: %d.\n", CUDART_VERSION), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[ GPU INFO ] \tGPU module was compiled for the following GPU archs.\n"), fflush(stdout);
printf("[ BIN ] \t%s.\n", CUDA_ARCH_BIN), fflush(stdout);
printf("[ PTX ] \t%s.\n", CUDA_ARCH_PTX), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
int deviceCount = cv::gpu::getCudaEnabledDeviceCount();
printf("[ GPU INFO ] \tCUDA device count:: %d.\n", deviceCount), fflush(stdout);
printf("[----------]\n"), fflush(stdout);
for (int i = 0; i < deviceCount; ++i)
{
cv::gpu::DeviceInfo info(i);
printf("[----------]\n"), fflush(stdout);
printf("[ DEVICE ] \t# %d %s.\n", i, info.name().c_str()), fflush(stdout);
printf("[ ] \tCompute capability: %d.%d\n", (int)info.majorVersion(), (int)info.minorVersion()), fflush(stdout);
printf("[ ] \tMulti Processor Count: %d\n", info.multiProcessorCount()), fflush(stdout);
printf("[ ] \tTotal memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0)), fflush(stdout);
printf("[ ] \tFree memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0)), fflush(stdout);
if (!info.isCompatible())
printf("[ GPU INFO ] \tThis device is NOT compatible with current GPU module build\n");
printf("[----------]\n"), fflush(stdout);
}
#endif
}
struct KeypointIdxCompare
{
std::vector<cv::KeyPoint>* keypoints;
explicit KeypointIdxCompare(std::vector<cv::KeyPoint>* _keypoints) : keypoints(_keypoints) {}
bool operator ()(size_t i1, size_t i2) const
{
cv::KeyPoint kp1 = (*keypoints)[i1];
cv::KeyPoint kp2 = (*keypoints)[i2];
if (kp1.pt.x != kp2.pt.x)
return kp1.pt.x < kp2.pt.x;
if (kp1.pt.y != kp2.pt.y)
return kp1.pt.y < kp2.pt.y;
if (kp1.response != kp2.response)
return kp1.response < kp2.response;
return kp1.octave < kp2.octave;
}
};
void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors)
{
std::vector<size_t> indexies(keypoints.size());
for (size_t i = 0; i < indexies.size(); ++i)
indexies[i] = i;
std::sort(indexies.begin(), indexies.end(), KeypointIdxCompare(&keypoints));
std::vector<cv::KeyPoint> new_keypoints;
cv::Mat new_descriptors;
new_keypoints.resize(keypoints.size());
cv::Mat descriptors;
if (_descriptors.needed())
{
descriptors = _descriptors.getMat();
new_descriptors.create(descriptors.size(), descriptors.type());
}
for (size_t i = 0; i < indexies.size(); ++i)
{
size_t new_idx = indexies[i];
new_keypoints[i] = keypoints[new_idx];
if (!new_descriptors.empty())
descriptors.row((int) new_idx).copyTo(new_descriptors.row((int) i));
}
keypoints.swap(new_keypoints);
if (_descriptors.needed())
new_descriptors.copyTo(_descriptors);
}
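// Usage sketch (illustrative, not part of this file): sortKeyPoints puts detector output into a
// deterministic order so CPU and GPU results can be compared element-wise; when a descriptor
// matrix is supplied, its rows are reordered together with the keypoints.
//
//   std::vector<cv::KeyPoint> cpu_keypoints, gpu_keypoints;
//   cv::Mat cpu_descriptors, gpu_descriptors;
//   // run the CPU and GPU detectors first, then:
//   sortKeyPoints(cpu_keypoints, cpu_descriptors);
//   sortKeyPoints(gpu_keypoints, gpu_descriptors);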
}

479
modules/ts/src/gpu_test.cpp Normal file
View File

@ -0,0 +1,479 @@
#include "opencv2/ts/gpu_test.hpp"
#include <stdexcept>
using namespace cv;
using namespace cv::gpu;
using namespace cvtest;
using namespace testing;
using namespace testing::internal;
namespace perf
{
CV_EXPORTS void printCudaInfo();
}
namespace cvtest
{
//////////////////////////////////////////////////////////////////////
// random generators
int randomInt(int minVal, int maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
double randomDouble(double minVal, double maxVal)
{
RNG& rng = TS::ptr()->get_rng();
return rng.uniform(minVal, maxVal);
}
Size randomSize(int minVal, int maxVal)
{
return Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
}
Scalar randomScalar(double minVal, double maxVal)
{
return Scalar(randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal), randomDouble(minVal, maxVal));
}
Mat randomMat(Size size, int type, double minVal, double maxVal)
{
return randomMat(TS::ptr()->get_rng(), size, type, minVal, maxVal, false);
}
//////////////////////////////////////////////////////////////////////
// GpuMat create
GpuMat createMat(Size size, int type, bool useRoi)
{
Size size0 = size;
if (useRoi)
{
size0.width += randomInt(5, 15);
size0.height += randomInt(5, 15);
}
GpuMat d_m(size0, type);
if (size0 != size)
d_m = d_m(Rect((size0.width - size.width) / 2, (size0.height - size.height) / 2, size.width, size.height));
return d_m;
}
GpuMat loadMat(const Mat& m, bool useRoi)
{
GpuMat d_m = createMat(m.size(), m.type(), useRoi);
d_m.upload(m);
return d_m;
}
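// Usage sketch (illustrative): with useRoi == true the requested matrix is carved out of a
// slightly larger allocation, so the tests also exercise kernels on non-continuous sub-matrices.
//
//   cv::Mat src = randomMat(cv::Size(128, 128), CV_8UC1, 0.0, 255.0);
//   cv::gpu::GpuMat d_src = loadMat(src, true);                      // ROI view into a padded buffer
//   cv::gpu::GpuMat d_dst = createMat(src.size(), src.type(), true);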
//////////////////////////////////////////////////////////////////////
// Image load
Mat readImage(const std::string& fileName, int flags)
{
return imread(TS::ptr()->get_data_path() + fileName, flags);
}
Mat readImageType(const std::string& fname, int type)
{
Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cvtColor(src, temp, COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type), CV_MAT_DEPTH(type) == CV_32F ? 1.0 / 255.0 : 1.0);
return src;
}
//////////////////////////////////////////////////////////////////////
// Gpu devices
bool supportFeature(const DeviceInfo& info, FeatureSet feature)
{
return TargetArchs::builtWith(feature) && info.supports(feature);
}
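// Usage sketch (illustrative): tests typically skip early when the required feature set is not
// available both in the build (TargetArchs) and on the device, e.g. native double support:
//
//   cv::gpu::DeviceInfo devInfo;                          // device under test
//   if (!supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
//       return;                                           // or mark the test as unsupported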
DeviceManager& DeviceManager::instance()
{
static DeviceManager obj;
return obj;
}
void DeviceManager::load(int i)
{
devices_.clear();
devices_.reserve(1);
std::ostringstream msg;
if (i < 0 || i >= getCudaEnabledDeviceCount())
{
msg << "Incorrect device number - " << i;
throw std::runtime_error(msg.str());
}
DeviceInfo info(i);
if (!info.isCompatible())
{
msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";
throw std::runtime_error(msg.str());
}
devices_.push_back(info);
}
void DeviceManager::loadAll()
{
int deviceCount = getCudaEnabledDeviceCount();
devices_.clear();
devices_.reserve(deviceCount);
for (int i = 0; i < deviceCount; ++i)
{
DeviceInfo info(i);
if (info.isCompatible())
{
devices_.push_back(info);
}
}
}
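// Usage sketch (illustrative): a test main() would call load(i) when an explicit device index is
// requested on the command line and loadAll() otherwise, so value-parameterized tests run on
// every compatible device.
//
//   DeviceManager::instance().loadAll();   // all compatible devices
//   // or, to restrict the run to a single device index:
//   DeviceManager::instance().load(0);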
//////////////////////////////////////////////////////////////////////
// Additional assertion
namespace
{
template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)
{
const int cn = m.channels();
std::ostringstream ostr;
ostr << "(";
p.x /= cn;
ostr << static_cast<OutT>(m.at<T>(p.y, p.x * cn));
for (int c = 1; c < m.channels(); ++c)
{
ostr << ", " << static_cast<OutT>(m.at<T>(p.y, p.x * cn + c));
}
ostr << ")";
return ostr.str();
}
std::string printMatVal(const Mat& m, Point p)
{
typedef std::string (*func_t)(const Mat& m, Point p);
static const func_t funcs[] =
{
printMatValImpl<uchar, int>, printMatValImpl<schar, int>, printMatValImpl<ushort, int>, printMatValImpl<short, int>,
printMatValImpl<int, int>, printMatValImpl<float, float>, printMatValImpl<double, double>
};
return funcs[m.depth()](m, p);
}
}
void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minLoc_, Point* maxLoc_, const Mat& mask)
{
if (src.depth() != CV_8S)
{
minMaxLoc(src, minVal_, maxVal_, minLoc_, maxLoc_, mask);
return;
}
// OpenCV's minMaxLoc doesn't support CV_8S type
double minVal = std::numeric_limits<double>::max();
Point minLoc(-1, -1);
double maxVal = -std::numeric_limits<double>::max();
Point maxLoc(-1, -1);
for (int y = 0; y < src.rows; ++y)
{
const schar* src_row = src.ptr<schar>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<uchar>(y);
for (int x = 0; x < src.cols; ++x)
{
if (!mask_row || mask_row[x])
{
schar val = src_row[x];
if (val < minVal)
{
minVal = val;
minLoc = cv::Point(x, y);
}
if (val > maxVal)
{
maxVal = val;
maxLoc = cv::Point(x, y);
}
}
}
}
if (minVal_) *minVal_ = minVal;
if (maxVal_) *maxVal_ = maxVal;
if (minLoc_) *minLoc_ = minLoc;
if (maxLoc_) *maxLoc_ = maxLoc;
}
Mat getMat(InputArray arr)
{
if (arr.kind() == _InputArray::GPU_MAT)
{
Mat m;
arr.getGpuMat().download(m);
return m;
}
return arr.getMat();
}
AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, InputArray m1_, InputArray m2_, double eps)
{
Mat m1 = getMat(m1_);
Mat m2 = getMat(m2_);
if (m1.size() != m2.size())
{
return AssertionFailure() << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different sizes : \""
<< expr1 << "\" [" << PrintToString(m1.size()) << "] vs \""
<< expr2 << "\" [" << PrintToString(m2.size()) << "]";
}
if (m1.type() != m2.type())
{
return AssertionFailure() << "Matrices \"" << expr1 << "\" and \"" << expr2 << "\" have different types : \""
<< expr1 << "\" [" << PrintToString(MatType(m1.type())) << "] vs \""
<< expr2 << "\" [" << PrintToString(MatType(m2.type())) << "]";
}
Mat diff;
absdiff(m1.reshape(1), m2.reshape(1), diff);
double maxVal = 0.0;
Point maxLoc;
minMaxLocGold(diff, 0, &maxVal, 0, &maxLoc);
if (maxVal > eps)
{
return AssertionFailure() << "The max difference between matrices \"" << expr1 << "\" and \"" << expr2
<< "\" is " << maxVal << " at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ")"
<< ", which exceeds \"" << eps_expr << "\", where \""
<< expr1 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m1, maxLoc) << ", \""
<< expr2 << "\" at (" << maxLoc.y << ", " << maxLoc.x / m1.channels() << ") evaluates to " << printMatVal(m2, maxLoc) << ", \""
<< eps_expr << "\" evaluates to " << eps;
}
return AssertionSuccess();
}
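// Usage sketch (assumption: opencv2/ts/gpu_test.hpp wraps this function in an ASSERT/EXPECT-style
// macro; the raw googletest predicate-format form is shown here):
//
//   cv::Mat gold;              // reference result computed on the CPU
//   cv::gpu::GpuMat actual;    // result computed on the GPU
//   ASSERT_PRED_FORMAT3(cvtest::assertMatNear, gold, actual, 1e-5);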
double checkSimilarity(InputArray m1, InputArray m2)
{
Mat diff;
matchTemplate(getMat(m1), getMat(m2), diff, CV_TM_CCORR_NORMED);
return std::abs(diff.at<float>(0, 0) - 1.f);
}
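// checkSimilarity returns |NCC - 1| from normalized cross-correlation, i.e. 0 for identical
// images, so a test would assert an upper bound (threshold illustrative):
//
//   EXPECT_LE(checkSimilarity(gold, actual), 1e-3);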
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
vector<MatType> v;
v.reserve((depth_end - depth_start + 1) * (cn_end - cn_start + 1));
for (int depth = depth_start; depth <= depth_end; ++depth)
{
for (int cn = cn_start; cn <= cn_end; ++cn)
{
v.push_back(MatType(CV_MAKE_TYPE(depth, cn)));
}
}
return v;
}
const vector<MatType>& all_types()
{
static vector<MatType> v = types(CV_8U, CV_64F, 1, 4);
return v;
}
void PrintTo(const UseRoi& useRoi, std::ostream* os)
{
if (useRoi)
(*os) << "sub matrix";
else
(*os) << "whole matrix";
}
void PrintTo(const Inverse& inverse, std::ostream* os)
{
if (inverse)
(*os) << "inverse";
else
(*os) << "direct";
}
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const Mat& image)
{
imwrite(TS::ptr()->get_data_path() + fileName, image);
}
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
Mat gold = getMat(gold_);
Mat actual = getMat(actual_);
Mat diff;
absdiff(gold, actual, diff);
threshold(diff, diff, eps, 255.0, cv::THRESH_BINARY);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
waitKey();
}
namespace
{
bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
{
const double maxPtDif = 1.0;
const double maxSizeDif = 1.0;
const double maxAngleDif = 2.0;
const double maxResponseDif = 0.1;
double dist = cv::norm(p1.pt - p2.pt);
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
fabs(p1.angle - p2.angle) < maxAngleDif &&
fabs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
{
return true;
}
return false;
}
struct KeyPointLess : std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return kp1.pt.y < kp2.pt.y || (kp1.pt.y == kp2.pt.y && kp1.pt.x < kp2.pt.x);
}
};
}
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
if (gold.size() != actual.size())
{
return testing::AssertionFailure() << "KeyPoints size mismatch\n"
<< "\"" << gold_expr << "\" : " << gold.size() << "\n"
<< "\"" << actual_expr << "\" : " << actual.size();
}
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (!keyPointsEquals(p1, p2))
{
return testing::AssertionFailure() << "KeyPoints differ at " << i << "\n"
<< "\"" << gold_expr << "\" vs \"" << actual_expr << "\" : \n"
<< "pt : " << testing::PrintToString(p1.pt) << " vs " << testing::PrintToString(p2.pt) << "\n"
<< "size : " << p1.size << " vs " << p2.size << "\n"
<< "angle : " << p1.angle << " vs " << p2.angle << "\n"
<< "response : " << p1.response << " vs " << p2.response << "\n"
<< "octave : " << p1.octave << " vs " << p2.octave << "\n"
<< "class_id : " << p1.class_id << " vs " << p2.class_id;
}
}
return ::testing::AssertionSuccess();
}
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
if (keyPointsEquals(p1, p2))
++validCount;
}
return validCount;
}
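// Usage sketch (illustrative): feature tests usually accept a GPU result when a large enough
// fraction of the matches agrees with the CPU "gold" result instead of requiring exact equality:
//
//   int matched = getMatchedPointsCount(keypoints_gold, keypoints_gpu, matches);
//   double ratio = static_cast<double>(matched) / keypoints_gold.size();
//   EXPECT_GT(ratio, 0.95);   // threshold chosen per test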
void printCudaInfo()
{
perf::printCudaInfo();
}
}
void cv::gpu::PrintTo(const DeviceInfo& info, std::ostream* os)
{
(*os) << info.name();
}

View File

@ -16,7 +16,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_include_directories("${OpenCV_SOURCE_DIR}/include")#for opencv.hpp
ocv_include_modules(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
if (HAVE_opencv_gpu)
if(HAVE_opencv_gpu)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include")
endif()
@ -41,7 +41,7 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
add_executable(${the_target} ${srcs})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
if (HAVE_opencv_gpu)
if(HAVE_opencv_gpu)
target_link_libraries(${the_target} opencv_gpu)
endif()

View File

@ -355,7 +355,7 @@ int main(int argc, char* argv[])
Ptr<FeaturesFinder> finder;
if (features_type == "surf")
{
#ifdef HAVE_OPENCV_GPU
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
finder = new SurfFeaturesFinderGpu();
else

View File

@ -1,7 +1,7 @@
SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui
opencv_ml opencv_video opencv_objdetect opencv_features2d
opencv_calib3d opencv_legacy opencv_contrib opencv_gpu
opencv_nonfree opencv_superres)
opencv_superres)
ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
@ -17,6 +17,10 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
"${OpenCV_SOURCE_DIR}/modules/gpu/src/nvidia/core"
)
if(HAVE_opencv_nonfree)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/nonfree/include")
endif()
if(HAVE_CUDA)
ocv_include_directories(${CUDA_INCLUDE_DIRS})
endif()
@ -33,6 +37,9 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
add_executable(${the_target} ${srcs})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
if(HAVE_opencv_nonfree)
target_link_libraries(${the_target} opencv_nonfree)
endif()
set_target_properties(${the_target} PROPERTIES
OUTPUT_NAME "${project}-example-${name}"

View File

@ -1,10 +1,15 @@
#include <iostream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/nonfree/gpu.hpp"
#endif
using namespace std;
using namespace cv;
using namespace cv::gpu;
@ -14,7 +19,9 @@ enum Method
FGD_STAT,
MOG,
MOG2,
#ifdef HAVE_OPENCV_NONFREE
VIBE,
#endif
GMG
};
@ -38,13 +45,25 @@ int main(int argc, const char** argv)
string file = cmd.get<string>("file");
string method = cmd.get<string>("method");
if (method != "fgd" && method != "mog" && method != "mog2" && method != "vibe" && method != "gmg")
if (method != "fgd"
&& method != "mog"
&& method != "mog2"
#ifdef HAVE_OPENCV_NONFREE
&& method != "vibe"
#endif
&& method != "gmg")
{
cerr << "Incorrect method" << endl;
return -1;
}
Method m = method == "fgd" ? FGD_STAT : method == "mog" ? MOG : method == "mog2" ? MOG2 : method == "vibe" ? VIBE : GMG;
Method m = method == "fgd" ? FGD_STAT :
method == "mog" ? MOG :
method == "mog2" ? MOG2 :
#ifdef HAVE_OPENCV_NONFREE
method == "vibe" ? VIBE :
#endif
GMG;
VideoCapture cap;
@ -67,7 +86,9 @@ int main(int argc, const char** argv)
FGDStatModel fgd_stat;
MOG_GPU mog;
MOG2_GPU mog2;
#ifdef HAVE_OPENCV_NONFREE
VIBE_GPU vibe;
#endif
GMG_GPU gmg;
gmg.numInitializationFrames = 40;
@ -93,9 +114,11 @@ int main(int argc, const char** argv)
mog2(d_frame, d_fgmask);
break;
#ifdef HAVE_OPENCV_NONFREE
case VIBE:
vibe.initialize(d_frame);
break;
#endif
case GMG:
gmg.initialize(d_frame.size());
@ -105,8 +128,14 @@ int main(int argc, const char** argv)
namedWindow("image", WINDOW_NORMAL);
namedWindow("foreground mask", WINDOW_NORMAL);
namedWindow("foreground image", WINDOW_NORMAL);
if (m != VIBE && m != GMG)
if (m != GMG
#ifdef HAVE_OPENCV_NONFREE
&& m != VIBE
#endif
)
{
namedWindow("mean background image", WINDOW_NORMAL);
}
for(;;)
{
@ -136,9 +165,11 @@ int main(int argc, const char** argv)
mog2.getBackgroundImage(d_bgimg);
break;
#ifdef HAVE_OPENCV_NONFREE
case VIBE:
vibe(d_frame, d_fgmask);
break;
#endif
case GMG:
gmg(d_frame, d_fgmask);

View File

@ -73,9 +73,6 @@ GpuMat d_right[2];
StereoBM_GPU* bm[2];
GpuMat d_result[2];
// CPU result
Mat result;
static void printHelp()
{
std::cout << "Usage: driver_api_stereo_multi_gpu --left <left_image> --right <right_image>\n";

View File

@ -3,9 +3,17 @@ set(the_target "example_gpu_performance")
file(GLOB sources "performance/*.cpp")
file(GLOB headers "performance/*.h")
if(HAVE_opencv_nonfree)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/nonfree/include")
endif()
add_executable(${the_target} ${sources} ${headers})
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})
if(HAVE_opencv_nonfree)
target_link_libraries(${the_target} opencv_nonfree)
endif()
set_target_properties(${the_target} PROPERTIES
OUTPUT_NAME "performance_gpu"
PROJECT_LABEL "(EXAMPLE_GPU) performance")

View File

@ -4,10 +4,15 @@
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "performance.h"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/nonfree/gpu.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#endif
using namespace std;
using namespace cv;
@ -266,6 +271,7 @@ TEST(meanShift)
}
}
#ifdef HAVE_OPENCV_NONFREE
TEST(SURF)
{
@ -294,6 +300,8 @@ TEST(SURF)
GPU_OFF;
}
#endif
TEST(FAST)
{

View File

@ -44,9 +44,6 @@ GpuMat d_right[2];
StereoBM_GPU* bm[2];
GpuMat d_result[2];
// CPU result
Mat result;
static void printHelp()
{
std::cout << "Usage: stereo_multi_gpu --left <image> --right <image>\n";

View File

@ -1,9 +1,14 @@
#include <iostream>
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/nonfree/gpu.hpp"
using namespace std;
using namespace cv;
@ -81,3 +86,13 @@ int main(int argc, char* argv[])
return 0;
}
#else
int main()
{
std::cerr << "OpenCV was built without nonfree module" << std::endl;
return 0;
}
#endif
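
For orientation, the following is a minimal sketch of a complete nonfree-guarded GPU SURF matching program in the spirit of this sample. It is an illustration only, not the sample's actual code: the image file names, the matcher norm and the console output are assumptions.

#include <iostream>
#include <vector>
#include "opencv2/opencv_modules.hpp"

#ifdef HAVE_OPENCV_NONFREE

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/nonfree/gpu.hpp"

int main()
{
    // load two grayscale images (SURF_GPU supports 8-bit grayscale input only)
    cv::Mat img1 = cv::imread("img1.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("img2.png", cv::IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty())
        return -1;

    cv::gpu::GpuMat d_img1(img1), d_img2(img2);

    // detect keypoints and compute descriptors on the GPU
    cv::gpu::SURF_GPU surf;
    cv::gpu::GpuMat d_keypoints1, d_keypoints2, d_descriptors1, d_descriptors2;
    surf(d_img1, cv::gpu::GpuMat(), d_keypoints1, d_descriptors1);
    surf(d_img2, cv::gpu::GpuMat(), d_keypoints2, d_descriptors2);

    // brute-force match the descriptors on the GPU
    cv::gpu::BFMatcher_GPU matcher(cv::NORM_L2);
    std::vector<cv::DMatch> matches;
    matcher.match(d_descriptors1, d_descriptors2, matches);

    std::cout << "Found " << matches.size() << " matches" << std::endl;
    return 0;
}

#else

int main()
{
    std::cerr << "OpenCV was built without the nonfree module" << std::endl;
    return 0;
}

#endif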