move obsolete algorithms from cudabgsegm to cudalegacy:

* GMG
* FGD
Vladislav Vinogradov 2014-12-30 17:35:25 +03:00
parent 17b1152fa3
commit dae188d14f
16 changed files with 499 additions and 319 deletions
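For downstream code, the visible effect of this move is a header and link change: BackgroundSubtractorGMG, BackgroundSubtractorFGD and their factory functions are now declared in opencv2/cudalegacy.hpp (module opencv_cudalegacy), while MOG and MOG2 stay in opencv2/cudabgsegm.hpp. A minimal, illustrative sketch of the updated includes (the helper function is made up for the example):

#include <opencv2/cudalegacy.hpp>   // GMG and FGD declarations now live here
#include <opencv2/cudabgsegm.hpp>   // MOG and MOG2 remain here

// Illustrative helper: both factory functions keep their names, signatures and defaults.
static void createSubtractors()
{
    cv::Ptr<cv::cuda::BackgroundSubtractorGMG> gmg =
        cv::cuda::createBackgroundSubtractorGMG(120, 0.8);   // initializationFrames, decisionThreshold
    cv::Ptr<cv::cuda::BackgroundSubtractorFGD> fgd =
        cv::cuda::createBackgroundSubtractorFGD();           // default FGDParams
    (void)gmg;
    (void)fgd;
}

Applications that previously linked opencv_cudabgsegm for GMG/FGD now also need to link opencv_cudalegacy, which (per the CMake change below) depends on opencv_video and optionally on opencv_cudaarithm, opencv_cudafilters and opencv_cudaimgproc.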

View File

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Background Segmentation")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow)
ocv_define_module(cudabgsegm opencv_video OPTIONAL opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)
ocv_define_module(cudabgsegm opencv_video)

View File

@@ -147,115 +147,6 @@ CV_EXPORTS Ptr<cuda::BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16,
bool detectShadows = true);
////////////////////////////////////////////////////
// GMG
/** @brief Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements algorithm described in @cite Gold2012 .
*/
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual int getMaxFeatures() const = 0;
virtual void setMaxFeatures(int maxFeatures) = 0;
virtual double getDefaultLearningRate() const = 0;
virtual void setDefaultLearningRate(double lr) = 0;
virtual int getNumFrames() const = 0;
virtual void setNumFrames(int nframes) = 0;
virtual int getQuantizationLevels() const = 0;
virtual void setQuantizationLevels(int nlevels) = 0;
virtual double getBackgroundPrior() const = 0;
virtual void setBackgroundPrior(double bgprior) = 0;
virtual int getSmoothingRadius() const = 0;
virtual void setSmoothingRadius(int radius) = 0;
virtual double getDecisionThreshold() const = 0;
virtual void setDecisionThreshold(double thresh) = 0;
virtual bool getUpdateBackgroundModel() const = 0;
virtual void setUpdateBackgroundModel(bool update) = 0;
virtual double getMinVal() const = 0;
virtual void setMinVal(double val) = 0;
virtual double getMaxVal() const = 0;
virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor
@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
////////////////////////////////////////////////////
// FGD
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.
Any pixel which does not fit this model is then deemed to be foreground. The class implements
algorithm described in @cite FGD2003 .
@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
//!< Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
//!< Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
//!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
//! default Params
FGDParams();
};
/** @brief Creates FGD Background Subtractor
@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
//! @}
}} // namespace cv { namespace cuda {

View File

@@ -42,10 +42,6 @@
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
using namespace std;
using namespace testing;
using namespace perf;
@@ -63,83 +59,6 @@ using namespace perf;
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
const int numIters = 10;
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame), foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_fgd->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
d_fgd->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
#ifdef HAVE_OPENCV_CUDAIMGPROC
cv::cuda::GpuMat background3, background;
d_fgd->getBackgroundImage(background3);
cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
#endif
}
else
{
FAIL_NO_CPU();
}
}
#endif
//////////////////////////////////////////////////////
// MOG
@@ -484,118 +403,3 @@ PERF_TEST_P(Video_Cn, MOG2GetBackgroundImage,
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
Combine(Values(string("gpu/video/768x576.avi")),
CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const int numIters = 150;
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_gmg->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_gmg->apply(d_frame, foreground);
}
CUDA_SANITY_CHECK(foreground);
}
else
{
FAIL_NO_CPU();
}
}
#endif

View File

@@ -51,16 +51,4 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
# include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */

View File

@@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Computer Vision (legacy)")
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4130 /wd4324 /wd4512 /wd4310 -Wundef -Wmissing-declarations -Wuninitialized -Wshadow)
ocv_define_module(cudalegacy opencv_core OPTIONAL opencv_objdetect)
ocv_define_module(cudalegacy opencv_core opencv_video OPTIONAL opencv_objdetect opencv_imgproc opencv_cudaarithm opencv_cudafilters opencv_cudaimgproc)

View File

@@ -49,6 +49,7 @@
#include "opencv2/cudalegacy/NCVPyramid.hpp"
#include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
#include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
#include "opencv2/video/background_segm.hpp"
/**
@addtogroup cuda
@@ -59,6 +60,9 @@
namespace cv { namespace cuda {
//! @addtogroup cudalegacy
//! @{
class CV_EXPORTS ImagePyramid : public Algorithm
{
public:
@@ -67,6 +71,117 @@ public:
CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null());
////////////////////////////////////////////////////
// GMG
/** @brief Background/Foreground Segmentation Algorithm.
The class discriminates between foreground and background pixels by building and maintaining a model
of the background. Any pixel which does not fit this model is then deemed to be foreground. The
class implements algorithm described in @cite Gold2012 .
*/
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
using cv::BackgroundSubtractor::apply;
virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;
virtual int getMaxFeatures() const = 0;
virtual void setMaxFeatures(int maxFeatures) = 0;
virtual double getDefaultLearningRate() const = 0;
virtual void setDefaultLearningRate(double lr) = 0;
virtual int getNumFrames() const = 0;
virtual void setNumFrames(int nframes) = 0;
virtual int getQuantizationLevels() const = 0;
virtual void setQuantizationLevels(int nlevels) = 0;
virtual double getBackgroundPrior() const = 0;
virtual void setBackgroundPrior(double bgprior) = 0;
virtual int getSmoothingRadius() const = 0;
virtual void setSmoothingRadius(int radius) = 0;
virtual double getDecisionThreshold() const = 0;
virtual void setDecisionThreshold(double thresh) = 0;
virtual bool getUpdateBackgroundModel() const = 0;
virtual void setUpdateBackgroundModel(bool update) = 0;
virtual double getMinVal() const = 0;
virtual void setMinVal(double val) = 0;
virtual double getMaxVal() const = 0;
virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor
@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
////////////////////////////////////////////////////
// FGD
/** @brief The class discriminates between foreground and background pixels by building and maintaining a model
of the background.
Any pixel which does not fit this model is then deemed to be foreground. The class implements
algorithm described in @cite FGD2003 .
@sa BackgroundSubtractor
*/
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
/** @brief Returns the output foreground regions calculated by findContours.
@param foreground_regions Output array (CPU memory).
*/
virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
struct CV_EXPORTS FGDParams
{
int Lc; //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; //!< Number of color vectors used to model normal background color variation at a given pixel.
int N2c; //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
//!< Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
//!< Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
//!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; //!< Affects color and color co-occurrence quantization, typically set to 2.
float T; //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.
//! default Params
FGDParams();
};
/** @brief Creates FGD Background Subtractor
@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
//! @}
}}
#endif /* __OPENCV_CUDALEGACY_HPP__ */
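The GMG and FGD interfaces above are unchanged by the move, so existing tuning code keeps working against the new header. A short, illustrative configuration sketch (parameter values are arbitrary examples, not recommendations, and the function name is made up):

#include <vector>
#include <opencv2/cudalegacy.hpp>

static void configureAndRun(const cv::cuda::GpuMat& frame)
{
    // GMG: the per-pixel histogram model is tuned through the virtual setters.
    cv::Ptr<cv::cuda::BackgroundSubtractorGMG> gmg = cv::cuda::createBackgroundSubtractorGMG();
    gmg->setMaxFeatures(40);            // max distinct colors kept in the per-pixel histogram
    gmg->setQuantizationLevels(16);     // histogram bins per channel
    gmg->setSmoothingRadius(4);         // radius of the smoothing applied to the mask
    gmg->setUpdateBackgroundModel(true);

    cv::cuda::GpuMat fgmaskGmg;
    gmg->apply(frame, fgmaskGmg, -1.0, cv::cuda::Stream::Null());   // explicit learning-rate + stream overload

    // FGD: parameters are passed once, through FGDParams.
    cv::cuda::FGDParams params;         // defaults come from FGDParams()
    params.perform_morphing = 2;        // extra erode-dilate-erode cleanup iterations
    params.minArea = 20.0f;             // drop foreground blobs with tiny bounding boxes
    cv::Ptr<cv::cuda::BackgroundSubtractorFGD> fgd = cv::cuda::createBackgroundSubtractorFGD(params);

    cv::cuda::GpuMat fgmaskFgd;
    fgd->apply(frame, fgmaskFgd);

    std::vector<std::vector<cv::Point> > regions;
    fgd->getForegroundRegions(regions); // contours of foreground blobs, returned in CPU memory
}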

View File

@@ -0,0 +1,256 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
const int numIters = 10;
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame), foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorFGD> d_fgd = cv::cuda::createBackgroundSubtractorFGD();
d_fgd->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_fgd->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
d_fgd->apply(d_frame, foreground);
}
#ifdef HAVE_OPENCV_CUDAIMGPROC
cv::cuda::GpuMat background3, background;
d_fgd->getBackgroundImage(background3);
cv::cuda::cvtColor(background3, background, cv::COLOR_BGR2BGRA);
CUDA_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
#endif
}
else
{
FAIL_NO_CPU();
}
SANITY_CHECK_NOTHING();
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, GMG,
Combine(Values(string("gpu/video/768x576.avi")),
CUDA_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const int numIters = 150;
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat d_frame(frame);
cv::cuda::GpuMat foreground;
cv::Ptr<cv::cuda::BackgroundSubtractorGMG> d_gmg = cv::cuda::createBackgroundSubtractorGMG();
d_gmg->setMaxFeatures(maxFeatures);
d_gmg->apply(d_frame, foreground);
int i = 0;
// collect performance data
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer();
if(!next())
break;
d_gmg->apply(d_frame, foreground);
stopTimer();
}
// process last frame in sequence to get data for sanity test
for (; i < numIters; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_gmg->apply(d_frame, foreground);
}
}
else
{
FAIL_NO_CPU();
}
SANITY_CHECK_NOTHING();
}
#endif

View File

@@ -0,0 +1,47 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace perf;
CV_PERF_TEST_CUDA_MAIN(cudalegacy)

View File

@@ -0,0 +1,66 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/ts/cuda_perf.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#endif

View File

@@ -56,6 +56,18 @@
# include "opencv2/objdetect.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
# include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/cudalegacy/private.hpp"

View File

@@ -4,6 +4,7 @@
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/cudabgsegm.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"