split gpuvideo into gpuoptflow and gpubgsegm

Vladislav Vinogradov 2013-04-17 18:35:09 +04:00
parent eda124ec32
commit ac0f506d0e
53 changed files with 1613 additions and 900 deletions

View File

@@ -7,7 +7,7 @@ set(the_description "GPU-accelerated Computer Vision")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
ocv_define_module(gpu opencv_gpuarithm opencv_gpufilters opencv_gpuwarping opencv_gpuimgproc
opencv_gpufeatures2d opencv_gpuvideo opencv_gpustereo)
opencv_gpufeatures2d opencv_gpuoptflow opencv_gpubgsegm opencv_gpustereo)
if(HAVE_CUDA)
add_subdirectory(perf4au)

View File

@@ -49,7 +49,8 @@
#include "opencv2/gpuwarping.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/gpufeatures2d.hpp"
#include "opencv2/gpuvideo.hpp"
#include "opencv2/gpuoptflow.hpp"
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/gpustereo.hpp"
namespace cv { namespace gpu {

View File

@@ -0,0 +1,9 @@
if(ANDROID OR IOS)
ocv_module_disable(gpubgsegm)
endif()
set(the_description "GPU-accelerated Background Segmentation")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
ocv_define_module(gpubgsegm opencv_video opencv_legacy opencv_gpufilters opencv_gpuimgproc)

View File

@@ -0,0 +1,8 @@
**************************************************
gpubgsegm. GPU-accelerated Background Segmentation
**************************************************
.. toctree::
:maxdepth: 1
background_segmentation

View File

@@ -40,8 +40,8 @@
//
//M*/
#ifndef __OPENCV_GPUVIDEO_HPP__
#define __OPENCV_GPUVIDEO_HPP__
#ifndef __OPENCV_GPUBGSEGM_HPP__
#define __OPENCV_GPUBGSEGM_HPP__
#include <memory>
@@ -50,266 +50,6 @@
namespace cv { namespace gpu {
////////////////////////////////// Optical Flow //////////////////////////////////////////
class CV_EXPORTS BroxOpticalFlow
{
public:
BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) :
alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_),
inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_)
{
}
//! Compute optical flow
//! frame0 - source frame (supports only CV_32FC1 type)
//! frame1 - frame to track (with the same size and type as frame0)
//! u - flow horizontal component (along x axis)
//! v - flow vertical component (along y axis)
void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null());
//! flow smoothness
float alpha;
//! gradient constancy importance
float gamma;
//! pyramid scale factor
float scale_factor;
//! number of lagged non-linearity iterations (inner loop)
int inner_iterations;
//! number of warping iterations (number of pyramid levels)
int outer_iterations;
//! number of linear system solver iterations
int solver_iterations;
GpuMat buf;
};
class CV_EXPORTS PyrLKOpticalFlow
{
public:
PyrLKOpticalFlow();
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
void releaseMemory();
Size winSize;
int maxLevel;
int iters;
bool useInitialFlow;
private:
std::vector<GpuMat> prevPyr_;
std::vector<GpuMat> nextPyr_;
GpuMat buf_;
GpuMat uPyr_[2];
GpuMat vPyr_[2];
};
class CV_EXPORTS FarnebackOpticalFlow
{
public:
FarnebackOpticalFlow()
{
numLevels = 5;
pyrScale = 0.5;
fastPyramids = false;
winSize = 13;
numIters = 10;
polyN = 5;
polySigma = 1.1;
flags = 0;
}
int numLevels;
double pyrScale;
bool fastPyramids;
int winSize;
int numIters;
int polyN;
double polySigma;
int flags;
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
void releaseMemory()
{
frames_[0].release();
frames_[1].release();
pyrLevel_[0].release();
pyrLevel_[1].release();
M_.release();
bufM_.release();
R_[0].release();
R_[1].release();
blurredFrame_[0].release();
blurredFrame_[1].release();
pyramid0_.clear();
pyramid1_.clear();
}
private:
void prepareGaussian(
int n, double sigma, float *g, float *xg, float *xxg,
double &ig11, double &ig03, double &ig33, double &ig55);
void setPolynomialExpansionConsts(int n, double sigma);
void updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
void updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
GpuMat frames_[2];
GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
std::vector<GpuMat> pyramid0_, pyramid1_;
};
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
//
// see reference:
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
class CV_EXPORTS OpticalFlowDual_TVL1_GPU
{
public:
OpticalFlowDual_TVL1_GPU();
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);
void collectGarbage();
/**
* Time step of the numerical scheme.
*/
double tau;
/**
* Weight parameter for the data term, attachment parameter.
* This is the most relevant parameter, which determines the smoothness of the output.
* The smaller this parameter is, the smoother the solutions we obtain.
* It depends on the range of motions of the images, so its value should be adapted to each image sequence.
*/
double lambda;
/**
* Weight parameter for (u - v)^2, tightness parameter.
* It serves as a link between the attachment and the regularization terms.
* In theory, it should have a small value in order to maintain both parts in correspondence.
* The method is stable for a large range of values of this parameter.
*/
double theta;
/**
* Number of scales used to create the pyramid of images.
*/
int nscales;
/**
* Number of warpings per scale.
* Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
* This is a parameter that assures the stability of the method.
* It also affects the running time, so it is a compromise between speed and accuracy.
*/
int warps;
/**
* Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
* A small value will yield more accurate solutions at the expense of a slower convergence.
*/
double epsilon;
/**
* Stopping criterion iterations number used in the numerical scheme.
*/
int iterations;
double scaleStep;
bool useInitialFlow;
private:
void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2);
std::vector<GpuMat> I0s;
std::vector<GpuMat> I1s;
std::vector<GpuMat> u1s;
std::vector<GpuMat> u2s;
GpuMat I1x_buf;
GpuMat I1y_buf;
GpuMat I1w_buf;
GpuMat I1wx_buf;
GpuMat I1wy_buf;
GpuMat grad_buf;
GpuMat rho_c_buf;
GpuMat p11_buf;
GpuMat p12_buf;
GpuMat p21_buf;
GpuMat p22_buf;
GpuMat diff_buf;
GpuMat norm_buf;
};
//! Calculates optical flow for 2 images using the block matching algorithm
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
Size block_size, Size shift_size, Size max_range, bool use_previous,
GpuMat& velx, GpuMat& vely, GpuMat& buf,
Stream& stream = Stream::Null());
class CV_EXPORTS FastOpticalFlowBM
{
public:
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());
private:
GpuMat buffer;
GpuMat extended_I0;
GpuMat extended_I1;
};
//! Interpolate frames (images) using provided optical flow (displacement field).
//! frame0 - frame 0 (32-bit floating point images, single channel)
//! frame1 - frame 1 (the same type and size)
//! fu - forward horizontal displacement
//! fv - forward vertical displacement
//! bu - backward horizontal displacement
//! bv - backward vertical displacement
//! pos - new frame position
//! newFrame - new frame
//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat;
//! occlusion masks 0, occlusion masks 1,
//! interpolated forward flow 0, interpolated forward flow 1,
//! interpolated backward flow 0, interpolated backward flow 1
//!
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
float pos, GpuMat& newFrame, GpuMat& buf,
Stream& stream = Stream::Null());
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
//////////////////////// Background/foreground segmentation ////////////////////////
// Foreground Object Detection from Videos Containing Complex Background.
// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
// ACM MM2003 9p
@@ -583,4 +323,4 @@ private:
}} // namespace cv { namespace gpu {
#endif /* __OPENCV_GPUVIDEO_HPP__ */
#endif /* __OPENCV_GPUBGSEGM_HPP__ */

View File

@@ -0,0 +1,536 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, Video_FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(4);
d_model.create(d_frame);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer(); next();
d_model.update(d_frame);
stopTimer();
}
const cv::gpu::GpuMat background = d_model.background;
const cv::gpu::GpuMat foreground = d_model.foreground;
GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
}
else
{
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
startTimer(); next();
cvUpdateBGStatModel(&ipl_frame, model);
stopTimer();
}
const cv::Mat background = cv::cvarrToMat(model->background);
const cv::Mat foreground = cv::cvarrToMat(model->foreground);
CPU_SANITY_CHECK(background);
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_LearningRate, string, MatCn, double);
PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4,
Values(0.0, 0.01)))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const float learningRate = static_cast<float>(GET_PARAM(2));
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::MOG_GPU d_mog;
cv::gpu::GpuMat foreground;
d_mog(d_frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_mog(d_frame, foreground, learningRate);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground;
mog->apply(frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
mog->apply(frame, foreground, learningRate);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG2
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, Video_MOG2,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::MOG2_GPU d_mog2;
d_mog2.bShadowDetection = false;
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
d_mog2(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_mog2(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->set("detectShadows", false);
cv::Mat foreground;
mog2->apply(frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
mog2->apply(frame, foreground);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG2GetBackgroundImage
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame;
cv::gpu::MOG2_GPU d_mog2;
cv::gpu::GpuMat d_foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_mog2(d_frame, d_foreground);
}
cv::gpu::GpuMat background;
TEST_CYCLE() d_mog2.getBackgroundImage(background);
GPU_SANITY_CHECK(background, 1);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
cv::Mat foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
mog2->apply(frame, foreground);
}
cv::Mat background;
TEST_CYCLE() mog2->getBackgroundImage(background);
CPU_SANITY_CHECK(background);
}
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
Combine(Values(string("gpu/video/768x576.avi")),
GPU_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
cv::gpu::GMG_GPU d_gmg;
d_gmg.maxFeatures = maxFeatures;
d_gmg(d_frame, foreground);
for (int i = 0; i < 150; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_gmg(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
gmg->set("maxFeatures", maxFeatures);
//gmg.initialize(frame.size(), 0.0, 255.0);
gmg->apply(frame, foreground);
for (int i = 0; i < 150; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
gmg->apply(frame, foreground);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif

View File

@@ -54,8 +54,7 @@
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/gpuvideo.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/video.hpp"
#include "opencv2/legacy.hpp"

View File

@@ -45,11 +45,10 @@
#include <limits>
#include "opencv2/gpuvideo.hpp"
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/gpuarithm.hpp"
#include "opencv2/gpufilters.hpp"
#include "opencv2/gpuwarping.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/video.hpp"

View File

@@ -0,0 +1,64 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <fstream>
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_test.hpp"
#include "opencv2/gpubgsegm.hpp"
#include "opencv2/video.hpp"
#include "opencv2/legacy.hpp"
#endif

View File

@@ -556,3 +556,34 @@ Downloads results from :ocv:func:`gpu::HoughCircles` to host memory.
:param h_circles: Output host array.
.. seealso:: :ocv:func:`gpu::HoughCircles`
gpu::GoodFeaturesToTrackDetector_GPU
------------------------------------
.. ocv:class:: gpu::GoodFeaturesToTrackDetector_GPU
Class used for detecting strong corners in an image. ::
class GoodFeaturesToTrackDetector_GPU
{
public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04);
void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat());
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double harrisK;
void releaseMemory();
};
The class finds the most prominent corners in the image.
.. seealso:: :ocv:func:`goodFeaturesToTrack`
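A minimal usage sketch (illustrative only; ``image`` is assumed to be an 8-bit grayscale host image already loaded by the caller), mirroring how the detector is driven in the performance tests. ::

    cv::gpu::GpuMat d_image(image);      // upload the frame to the device

    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(1000,  /* maxCorners   */
                                                      0.01,  /* qualityLevel */
                                                      3.0);  /* minDistance  */
    cv::gpu::GpuMat d_corners;
    detector(d_image, d_corners);        // one-row CV_32FC2 matrix of corner positions
    detector.releaseMemory();            // optionally drop the internal buffers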

View File

@@ -1092,3 +1092,42 @@ PERF_TEST_P(Sz_Depth_Cn_KernelSz, BilateralFilter,
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
DEF_PARAM_TEST(Image_MinDistance, string, double);
PERF_TEST_P(Image_MinDistance, GoodFeaturesToTrack,
Combine(Values<string>("gpu/perf/aloe.png"),
Values(0.0, 3.0)))
{
const string fileName = GET_PARAM(0);
const double minDistance = GET_PARAM(1);
const cv::Mat image = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
const int maxCorners = 8000;
const double qualityLevel = 0.01;
if (PERF_RUN_GPU())
{
cv::gpu::GoodFeaturesToTrackDetector_GPU d_detector(maxCorners, qualityLevel, minDistance);
const cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat pts;
TEST_CYCLE() d_detector(d_image, pts);
GPU_SANITY_CHECK(pts);
}
else
{
cv::Mat pts;
TEST_CYCLE() cv::goodFeaturesToTrack(image, pts, maxCorners, qualityLevel, minDistance);
CPU_SANITY_CHECK(pts);
}
}

View File

@@ -0,0 +1,9 @@
if(ANDROID OR IOS)
ocv_module_disable(gpuoptflow)
endif()
set(the_description "GPU-accelerated Optical Flow")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
ocv_define_module(gpuoptflow opencv_video opencv_legacy opencv_gpuarithm opencv_gpuwarping opencv_gpuimgproc OPTIONAL opencv_gpulegacy)

View File

@@ -0,0 +1,8 @@
****************************************
gpuoptflow. GPU-accelerated Optical Flow
****************************************
.. toctree::
:maxdepth: 1
optflow

View File

@@ -0,0 +1,238 @@
Optical Flow
============
.. highlight:: cpp
gpu::BroxOpticalFlow
--------------------
.. ocv:class:: gpu::BroxOpticalFlow
Class computing the optical flow for two images using the Brox et al. optical flow algorithm ([Brox2004]_). ::
class BroxOpticalFlow
{
public:
BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_);
//! Compute optical flow
//! frame0 - source frame (supports only CV_32FC1 type)
//! frame1 - frame to track (with the same size and type as frame0)
//! u - flow horizontal component (along x axis)
//! v - flow vertical component (along y axis)
void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null());
//! flow smoothness
float alpha;
//! gradient constancy importance
float gamma;
//! pyramid scale factor
float scale_factor;
//! number of lagged non-linearity iterations (inner loop)
int inner_iterations;
//! number of warping iterations (number of pyramid levels)
int outer_iterations;
//! number of linear system solver iterations
int solver_iterations;
GpuMat buf;
};
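A minimal usage sketch (illustrative parameter values; ``frame0`` and ``frame1`` are assumed to be 8-bit grayscale host images of the same size). ::

    cv::Mat f0, f1;
    frame0.convertTo(f0, CV_32F, 1.0 / 255.0);   // the algorithm expects CV_32FC1 input
    frame1.convertTo(f1, CV_32F, 1.0 / 255.0);

    cv::gpu::GpuMat d_f0(f0), d_f1(f1), d_u, d_v;
    cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
                                  10 /*inner*/, 77 /*outer*/, 10 /*solver*/);
    brox(d_f0, d_f1, d_u, d_v);                  // per-pixel horizontal (u) and vertical (v) flow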
gpu::GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU
---------------------------------------------------------------------
Constructor.
.. ocv:function:: gpu::GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0, int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04)
:param maxCorners: Maximum number of corners to return. If more corners are found than ``maxCorners``, the strongest ``maxCorners`` of them are returned.
:param qualityLevel: Parameter characterizing the minimal accepted quality of image corners. The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue (see :ocv:func:`gpu::cornerMinEigenVal` ) or the Harris function response (see :ocv:func:`gpu::cornerHarris` ). The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the ``qualityLevel=0.01`` , then all the corners with the quality measure less than 15 are rejected.
:param minDistance: Minimum possible Euclidean distance between the returned corners.
:param blockSize: Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. See :ocv:func:`cornerEigenValsAndVecs` .
:param useHarrisDetector: Parameter indicating whether to use a Harris detector (see :ocv:func:`gpu::cornerHarris`) or :ocv:func:`gpu::cornerMinEigenVal`.
:param harrisK: Free parameter of the Harris detector.
gpu::FarnebackOpticalFlow
-------------------------
.. ocv:class:: gpu::FarnebackOpticalFlow
Class computing a dense optical flow using Gunnar Farneback's algorithm. ::
class CV_EXPORTS FarnebackOpticalFlow
{
public:
FarnebackOpticalFlow()
{
numLevels = 5;
pyrScale = 0.5;
fastPyramids = false;
winSize = 13;
numIters = 10;
polyN = 5;
polySigma = 1.1;
flags = 0;
}
int numLevels;
double pyrScale;
bool fastPyramids;
int winSize;
int numIters;
int polyN;
double polySigma;
int flags;
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
void releaseMemory();
private:
/* hidden */
};
gpu::FarnebackOpticalFlow::operator ()
--------------------------------------
Computes a dense optical flow using Gunnar Farneback's algorithm.
.. ocv:function:: void gpu::FarnebackOpticalFlow::operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null())
:param frame0: First 8-bit gray-scale input image
:param frame1: Second 8-bit gray-scale input image
:param flowx: Flow horizontal component
:param flowy: Flow vertical component
:param s: Stream
.. seealso:: :ocv:func:`calcOpticalFlowFarneback`
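A minimal usage sketch (``prevGray`` and ``nextGray`` are assumed to be 8-bit single-channel host images of the same size). ::

    cv::gpu::FarnebackOpticalFlow farneback;             // default parameters from the listing above
    cv::gpu::GpuMat d_prev(prevGray), d_next(nextGray);  // upload the input pair
    cv::gpu::GpuMat d_flowx, d_flowy;
    farneback(d_prev, d_next, d_flowx, d_flowy);         // dense flow, one component per GpuMat
    farneback.releaseMemory();                           // free internal buffers when finished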
gpu::FarnebackOpticalFlow::releaseMemory
----------------------------------------
Releases unused auxiliary memory buffers.
.. ocv:function:: void gpu::FarnebackOpticalFlow::releaseMemory()
gpu::PyrLKOpticalFlow
---------------------
.. ocv:class:: gpu::PyrLKOpticalFlow
Class used for calculating an optical flow. ::
class PyrLKOpticalFlow
{
public:
PyrLKOpticalFlow();
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
Size winSize;
int maxLevel;
int iters;
bool useInitialFlow;
void releaseMemory();
};
The class can calculate an optical flow for a sparse feature set or dense optical flow using the iterative Lucas-Kanade method with pyramids.
.. seealso:: :ocv:func:`calcOpticalFlowPyrLK`
gpu::PyrLKOpticalFlow::sparse
-----------------------------
Calculates an optical flow for a sparse feature set.
.. ocv:function:: void gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err = 0)
:param prevImg: First 8-bit input image (supports both grayscale and color images).
:param nextImg: Second input image of the same size and the same type as ``prevImg`` .
:param prevPts: Vector of 2D points for which the flow needs to be found. It must be a one-row matrix of CV_32FC2 type.
:param nextPts: Output vector of 2D points (with single-precision floating-point coordinates) containing the calculated new positions of input features in the second image. When ``useInitialFlow`` is true, the vector must have the same size as the input.
:param status: Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.
:param err: Output vector (CV_32FC1 type) that contains the difference between patches around the original and moved points, or the minimum eigenvalues if ``getMinEigenVals`` is checked. It can be NULL if not needed.
.. seealso:: :ocv:func:`calcOpticalFlowPyrLK`
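A minimal sketch of sparse tracking (``prevGray`` and ``nextGray`` are assumed to be 8-bit grayscale host images; the points to track are produced with :ocv:class:`gpu::GoodFeaturesToTrackDetector_GPU`). ::

    cv::gpu::GpuMat d_prev(prevGray), d_next(nextGray);

    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(4000, 0.01, 0.0);
    cv::gpu::GpuMat d_prevPts;
    detector(d_prev, d_prevPts);                 // one-row CV_32FC2 matrix of points to track

    cv::gpu::PyrLKOpticalFlow lk;
    lk.winSize = cv::Size(21, 21);
    cv::gpu::GpuMat d_nextPts, d_status;
    lk.sparse(d_prev, d_next, d_prevPts, d_nextPts, d_status);   // d_status marks points that were found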
gpu::PyrLKOpticalFlow::dense
-----------------------------
Calculates a dense optical flow.
.. ocv:function:: void gpu::PyrLKOpticalFlow::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0)
:param prevImg: First 8-bit grayscale input image.
:param nextImg: Second input image of the same size and the same type as ``prevImg`` .
:param u: Horizontal component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
:param v: Vertical component of the optical flow of the same size as input images, 32-bit floating-point, single-channel
:param err: Output vector (CV_32FC1 type) that contains the difference between patches around the original and moved points, or the minimum eigenvalues if ``getMinEigenVals`` is checked. It can be NULL if not needed.
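A minimal sketch of the dense variant (``prevGray`` and ``nextGray`` are assumed 8-bit grayscale host images), optionally feeding the result into ``gpu::createOpticalFlowNeedleMap`` for visualization. ::

    cv::gpu::PyrLKOpticalFlow lk;
    cv::gpu::GpuMat d_prev(prevGray), d_next(nextGray);
    cv::gpu::GpuMat d_u, d_v;
    lk.dense(d_prev, d_next, d_u, d_v);                   // 32-bit float flow components

    cv::gpu::GpuMat vertex, colors;                       // optional needle-map visualization
    cv::gpu::createOpticalFlowNeedleMap(d_u, d_v, vertex, colors);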
gpu::PyrLKOpticalFlow::releaseMemory
------------------------------------
Releases inner buffer memory.
.. ocv:function:: void gpu::PyrLKOpticalFlow::releaseMemory()
gpu::interpolateFrames
----------------------
Interpolates frames (images) using provided optical flow (displacement field).
.. ocv:function:: void gpu::interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, const GpuMat& fu, const GpuMat& fv, const GpuMat& bu, const GpuMat& bv, float pos, GpuMat& newFrame, GpuMat& buf, Stream& stream = Stream::Null())
:param frame0: First frame (32-bit floating point images, single channel).
:param frame1: Second frame. Must have the same type and size as ``frame0`` .
:param fu: Forward horizontal displacement.
:param fv: Forward vertical displacement.
:param bu: Backward horizontal displacement.
:param bv: Backward vertical displacement.
:param pos: New frame position.
:param newFrame: Output image.
:param buf: Temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow, interpolated backward vertical flow.
:param stream: Stream for the asynchronous version.
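A minimal sketch of synthesizing the midpoint frame, assuming ``d_f0`` and ``d_f1`` are CV_32FC1 frames already on the device and using :ocv:class:`gpu::BroxOpticalFlow` (illustrative parameters) to obtain the forward and backward flow. ::

    cv::gpu::BroxOpticalFlow brox(0.197f, 50.0f, 0.8f, 10, 77, 10);
    cv::gpu::GpuMat d_fu, d_fv, d_bu, d_bv;
    brox(d_f0, d_f1, d_fu, d_fv);                // forward flow  (frame0 -> frame1)
    brox(d_f1, d_f0, d_bu, d_bv);                // backward flow (frame1 -> frame0)

    cv::gpu::GpuMat d_mid, d_buf;                // d_buf is allocated by the call
    cv::gpu::interpolateFrames(d_f0, d_f1, d_fu, d_fv, d_bu, d_bv,
                               0.5f /*pos*/, d_mid, d_buf);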
.. [Brox2004] T. Brox, A. Bruhn, N. Papenberg, J. Weickert. *High accuracy optical flow estimation based on a theory for warping*. ECCV 2004.

View File

@@ -0,0 +1,310 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPUOPTFLOW_HPP__
#define __OPENCV_GPUOPTFLOW_HPP__
#include "opencv2/core/gpumat.hpp"
namespace cv { namespace gpu {
////////////////////////////////// Optical Flow //////////////////////////////////////////
class CV_EXPORTS BroxOpticalFlow
{
public:
BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) :
alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_),
inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_)
{
}
//! Compute optical flow
//! frame0 - source frame (supports only CV_32FC1 type)
//! frame1 - frame to track (with the same size and type as frame0)
//! u - flow horizontal component (along x axis)
//! v - flow vertical component (along y axis)
void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null());
//! flow smoothness
float alpha;
//! gradient constancy importance
float gamma;
//! pyramid scale factor
float scale_factor;
//! number of lagged non-linearity iterations (inner loop)
int inner_iterations;
//! number of warping iterations (number of pyramid levels)
int outer_iterations;
//! number of linear system solver iterations
int solver_iterations;
GpuMat buf;
};
class CV_EXPORTS PyrLKOpticalFlow
{
public:
PyrLKOpticalFlow();
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
GpuMat& status, GpuMat* err = 0);
void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
void releaseMemory();
Size winSize;
int maxLevel;
int iters;
bool useInitialFlow;
private:
std::vector<GpuMat> prevPyr_;
std::vector<GpuMat> nextPyr_;
GpuMat buf_;
GpuMat uPyr_[2];
GpuMat vPyr_[2];
};
class CV_EXPORTS FarnebackOpticalFlow
{
public:
FarnebackOpticalFlow()
{
numLevels = 5;
pyrScale = 0.5;
fastPyramids = false;
winSize = 13;
numIters = 10;
polyN = 5;
polySigma = 1.1;
flags = 0;
}
int numLevels;
double pyrScale;
bool fastPyramids;
int winSize;
int numIters;
int polyN;
double polySigma;
int flags;
void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
void releaseMemory()
{
frames_[0].release();
frames_[1].release();
pyrLevel_[0].release();
pyrLevel_[1].release();
M_.release();
bufM_.release();
R_[0].release();
R_[1].release();
blurredFrame_[0].release();
blurredFrame_[1].release();
pyramid0_.clear();
pyramid1_.clear();
}
private:
void prepareGaussian(
int n, double sigma, float *g, float *xg, float *xxg,
double &ig11, double &ig03, double &ig33, double &ig55);
void setPolynomialExpansionConsts(int n, double sigma);
void updateFlow_boxFilter(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
void updateFlow_gaussianBlur(
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
GpuMat frames_[2];
GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
std::vector<GpuMat> pyramid0_, pyramid1_;
};
// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
//
// see reference:
// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
class CV_EXPORTS OpticalFlowDual_TVL1_GPU
{
public:
OpticalFlowDual_TVL1_GPU();
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);
void collectGarbage();
/**
* Time step of the numerical scheme.
*/
double tau;
/**
* Weight parameter for the data term, attachment parameter.
* This is the most relevant parameter, which determines the smoothness of the output.
* The smaller this parameter is, the smoother the solutions we obtain.
* It depends on the range of motions of the images, so its value should be adapted to each image sequence.
*/
double lambda;
/**
* Weight parameter for (u - v)^2, tightness parameter.
* It serves as a link between the attachment and the regularization terms.
* In theory, it should have a small value in order to maintain both parts in correspondence.
* The method is stable for a large range of values of this parameter.
*/
double theta;
/**
* Number of scales used to create the pyramid of images.
*/
int nscales;
/**
* Number of warpings per scale.
* Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
* This is a parameter that assures the stability of the method.
* It also affects the running time, so it is a compromise between speed and accuracy.
*/
int warps;
/**
* Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
* A small value will yield more accurate solutions at the expense of a slower convergence.
*/
double epsilon;
/**
* Stopping criterion iterations number used in the numerical scheme.
*/
int iterations;
double scaleStep;
bool useInitialFlow;
private:
void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2);
std::vector<GpuMat> I0s;
std::vector<GpuMat> I1s;
std::vector<GpuMat> u1s;
std::vector<GpuMat> u2s;
GpuMat I1x_buf;
GpuMat I1y_buf;
GpuMat I1w_buf;
GpuMat I1wx_buf;
GpuMat I1wy_buf;
GpuMat grad_buf;
GpuMat rho_c_buf;
GpuMat p11_buf;
GpuMat p12_buf;
GpuMat p21_buf;
GpuMat p22_buf;
GpuMat diff_buf;
GpuMat norm_buf;
};
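// Illustrative usage sketch (not part of the original header; d_frame0 and d_frame1 are
// assumed to be single-channel GpuMats already uploaded to the device):
//
//     cv::gpu::OpticalFlowDual_TVL1_GPU tvl1;
//     cv::gpu::GpuMat d_flowx, d_flowy;
//     tvl1(d_frame0, d_frame1, d_flowx, d_flowy);   // dense flow, one GpuMat per component
//     tvl1.collectGarbage();                        // release the internal buffers when done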
//! Calculates optical flow for 2 images using the block matching algorithm
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
Size block_size, Size shift_size, Size max_range, bool use_previous,
GpuMat& velx, GpuMat& vely, GpuMat& buf,
Stream& stream = Stream::Null());
class CV_EXPORTS FastOpticalFlowBM
{
public:
void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());
private:
GpuMat buffer;
GpuMat extended_I0;
GpuMat extended_I1;
};
//! Interpolate frames (images) using provided optical flow (displacement field).
//! frame0 - frame 0 (32-bit floating point images, single channel)
//! frame1 - frame 1 (the same type and size)
//! fu - forward horizontal displacement
//! fv - forward vertical displacement
//! bu - backward horizontal displacement
//! bv - backward vertical displacement
//! pos - new frame position
//! newFrame - new frame
//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat;
//! occlusion masks 0, occlusion masks 1,
//! interpolated forward flow 0, interpolated forward flow 1,
//! interpolated backward flow 0, interpolated backward flow 1
//!
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
const GpuMat& fu, const GpuMat& fv,
const GpuMat& bu, const GpuMat& bv,
float pos, GpuMat& newFrame, GpuMat& buf,
Stream& stream = Stream::Null());
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
}} // namespace cv { namespace gpu {
#endif /* __OPENCV_GPUOPTFLOW_HPP__ */

View File

@@ -0,0 +1,47 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace perf;
CV_PERF_TEST_MAIN(gpuoptflow, printCudaInfo())

View File

@@ -47,26 +47,6 @@ using namespace std;
using namespace testing;
using namespace perf;
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
//////////////////////////////////////////////////////
// InterpolateFrames
@@ -152,45 +132,6 @@ PERF_TEST_P(ImagePair, Video_CreateOpticalFlowNeedleMap,
}
}
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
DEF_PARAM_TEST(Image_MinDistance, string, double);
PERF_TEST_P(Image_MinDistance, Video_GoodFeaturesToTrack,
Combine(Values<string>("gpu/perf/aloe.png"),
Values(0.0, 3.0)))
{
const string fileName = GET_PARAM(0);
const double minDistance = GET_PARAM(1);
const cv::Mat image = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
const int maxCorners = 8000;
const double qualityLevel = 0.01;
if (PERF_RUN_GPU())
{
cv::gpu::GoodFeaturesToTrackDetector_GPU d_detector(maxCorners, qualityLevel, minDistance);
const cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat pts;
TEST_CYCLE() d_detector(d_image, pts);
GPU_SANITY_CHECK(pts);
}
else
{
cv::Mat pts;
TEST_CYCLE() cv::goodFeaturesToTrack(image, pts, maxCorners, qualityLevel, minDistance);
CPU_SANITY_CHECK(pts);
}
}
//////////////////////////////////////////////////////
// BroxOpticalFlow
@@ -536,472 +477,3 @@ PERF_TEST_P(ImagePair, Video_FastOpticalFlowBM,
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////
// FGDStatModel
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST_1(Video, string);
PERF_TEST_P(Video, Video_FGDStatModel,
Values(string("gpu/video/768x576.avi")))
{
declare.time(60);
const string inputFile = perf::TestBase::getDataPath(GetParam());
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(4);
d_model.create(d_frame);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer(); next();
d_model.update(d_frame);
stopTimer();
}
const cv::gpu::GpuMat background = d_model.background;
const cv::gpu::GpuMat foreground = d_model.foreground;
GPU_SANITY_CHECK(background, 1e-2, ERROR_RELATIVE);
GPU_SANITY_CHECK(foreground, 1e-2, ERROR_RELATIVE);
}
else
{
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
startTimer(); next();
cvUpdateBGStatModel(&ipl_frame, model);
stopTimer();
}
const cv::Mat background = cv::cvarrToMat(model->background);
const cv::Mat foreground = cv::cvarrToMat(model->foreground);
CPU_SANITY_CHECK(background);
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_LearningRate, string, MatCn, double);
PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4,
Values(0.0, 0.01)))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const float learningRate = static_cast<float>(GET_PARAM(2));
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::MOG_GPU d_mog;
cv::gpu::GpuMat foreground;
d_mog(d_frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_mog(d_frame, foreground, learningRate);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground;
mog->apply(frame, foreground, learningRate);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
mog->apply(frame, foreground, learningRate);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG2
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, Video_MOG2,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::MOG2_GPU d_mog2;
d_mog2.bShadowDetection = false;
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
d_mog2(d_frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_mog2(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
mog2->set("detectShadows", false);
cv::Mat foreground;
mog2->apply(frame, foreground);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
mog2->apply(frame, foreground);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif
//////////////////////////////////////////////////////
// MOG2GetBackgroundImage
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"),
GPU_CHANNELS_1_3_4))
{
const string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame;
cv::gpu::MOG2_GPU d_mog2;
cv::gpu::GpuMat d_foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
d_mog2(d_frame, d_foreground);
}
cv::gpu::GpuMat background;
TEST_CYCLE() d_mog2.getBackgroundImage(background);
GPU_SANITY_CHECK(background, 1);
}
else
{
cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
cv::Mat foreground;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
mog2->apply(frame, foreground);
}
cv::Mat background;
TEST_CYCLE() mog2->getBackgroundImage(background);
CPU_SANITY_CHECK(background);
}
}
#endif
//////////////////////////////////////////////////////
// GMG
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, MatCn, int);
PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
Combine(Values(string("gpu/video/768x576.avi")),
GPU_CHANNELS_1_3_4,
Values(20, 40, 60)))
{
const std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
const int cn = GET_PARAM(1);
const int maxFeatures = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_frame(frame);
cv::gpu::GpuMat foreground;
cv::gpu::GMG_GPU d_gmg;
d_gmg.maxFeatures = maxFeatures;
d_gmg(d_frame, foreground);
for (int i = 0; i < 150; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
d_frame.upload(frame);
startTimer(); next();
d_gmg(d_frame, foreground);
stopTimer();
}
GPU_SANITY_CHECK(foreground);
}
else
{
cv::Mat foreground;
cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
gmg->set("maxFeatures", maxFeatures);
//gmg.initialize(frame.size(), 0.0, 255.0);
gmg->apply(frame, foreground);
for (int i = 0; i < 150; ++i)
{
cap >> frame;
if (frame.empty())
{
cap.release();
cap.open(inputFile);
cap >> frame;
}
if (cn != 3)
{
cv::Mat temp;
if (cn == 1)
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
else
cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA);
cv::swap(temp, frame);
}
startTimer(); next();
gmg->apply(frame, foreground);
stopTimer();
}
CPU_SANITY_CHECK(foreground);
}
}
#endif

View File

@@ -0,0 +1,43 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"

View File

@@ -0,0 +1,66 @@
// (standard OpenCV BSD-style license header, identical to the one reproduced in full above)
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_perf.hpp"
#include "opencv2/gpuoptflow.hpp"
#include "opencv2/video.hpp"
#include "opencv2/legacy.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#endif

View File

@@ -0,0 +1,43 @@
// (standard OpenCV BSD-style license header, identical to the one reproduced in full above)
#include "precomp.hpp"

View File

@@ -0,0 +1,62 @@
// (standard OpenCV BSD-style license header, identical to the one reproduced in full above)
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include <limits>
#include "opencv2/gpuoptflow.hpp"
#include "opencv2/gpuarithm.hpp"
#include "opencv2/gpuwarping.hpp"
#include "opencv2/video.hpp"
#include "opencv2/core/gpu_private.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPULEGACY
# include "opencv2/gpulegacy/private.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */

View File

@@ -0,0 +1,45 @@
// (standard OpenCV BSD-style license header, identical to the one reproduced in full above)
#include "test_precomp.hpp"
CV_GPU_TEST_MAIN("gpu")

View File

@@ -151,88 +151,6 @@ GPU_TEST_P(BroxOpticalFlow, OpticalFlowNan)
INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
namespace
{
IMPLEMENT_PARAM_CLASS(MinDistance, double)
}
PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)
{
cv::gpu::DeviceInfo devInfo;
double minDistance;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minDistance = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(GoodFeaturesToTrack, Accuracy)
{
cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat d_pts;
detector(loadMat(image), d_pts);
ASSERT_FALSE(d_pts.empty());
std::vector<cv::Point2f> pts(d_pts.cols);
cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*) &pts[0]);
d_pts.download(pts_mat);
std::vector<cv::Point2f> pts_gold;
cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);
ASSERT_EQ(pts_gold.size(), pts.size());
size_t mismatch = 0;
for (size_t i = 0; i < pts.size(); ++i)
{
cv::Point2i a = pts_gold[i];
cv::Point2i b = pts[i];
bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
if (!eq)
++mismatch;
}
double bad_ratio = static_cast<double>(mismatch) / pts.size();
ASSERT_LE(bad_ratio, 0.01);
}
GPU_TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat src(100, 100, CV_8UC1, cv::Scalar::all(0));
cv::gpu::GpuMat corners(1, maxCorners, CV_32FC2);
detector(src, corners);
ASSERT_TRUE(corners.empty());
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
//////////////////////////////////////////////////////
// PyrLKOpticalFlow

View File

@@ -0,0 +1,43 @@
// (standard OpenCV BSD-style license header, identical to the one reproduced in full above)
#include "test_precomp.hpp"

View File

@@ -56,9 +56,8 @@
#include "opencv2/ts.hpp"
#include "opencv2/ts/gpu_test.hpp"
#include "opencv2/gpuvideo.hpp"
#include "opencv2/gpuoptflow.hpp"
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/legacy.hpp"

View File

@@ -1,9 +0,0 @@
if(ANDROID OR IOS)
ocv_module_disable(gpuvideo)
endif()
set(the_description "GPU-accelerated Video Analysis")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
ocv_define_module(gpuvideo opencv_video opencv_legacy opencv_gpufilters opencv_gpuwarping opencv_gpuimgproc OPTIONAL opencv_gpulegacy)

View File

@@ -1,8 +0,0 @@
***********************************
gpu. GPU-accelerated Video Analysis
***********************************
.. toctree::
:maxdepth: 1
video

View File

@@ -22,7 +22,8 @@ if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND)
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuwarping/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuimgproc/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpufeatures2d/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuvideo/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpuoptflow/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpubgsegm/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpustereo/include")
ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include")
endif()

View File

@@ -3,7 +3,7 @@ SET(OPENCV_GPU_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc ope
opencv_calib3d opencv_legacy opencv_contrib opencv_gpu
opencv_nonfree opencv_softcascade opencv_superres
opencv_gpucodec opencv_gpuarithm opencv_gpufilters opencv_gpuwarping opencv_gpuimgproc
opencv_gpufeatures2d opencv_gpuvideo
opencv_gpufeatures2d opencv_gpuoptflow opencv_gpubgsegm
opencv_gpustereo opencv_gpulegacy)
ocv_check_dependencies(${OPENCV_GPU_SAMPLES_REQUIRED_DEPS})