From 3fb0bf6e996b5aa7c094d055578a520e7764aa6f Mon Sep 17 00:00:00 2001
From: Jin Ma <jin@multicorewareinc.com>
Date: Fri, 16 Aug 2013 14:19:46 +0800
Subject: [PATCH 01/55] Added MOG and MOG2.

---
 modules/ocl/include/opencv2/ocl/ocl.hpp |  149 ++++
 modules/ocl/perf/perf_bgfg.cpp          |  333 +++++++++++++
 modules/ocl/src/bgfg_mog.cpp            |  630 ++++++++++++++++++++++++
 modules/ocl/src/opencl/bgfg_mog.cl      |  543 ++++++++++++++++++++
 modules/ocl/test/test_bgfg.cpp          |  232 +++++++++
 modules/ocl/test/test_optflow.cpp       |    6 +-
 modules/ocl/test/utility.cpp            |   38 ++
 modules/ocl/test/utility.hpp            |    3 +
 samples/ocl/bgfg_segm.cpp               |  135 +++++
 9 files changed, 2066 insertions(+), 3 deletions(-)
 create mode 100644 modules/ocl/perf/perf_bgfg.cpp
 create mode 100644 modules/ocl/src/bgfg_mog.cpp
 create mode 100644 modules/ocl/src/opencl/bgfg_mog.cl
 create mode 100644 modules/ocl/test/test_bgfg.cpp
 create mode 100644 samples/ocl/bgfg_segm.cpp

diff --git a/modules/ocl/include/opencv2/ocl/ocl.hpp b/modules/ocl/include/opencv2/ocl/ocl.hpp
index aa0283fbe..f250646f5 100644
--- a/modules/ocl/include/opencv2/ocl/ocl.hpp
+++ b/modules/ocl/include/opencv2/ocl/ocl.hpp
@@ -1698,6 +1698,155 @@ namespace cv
         // keys = {1, 2, 3} (CV_8UC1)
         // values = {6,2, 10,5, 4,3} (CV_8UC2)
         void CV_EXPORTS sortByKey(oclMat& keys, oclMat& values, int method, bool isGreaterThan = false);
+        /*!Base class for MOG and MOG2!*/
+        class CV_EXPORTS BackgroundSubtractor
+        {
+        public:
+            //! the virtual destructor
+            virtual ~BackgroundSubtractor();
+            //! the update operator that takes the next video frame and returns the current foreground mask as an 8-bit binary image.
+            virtual void operator()(const oclMat& image, oclMat& fgmask, float learningRate);
+
+            //! computes a background image
+            virtual void getBackgroundImage(oclMat& backgroundImage) const = 0;
+        };
+        /*!
+        Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+        The class implements the following algorithm:
+        "An improved adaptive background mixture model for real-time tracking with shadow detection"
+        P. KadewTraKuPong and R. Bowden,
+        Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001.
+        http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+        */
+        class CV_EXPORTS MOG: public cv::ocl::BackgroundSubtractor
+        {
+        public:
+            //! the default constructor
+            MOG(int nmixtures = -1);
+
+            //! re-initialization method
+            void initialize(Size frameSize, int frameType);
+
+            //! the update operator
+            void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = 0.f);
+
+            //! computes a background image which is the mean of all background Gaussians
+            void getBackgroundImage(oclMat& backgroundImage) const;
+
+            //! releases all inner buffers
+            void release();
+
+            int history;
+            float varThreshold;
+            float backgroundRatio;
+            float noiseSigma;
+
+        private:
+            int nmixtures_;
+
+            Size frameSize_;
+            int frameType_;
+            int nframes_;
+
+            oclMat weight_;
+            oclMat sortKey_;
+            oclMat mean_;
+            oclMat var_;
+        };
+
+        /*!
+        The class implements the following algorithm:
+        "Improved adaptive Gaussian mixture model for background subtraction"
+        Z. Zivkovic
+        International Conference on Pattern Recognition, UK, August, 2004.
+        http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+        */
+        class CV_EXPORTS MOG2: public cv::ocl::BackgroundSubtractor
+        {
+        public:
+            //! the default constructor
+            MOG2(int nmixtures = -1);
+
+            //! re-initialization method
+            void initialize(Size frameSize, int frameType);
+
+            //! the update operator
+            void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = -1.0f);
+
+            //! computes a background image which is the mean of all background Gaussians
+            void getBackgroundImage(oclMat& backgroundImage) const;
+
+            //! releases all inner buffers
+            void release();
+
+            // parameters
+            // you should call initialize() after changing any of the parameters
+
+            int history;
+
+            //! here it is the maximum allowed number of mixture components.
+            //! Actual number is determined dynamically per pixel
+            float varThreshold;
+            // threshold on the squared Mahalanobis distance to decide if it is well described
+            // by the background model or not. Related to Cthr from the paper.
+            // This does not influence the update of the background. A typical value could be 4 sigma
+            // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+            /////////////////////////
+            // less important parameters - things you might change but be careful
+            ////////////////////////
+
+            float backgroundRatio;
+            // corresponds to fTB=1-cf from the paper
+            // TB - threshold when the component becomes significant enough to be included into
+            // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.9.
+            // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+            // it is considered foreground
+            // float noiseSigma;
+            float varThresholdGen;
+
+            //corresponds to Tg - threshold on the squared Mahalanobis distance to decide
+            //when a sample is close to the existing components. If it is not close
+            //to any, a new component will be generated. I use 3 sigma => Tg=3*3=9.
+            //Smaller Tg leads to more generated components; a higher Tg may lead
+            //to a small number of components, but they can grow too large
+            float fVarInit;
+            float fVarMin;
+            float fVarMax;
+
+            //initial variance for the newly generated components.
+            //It will influence the speed of adaptation. A good guess should be made.
+            //A simple way is to estimate the typical standard deviation from the images.
+            //I used here 10 as a reasonable value
+            // min and max can be used to further control the variance
+            float fCT; //CT - complexity reduction prior
+            //this is related to the number of samples needed to accept that a component
+            //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+            //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+            //shadow detection parameters
+            bool bShadowDetection; //default 1 - do shadow detection
+            unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
+            float fTau;
+            // Tau - shadow threshold. The shadow is detected if the pixel is a darker
+            //version of the background. Tau is a threshold on how much darker the shadow can be.
+            //Tau = 0.5 means that if a pixel is more than 2 times darker then it is not shadow
+            //See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
+
+        private:
+            int nmixtures_;
+
+            Size frameSize_;
+            int frameType_;
+            int nframes_;
+
+            oclMat weight_;
+            oclMat variance_;
+            oclMat mean_;
+
+            oclMat bgmodelUsedModes_; //keep track of number of modes per pixel
+        };
     }
 }
 #if defined _MSC_VER && _MSC_VER >= 1200
diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp
new file mode 100644
index 000000000..e7aad759f
--- /dev/null
+++ b/modules/ocl/perf/perf_bgfg.cpp
@@ -0,0 +1,333 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved.
+// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// @Authors
+//    Jin Ma, jin@multicorewareinc.com
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other oclMaterials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors as is and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+// +//M*/ +#include "precomp.hpp" +using namespace cv; +using namespace cv::ocl; + +void cvtFrameFmt(std::vector& input, std::vector& output, int output_cn) +{ + for(int i=0; i frame_buffer_init; + std::vector frame_buffer(nframe); + std::vector frame_buffer_ocl; + std::vector foreground_buf_ocl; + std::vector foreground_buf_cpu; + BackgroundSubtractorMOG mog_cpu; + cv::ocl::MOG d_mog; + for(int i = 0; i < nframe; i++) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + frame_buffer_init.push_back(frame); + } + + for(unsigned int i = 0; i < sizeof(learningRate)/sizeof(float); i++) + { + for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) + { + SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "< frame_buffer_init; + std::vector frame_buffer(nframe); + std::vector frame_buffer_ocl; + std::vector foreground_buf_ocl; + std::vector foreground_buf_cpu; + cv::ocl::oclMat foreground_ocl; + + for(int i = 0; i < nframe; i++) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + frame_buffer_init.push_back(frame); + } + cv::ocl::MOG2 d_mog; + + for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) + { + SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "<> frame; + ASSERT_FALSE(frame.empty()); + + int nframe = 5; + std::vector frame_buffer_init; + std::vector frame_buffer(nframe); + std::vector frame_buffer_ocl; + std::vector foreground_buf_ocl; + std::vector foreground_buf_cpu; + + for(int i = 0; i < nframe; i++) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + frame_buffer_init.push_back(frame); + } + + for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) + { + SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "< 0 ? nmixtures : mog::defaultNMixtures, 8); + history = mog::defaultHistory; + varThreshold = mog::defaultVarThreshold; + backgroundRatio = mog::defaultBackgroundRatio; + noiseSigma = mog::defaultNoiseSigma; +} + +void cv::ocl::MOG::initialize(cv::Size frameSize, int frameType) +{ + CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4); + + frameSize_ = frameSize; + frameType_ = frameType; + + int ch = CV_MAT_CN(frameType); + int work_ch = ch; + + // for each gaussian mixture of each pixel bg model we store + // the mixture sort key (w/sum_of_variances), the mixture weight (w), + // the mean (nchannels values) and + // the diagonal covariance matrix (another nchannels values) + + weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1); + sortKey_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1); + mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch)); + var_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch)); + + weight_.setTo(cv::Scalar::all(0)); + sortKey_.setTo(cv::Scalar::all(0)); + mean_.setTo(cv::Scalar::all(0)); + var_.setTo(cv::Scalar::all(0)); + + nframes_ = 0; +} + +void cv::ocl::MOG::operator()(const cv::ocl::oclMat& frame, cv::ocl::oclMat& fgmask, float learningRate) +{ + using namespace cv::ocl::device::mog; + + CV_Assert(frame.depth() == CV_8U); + + int ch = frame.oclchannels(); + int work_ch = ch; + + if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.oclchannels()) + initialize(frame.size(), frame.type()); + + fgmask.create(frameSize_, CV_8UC1); + + ++nframes_; + learningRate = learningRate >= 0.0f && nframes_ > 1 ? 
learningRate : 1.0f / std::min(nframes_, history); + CV_Assert(learningRate >= 0.0f); + + mog_ocl(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_, + varThreshold, learningRate, backgroundRatio, noiseSigma); +} + +void cv::ocl::MOG::getBackgroundImage(oclMat& backgroundImage) const +{ + using namespace cv::ocl::device::mog; + + backgroundImage.create(frameSize_, frameType_); + + cv::ocl::device::mog::getBackgroundImage_ocl(backgroundImage.oclchannels(), weight_, mean_, backgroundImage, nmixtures_, backgroundRatio); +} + +void cv::ocl::MOG::release() +{ + frameSize_ = Size(0, 0); + frameType_ = 0; + nframes_ = 0; + + weight_.release(); + sortKey_.release(); + mean_.release(); + var_.release(); + clReleaseMemObject(cl_constants); +} + +static void mog_withoutLearning(const oclMat& frame, int cn, oclMat& fgmask, oclMat& weight, oclMat& mean, oclMat& var, + int nmixtures, float varThreshold, float backgroundRatio) +{ + Context* clCxt = Context::getContext(); + + size_t local_thread[] = {32, 8, 1}; + size_t global_thread[] = {frame.cols, frame.rows, 1}; + + int frame_step = (int)(frame.step/frame.elemSize()); + int fgmask_step = (int)(fgmask.step/fgmask.elemSize()); + int weight_step = (int)(weight.step/weight.elemSize()); + int mean_step = (int)(mean.step/mean.elemSize()); + int var_step = (int)(var.step/var.elemSize()); + + int fgmask_offset_y = (int)(fgmask.offset/fgmask.step); + int fgmask_offset_x = (int)(fgmask.offset%fgmask.step); + fgmask_offset_x = fgmask_offset_x/(int)fgmask.elemSize(); + + int frame_offset_y = (int)(frame.offset/frame.step); + int frame_offset_x = (int)(frame.offset%frame.step); + frame_offset_x = frame_offset_x/(int)frame.elemSize(); + + char build_option[50]; + if(cn == 1) + { + snprintf(build_option, 50, "-D CN1 -D NMIXTURES=%d", nmixtures); + }else + { + snprintf(build_option, 50, "-D NMIXTURES=%d", nmixtures); + } + + String kernel_name = "mog_withoutLearning_kernel"; + vector< pair > args; + + args.push_back(make_pair(sizeof(cl_mem), (void*)&frame.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&fgmask.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&weight.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&mean.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&var.data)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.rows)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.cols)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&weight_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&mean_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&var_step)); + + args.push_back(make_pair(sizeof(cl_float), (void*)&varThreshold)); + args.push_back(make_pair(sizeof(cl_float), (void*)&backgroundRatio)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_y)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_y)); + + openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); +} + + +static void mog_withLearning(const oclMat& frame, int cn, oclMat& fgmask, oclMat& weight, oclMat& sortKey, oclMat& mean, oclMat& var, + int nmixtures, float varThreshold, float backgroundRatio, float learningRate, float minVar) +{ + Context* clCxt = 
Context::getContext(); + + size_t local_thread[] = {32, 8, 1}; + size_t global_thread[] = {frame.cols, frame.rows, 1}; + + int frame_step = (int)(frame.step/frame.elemSize()); + int fgmask_step = (int)(fgmask.step/fgmask.elemSize()); + int weight_step = (int)(weight.step/weight.elemSize()); + int sortKey_step = (int)(sortKey.step/sortKey.elemSize()); + int mean_step = (int)(mean.step/mean.elemSize()); + int var_step = (int)(var.step/var.elemSize()); + + int fgmask_offset_y = (int)(fgmask.offset/fgmask.step); + int fgmask_offset_x = (int)(fgmask.offset%fgmask.step); + fgmask_offset_x = fgmask_offset_x/(int)fgmask.elemSize(); + + int frame_offset_y = (int)(frame.offset/frame.step); + int frame_offset_x = (int)(frame.offset%frame.step); + frame_offset_x = frame_offset_x/(int)frame.elemSize(); + + char build_option[50]; + if(cn == 1) + { + snprintf(build_option, 50, "-D CN1 -D NMIXTURES=%d", nmixtures); + }else + { + snprintf(build_option, 50, "-D NMIXTURES=%d", nmixtures); + } + + String kernel_name = "mog_withLearning_kernel"; + vector< pair > args; + + args.push_back(make_pair(sizeof(cl_mem), (void*)&frame.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&fgmask.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&weight.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&sortKey.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&mean.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&var.data)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.rows)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.cols)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&weight_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&sortKey_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&mean_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&var_step)); + + args.push_back(make_pair(sizeof(cl_float), (void*)&varThreshold)); + args.push_back(make_pair(sizeof(cl_float), (void*)&backgroundRatio)); + args.push_back(make_pair(sizeof(cl_float), (void*)&learningRate)); + args.push_back(make_pair(sizeof(cl_float), (void*)&minVar)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_y)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_y)); + + openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); +} + +void cv::ocl::device::mog::mog_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& weight, oclMat& sortKey, oclMat& mean, oclMat& var, + int nmixtures, float varThreshold, float learningRate, float backgroundRatio, float noiseSigma) +{ + const float minVar = noiseSigma * noiseSigma; + + if(learningRate > 0.0f) + mog_withLearning(frame, cn, fgmask, weight, sortKey, mean, var, nmixtures, + varThreshold, backgroundRatio, learningRate, minVar); + else + mog_withoutLearning(frame, cn, fgmask, weight, mean, var, nmixtures, varThreshold, backgroundRatio); +} + +void cv::ocl::device::mog::getBackgroundImage_ocl(int cn, const oclMat& weight, const oclMat& mean, oclMat& dst, int nmixtures, float backgroundRatio) +{ + Context* clCxt = Context::getContext(); + + size_t local_thread[] = {32, 8, 1}; + size_t global_thread[] = {dst.cols, dst.rows, 1}; + + int weight_step = 
(int)(weight.step/weight.elemSize()); + int mean_step = (int)(mean.step/mean.elemSize()); + int dst_step = (int)(dst.step/dst.elemSize()); + + char build_option[50]; + if(cn == 1) + { + snprintf(build_option, 50, "-D CN1 -D NMIXTURES=%d", nmixtures); + }else + { + snprintf(build_option, 50, "-D NMIXTURES=%d", nmixtures); + } + + String kernel_name = "getBackgroundImage_kernel"; + vector< pair > args; + + args.push_back(make_pair(sizeof(cl_mem), (void*)&weight.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&mean.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&dst.data)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&dst.rows)); + args.push_back(make_pair(sizeof(cl_int), (void*)&dst.cols)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&weight_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&mean_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&dst_step)); + + args.push_back(make_pair(sizeof(cl_float), (void*)&backgroundRatio)); + + openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); +} + +void cv::ocl::device::mog::loadConstants(float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal) +{ + varMin = cv::min(varMin, varMax); + varMax = cv::max(varMin, varMax); + + c_TB = TB; + + _contant_struct *constants = new _contant_struct; + constants->c_Tb = Tb; + constants->c_TB = TB; + constants->c_Tg = Tg; + constants->c_varInit = varInit; + constants->c_varMin = varMin; + constants->c_varMax = varMax; + constants->c_tau = tau; + constants->c_shadowVal = shadowVal; + + cl_constants = load_constant(*((cl_context*)getoclContext()), *((cl_command_queue*)getoclCommandQueue()), + (void *)constants, sizeof(_contant_struct)); +} + +void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& modesUsed, oclMat& weight, oclMat& variance, + oclMat& mean, float alphaT, float prune, bool detectShadows, int nmixtures) +{ + Context* clCxt = Context::getContext(); + + const float alpha1 = 1.0f - alphaT; + + cl_int detectShadows_flag = 0; + if(detectShadows) + detectShadows_flag = 1; + + size_t local_thread[] = {32, 8, 1}; + size_t global_thread[] = {frame.cols, frame.rows, 1}; + + int frame_step = (int)(frame.step/frame.elemSize()); + int fgmask_step = (int)(fgmask.step/fgmask.elemSize()); + int weight_step = (int)(weight.step/weight.elemSize()); + int modesUsed_step = (int)(modesUsed.step/modesUsed.elemSize()); + int mean_step = (int)(mean.step/mean.elemSize()); + int var_step = (int)(variance.step/variance.elemSize()); + + int fgmask_offset_y = (int)(fgmask.offset/fgmask.step); + int fgmask_offset_x = (int)(fgmask.offset%fgmask.step); + fgmask_offset_x = fgmask_offset_x/(int)fgmask.elemSize(); + + int frame_offset_y = (int)(frame.offset/frame.step); + int frame_offset_x = (int)(frame.offset%frame.step); + frame_offset_x = frame_offset_x/(int)frame.elemSize(); + + String kernel_name = "mog2_kernel"; + vector< pair > args; + + char build_option[50]; + if(cn == 1) + { + snprintf(build_option, 50, "-D CN1 -D NMIXTURES=%d", nmixtures); + }else + { + snprintf(build_option, 50, "-D NMIXTURES=%d", nmixtures); + } + + args.push_back(make_pair(sizeof(cl_mem), (void*)&frame.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&fgmask.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&weight.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&mean.data)); + args.push_back(make_pair(sizeof(cl_mem), 
(void*)&modesUsed.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&variance.data)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.rows)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame.cols)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&weight_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&mean_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&modesUsed_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&var_step)); + + args.push_back(make_pair(sizeof(cl_float), (void*)&alphaT)); + args.push_back(make_pair(sizeof(cl_float), (void*)&alpha1)); + args.push_back(make_pair(sizeof(cl_float), (void*)&prune)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&detectShadows_flag)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&fgmask_offset_y)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_y)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&cl_constants)); + + openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); +} + +void cv::ocl::device::mog::getBackgroundImage2_ocl(int cn, const oclMat& modesUsed, const oclMat& weight, const oclMat& mean, oclMat& dst, int nmixtures) +{ + Context* clCxt = Context::getContext(); + + size_t local_thread[] = {32, 8, 1}; + size_t global_thread[] = {modesUsed.cols, modesUsed.rows, 1}; + + int weight_step = (int)(weight.step/weight.elemSize()); + int modesUsed_step = (int)(modesUsed.step/modesUsed.elemSize()); + int mean_step = (int)(mean.step/mean.elemSize()); + int dst_step = (int)(dst.step/dst.elemSize()); + + int dst_y = (int)(dst.offset/dst.step); + int dst_x = (int)(dst.offset%dst.step); + dst_x = dst_x/(int)dst.elemSize(); + + String kernel_name = "getBackgroundImage2_kernel"; + vector< pair > args; + + char build_option[50]; + if(cn == 1) + { + snprintf(build_option, 50, "-D CN1 -D NMIXTURES=%d", nmixtures); + }else + { + snprintf(build_option, 50, "-D NMIXTURES=%d", nmixtures); + } + + args.push_back(make_pair(sizeof(cl_mem), (void*)&modesUsed.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&weight.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&mean.data)); + args.push_back(make_pair(sizeof(cl_mem), (void*)&dst.data)); + args.push_back(make_pair(sizeof(cl_float), (void*)&c_TB)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&modesUsed.rows)); + args.push_back(make_pair(sizeof(cl_int), (void*)&modesUsed.cols)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&modesUsed_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&weight_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&mean_step)); + args.push_back(make_pair(sizeof(cl_int), (void*)&dst_step)); + + args.push_back(make_pair(sizeof(cl_int), (void*)&dst_x)); + args.push_back(make_pair(sizeof(cl_int), (void*)&dst_y)); + + openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); +} + +///////////////////////////////////////////////////////////////// +// MOG2 + +namespace mog2 +{ + // default parameters of gaussian background detection algorithm + const int defaultHistory = 500; // Learning rate; alpha = 1/defaultHistory2 + const float defaultVarThreshold = 4.0f * 4.0f; + const int 
defaultNMixtures = 5; // maximal number of Gaussians in mixture + const float defaultBackgroundRatio = 0.9f; // threshold sum of weights for background test + const float defaultVarThresholdGen = 3.0f * 3.0f; + const float defaultVarInit = 15.0f; // initial variance for new components + const float defaultVarMax = 5.0f * defaultVarInit; + const float defaultVarMin = 4.0f; + + // additional parameters + const float defaultfCT = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components + const unsigned char defaultnShadowDetection = 127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection + const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation +} + +cv::ocl::MOG2::MOG2(int nmixtures) : frameSize_(0, 0), frameType_(0), nframes_(0) +{ + nmixtures_ = nmixtures > 0 ? nmixtures : mog2::defaultNMixtures; + + history = mog2::defaultHistory; + varThreshold = mog2::defaultVarThreshold; + bShadowDetection = true; + + backgroundRatio = mog2::defaultBackgroundRatio; + fVarInit = mog2::defaultVarInit; + fVarMax = mog2::defaultVarMax; + fVarMin = mog2::defaultVarMin; + + varThresholdGen = mog2::defaultVarThresholdGen; + fCT = mog2::defaultfCT; + nShadowDetection = mog2::defaultnShadowDetection; + fTau = mog2::defaultfTau; +} + +void cv::ocl::MOG2::initialize(cv::Size frameSize, int frameType) +{ + using namespace cv::ocl::device::mog; + CV_Assert(frameType == CV_8UC1 || frameType == CV_8UC3 || frameType == CV_8UC4); + + frameSize_ = frameSize; + frameType_ = frameType; + nframes_ = 0; + + int ch = CV_MAT_CN(frameType); + int work_ch = ch; + + // for each gaussian mixture of each pixel bg model we store ... + // the mixture weight (w), + // the mean (nchannels values) and + // the covariance + weight_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1); + weight_.setTo(Scalar::all(0)); + + variance_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC1); + variance_.setTo(Scalar::all(0)); + + mean_.create(frameSize.height * nmixtures_, frameSize_.width, CV_32FC(work_ch)); //4 channels + mean_.setTo(Scalar::all(0)); + + //make the array for keeping track of the used modes per pixel - all zeros at start + bgmodelUsedModes_.create(frameSize_, CV_8UC1); + bgmodelUsedModes_.setTo(cv::Scalar::all(0)); + + loadConstants(varThreshold, backgroundRatio, varThresholdGen, fVarInit, fVarMin, fVarMax, fTau, nShadowDetection); +} + +void cv::ocl::MOG2::operator()(const oclMat& frame, oclMat& fgmask, float learningRate) +{ + using namespace cv::ocl::device::mog; + + int ch = frame.oclchannels(); + int work_ch = ch; + + if (nframes_ == 0 || learningRate >= 1.0f || frame.size() != frameSize_ || work_ch != mean_.oclchannels()) + initialize(frame.size(), frame.type()); + + fgmask.create(frameSize_, CV_8UC1); + fgmask.setTo(cv::Scalar::all(0)); + + ++nframes_; + learningRate = learningRate >= 0.0f && nframes_ > 1 ? 
learningRate : 1.0f / std::min(2 * nframes_, history); + CV_Assert(learningRate >= 0.0f); + + mog2_ocl(frame, frame.oclchannels(), fgmask, bgmodelUsedModes_, weight_, variance_, mean_, learningRate, -learningRate * fCT, bShadowDetection, nmixtures_); +} + +void cv::ocl::MOG2::getBackgroundImage(oclMat& backgroundImage) const +{ + using namespace cv::ocl::device::mog; + + backgroundImage.create(frameSize_, frameType_); + + cv::ocl::device::mog::getBackgroundImage2_ocl(backgroundImage.oclchannels(), bgmodelUsedModes_, weight_, mean_, backgroundImage, nmixtures_); +} + +void cv::ocl::MOG2::release() +{ + frameSize_ = Size(0, 0); + frameType_ = 0; + nframes_ = 0; + + weight_.release(); + variance_.release(); + mean_.release(); + + bgmodelUsedModes_.release(); +} + diff --git a/modules/ocl/src/opencl/bgfg_mog.cl b/modules/ocl/src/opencl/bgfg_mog.cl new file mode 100644 index 000000000..4ad6a52f7 --- /dev/null +++ b/modules/ocl/src/opencl/bgfg_mog.cl @@ -0,0 +1,543 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Jin Ma jin@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#if defined (CN1) +#define T_FRAME uchar +#define T_MEAN_VAR float +#define CONVERT_TYPE convert_uchar_sat +#define F_ZERO (0.0f) +float cvt(uchar val) +{ + return val; +} + +float sqr(float val) +{ + return val * val; +} + +float sum(float val) +{ + return val; +} + +float clamp1(float var, float learningRate, float diff, float minVar) +{ + return fmax(var + learningRate * (diff * diff - var), minVar); +} +#else +#define T_FRAME uchar4 +#define T_MEAN_VAR float4 +#define CONVERT_TYPE convert_uchar4_sat +#define F_ZERO (0.0f, 0.0f, 0.0f, 0.0f) +float4 cvt(const uchar4 val) +{ + float4 result; + result.x = val.x; + result.y = val.y; + result.z = val.z; + result.w = val.w; + + return result; +} + +float sqr(const float4 val) +{ + return val.x * val.x + val.y * val.y + val.z * val.z; +} + +float sum(const float4 val) +{ + return (val.x + val.y + val.z); +} + +float4 clamp1(const float4 var, float learningRate, const float4 diff, float minVar) +{ + float4 result; + result.x = fmax(var.x + learningRate * (diff.x * diff.x - var.x), minVar); + result.y = fmax(var.y + learningRate * (diff.y * diff.y - var.y), minVar); + result.z = fmax(var.z + learningRate * (diff.z * diff.z - var.z), minVar); + result.w = 0.0f; + return result; +} +#endif + +typedef struct +{ + float c_Tb; + float c_TB; + float c_Tg; + float c_varInit; + float c_varMin; + float c_varMax; + float c_tau; + uchar c_shadowVal; +}con_srtuct_t; + +void swap(__global float* ptr, int x, int y, int k, int rows, int ptr_step) +{ + float val = ptr[(k * rows + y) * ptr_step + x]; + ptr[(k * rows + y) * ptr_step + x] = ptr[((k + 1) * rows + y) * ptr_step + x]; + ptr[((k + 1) * rows + y) * ptr_step + x] = val; +} + +void swap4(__global float4* ptr, int x, int y, int k, int rows, int ptr_step) +{ + float4 val = ptr[(k * rows + y) * ptr_step + x]; + ptr[(k * rows + y) * ptr_step + x] = ptr[((k + 1) * rows + y) * ptr_step + x]; + ptr[((k + 1) * rows + y) * ptr_step + x] = val; +} + +__kernel void mog_withoutLearning_kernel(__global T_FRAME* frame, __global uchar* fgmask, + __global float* weight, __global T_MEAN_VAR* mean, __global T_MEAN_VAR* var, + int frame_row, int frame_col, int frame_step, int fgmask_step, + int weight_step, int mean_step, int var_step, + float varThreshold, float backgroundRatio, int fgmask_offset_x, + int fgmask_offset_y, int frame_offset_x, int frame_offset_y) +{ + int x = get_global_id(0); + int y = get_global_id(1); + + if (x < frame_col && y < frame_row) + { + + T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + (x + frame_offset_x)]); + + int kHit = -1; + int kForeground = -1; + + for (int k = 0; k < (NMIXTURES); ++k) + { + if (weight[(k * frame_row + y) * weight_step + x] < 1.192092896e-07f) + break; + + T_MEAN_VAR mu = mean[(k * frame_row + y) * mean_step + x]; + T_MEAN_VAR _var = var[(k * frame_row + y) + var_step + x]; + + T_MEAN_VAR diff = pix - mu; + + if (sqr(diff) < varThreshold * sum(_var)) + { + kHit = k; + break; + } + } + + if (kHit >= 0) + { + float wsum = 0.0f; + for (int k = 0; k < (NMIXTURES); ++k) + { + wsum += weight[(k * frame_row + y) * weight_step + x]; + + if (wsum > backgroundRatio) + { + kForeground = k + 1; + break; + } + } + } + + if(kHit < 0 || kHit >= kForeground) + fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar) (-1); + else + fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar) (0); + + } +} + +__kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global uchar* fgmask, + __global float* weight, 
__global float* sortKey, __global T_MEAN_VAR* mean, + __global T_MEAN_VAR* var, int frame_row, int frame_col, int frame_step, int fgmask_step, + int weight_step, int sortKey_step, int mean_step, int var_step, + float varThreshold, float backgroundRatio, float learningRate, float minVar, + int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y) +{ + const float w0 = 0.05f; + const float sk0 = w0 / 30.0f; + const float var0 = 900.f; + + int x = get_global_id(0); + int y = get_global_id(1); + + if(x < frame_col && y < frame_row) + { + + float wsum = 0.0f; + int kHit = -1; + int kForeground = -1; + int k = 0; + + T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + (x + frame_offset_x)]); + + for (; k < (NMIXTURES); ++k) + { + float w = weight[(k * frame_row + y) * weight_step + x]; + wsum += w; + + if (w < 1.192092896e-07f) + break; + + T_MEAN_VAR mu = mean[(k * frame_row + y) * mean_step + x]; + T_MEAN_VAR _var = var[(k * frame_row + y) * var_step + x]; + + T_MEAN_VAR diff = pix - mu; + + if (sqr(diff) < varThreshold * sum(_var)) + { + wsum -= w; + float dw = learningRate * (1.0f - w); + + _var = clamp1(_var, learningRate, diff, minVar); + + float sortKey_prev = w / sqr(sum(_var)); + sortKey[(k * frame_row + y) * sortKey_step + x] = sortKey_prev; + + float weight_prev = w + dw; + weight[(k * frame_row + y) * weight_step + x] = weight_prev; + + T_MEAN_VAR mean_prev = mu + learningRate * diff; + mean[(k * frame_row + y) * mean_step + x] = mean_prev; + + T_MEAN_VAR var_prev = _var; + var[(k * frame_row + y) * var_step + x] = var_prev; + + int k1 = k - 1; + + if (k1 >= 0) + { + float sortKey_next = sortKey[(k1 * frame_row + y) * sortKey_step + x]; + float weight_next = weight[(k1 * frame_row + y) * weight_step + x]; + T_MEAN_VAR mean_next = mean[(k1 * frame_row + y) * mean_step + x]; + T_MEAN_VAR var_next = var[(k1 * frame_row + y) * var_step + x]; + + for (; sortKey_next < sortKey_prev && k1 >= 0; --k1) + { + sortKey[(k1 * frame_row + y) * sortKey_step + x] = sortKey_prev; + sortKey[((k1 + 1) * frame_row + y) * sortKey_step + x] = sortKey_next; + + weight[(k1 * frame_row + y) * weight_step + x] = weight_prev; + weight[((k1 + 1) * frame_row + y) * weight_step + x] = weight_next; + + mean[(k1 * frame_row + y) * mean_step + x] = mean_prev; + mean[((k1 + 1) * frame_row + y) * mean_step + x] = mean_next; + + var[(k1 * frame_row + y) * var_step + x] = var_prev; + var[((k1 + 1) * frame_row + y) * var_step + x] = var_next; + + sortKey_prev = sortKey_next; + sortKey_next = k1 > 0 ? sortKey[((k1 - 1) * frame_row + y) * sortKey_step + x] : 0.0f; + + weight_prev = weight_next; + weight_next = k1 > 0 ? weight[((k1 - 1) * frame_row + y) * weight_step + x] : 0.0f; + + mean_prev = mean_next; + mean_next = k1 > 0 ? mean[((k1 - 1) * frame_row + y) * mean_step + x] : (T_MEAN_VAR)F_ZERO; + + var_prev = var_next; + var_next = k1 > 0 ? var[((k1 - 1) * frame_row + y) * var_step + x] : (T_MEAN_VAR)F_ZERO; + } + } + + kHit = k1 + 1; + break; + } + } + + if (kHit < 0) + { + kHit = k = k < ((NMIXTURES) - 1) ? 
k : ((NMIXTURES) - 1); + wsum += w0 - weight[(k * frame_row + y) * weight_step + x]; + + weight[(k * frame_row + y) * weight_step + x] = w0; + mean[(k * frame_row + y) * mean_step + x] = pix; + #if defined (CN1) + var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0); + #else + var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0, var0, var0, var0); + #endif + sortKey[(k * frame_row + y) * sortKey_step + x] = sk0; + } + else + { + for( ; k < (NMIXTURES); k++) + wsum += weight[(k * frame_row + y) * weight_step + x]; + } + + float wscale = 1.0f / wsum; + wsum = 0; + for (k = 0; k < (NMIXTURES); ++k) + { + float w = weight[(k * frame_row + y) * weight_step + x]; + wsum += w *= wscale; + + weight[(k * frame_row + y) * weight_step + x] = w; + sortKey[(k * frame_row + y) * sortKey_step + x] *= wscale; + + if (wsum > backgroundRatio && kForeground < 0) + kForeground = k + 1; + } + if(kHit >= kForeground) + fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar)(-1); + else + fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar)(0); + } +} + +__kernel void getBackgroundImage_kernel(__global float* weight, __global T_MEAN_VAR* mean, __global T_FRAME* dst, + int dst_row, int dst_col, int weight_step, int mean_step, int dst_step, + float backgroundRatio) +{ + int x = get_global_id(0); + int y = get_global_id(1); + + if(x < dst_col && y < dst_row) + { + T_MEAN_VAR meanVal = (T_MEAN_VAR)F_ZERO; + float totalWeight = 0.0f; + + for (int mode = 0; mode < (NMIXTURES); ++mode) + { + float _weight = weight[(mode * dst_row + y) * weight_step + x]; + + T_MEAN_VAR _mean = mean[(mode * dst_row + y) * mean_step + x]; + meanVal = meanVal + _weight * _mean; + + totalWeight += _weight; + + if(totalWeight > backgroundRatio) + break; + } + meanVal = meanVal * (1.f / totalWeight); + dst[y * dst_step + x] = CONVERT_TYPE(meanVal); + } +} + +__kernel void mog2_kernel(__global T_FRAME * frame, __global uchar* fgmask, __global float* weight, __global T_MEAN_VAR * mean, + __global uchar* modesUsed, __global float* variance, int frame_row, int frame_col, int frame_step, + int fgmask_step, int weight_step, int mean_step, int modesUsed_step, int var_step, float alphaT, float alpha1, float prune, + int detectShadows_flag, int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y, __constant con_srtuct_t* constants) +{ + int x = get_global_id(0); + int y = get_global_id(1); + + if(x < frame_col && y < frame_row) + { + T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + x + frame_offset_x]); + + bool background = false; // true - the pixel classified as background + + bool fitsPDF = false; //if it remains zero a new GMM mode will be added + + int nmodes = modesUsed[y * modesUsed_step + x]; + int nNewModes = nmodes; //current number of modes in GMM + + float totalWeight = 0.0f; + + for (int mode = 0; mode < nmodes; ++mode) + { + float _weight = alpha1 * weight[(mode * frame_row + y) * weight_step + x] + prune; + + if (!fitsPDF) + { + float var = variance[(mode * frame_row + y) * var_step + x]; + + T_MEAN_VAR _mean = mean[(mode * frame_row + y) * mean_step + x]; + + T_MEAN_VAR diff = _mean - pix; + float dist2 = sqr(diff); + + if (totalWeight < constants -> c_TB && dist2 < constants -> c_Tb * var) + background = true; + + if (dist2 < constants -> c_Tg * var) + { + fitsPDF = true; + _weight += alphaT; + float k = alphaT / _weight; + mean[(mode * frame_row + y) * mean_step + x] = _mean - k * diff; + float varnew = var + k * (dist2 - var); + 
varnew = fmax(varnew, constants -> c_varMin); + varnew = fmin(varnew, constants -> c_varMax); + + variance[(mode * frame_row + y) * var_step + x] = varnew; + for (int i = mode; i > 0; --i) + { + if (_weight < weight[((i - 1) * frame_row + y) * weight_step + x]) + break; + swap(weight, x, y, i - 1, frame_row, weight_step); + swap(variance, x, y, i - 1, frame_row, var_step); + #if defined (CN1) + swap(mean, x, y, i - 1, frame_row, mean_step); + #else + swap4(mean, x, y, i - 1, frame_row, mean_step); + #endif + } + } + } // !fitsPDF + + if (_weight < -prune) + { + _weight = 0.0; + nmodes--; + } + + weight[(mode * frame_row + y) * weight_step + x] = _weight; //update weight by the calculated value + totalWeight += _weight; + } + + totalWeight = 1.f / totalWeight; + for (int mode = 0; mode < nmodes; ++mode) + weight[(mode * frame_row + y) * weight_step + x] *= totalWeight; + + nmodes = nNewModes; + + if (!fitsPDF) + { + int mode = nmodes == (NMIXTURES) ? (NMIXTURES) - 1 : nmodes++; + + if (nmodes == 1) + weight[(mode * frame_row + y) * weight_step + x] = 1.f; + else + { + weight[(mode * frame_row + y) * weight_step + x] = alphaT; + + for (int i = 0; i < nmodes - 1; ++i) + weight[(i * frame_row + y) * weight_step + x] *= alpha1; + } + + mean[(mode * frame_row + y) * mean_step + x] = pix; + variance[(mode * frame_row + y) * var_step + x] = constants -> c_varInit; + + for (int i = nmodes - 1; i > 0; --i) + { + // check one up + if (alphaT < weight[((i - 1) * frame_row + y) * weight_step + x]) + break; + + swap(weight, x, y, i - 1, frame_row, weight_step); + swap(variance, x, y, i - 1, frame_row, var_step); + #if defined (CN1) + swap(mean, x, y, i - 1, frame_row, mean_step); + #else + swap4(mean, x, y, i - 1, frame_row, mean_step); + #endif + } + } + + modesUsed[y * modesUsed_step + x] = nmodes; + + bool isShadow = false; + if (detectShadows_flag && !background) + { + float tWeight = 0.0f; + + for (int mode = 0; mode < nmodes; ++mode) + { + T_MEAN_VAR _mean = mean[(mode * frame_row + y) * mean_step + x]; + + T_MEAN_VAR pix_mean = pix * _mean; + + float numerator = sum(pix_mean); + float denominator = sqr(_mean); + + if (denominator == 0) + break; + + if (numerator <= denominator && numerator >= constants -> c_tau * denominator) + { + float a = numerator / denominator; + + T_MEAN_VAR dD = a * _mean - pix; + + if (sqr(dD) < constants -> c_Tb * variance[(mode * frame_row + y) * var_step + x] * a * a) + { + isShadow = true; + break; + } + } + + tWeight += weight[(mode * frame_row + y) * weight_step + x]; + if (tWeight > constants -> c_TB) + break; + } + } + + fgmask[(y + fgmask_offset_y) * fgmask_step + x + fgmask_offset_x] = background ? 0 : isShadow ? 
constants -> c_shadowVal : 255; + } +} + +__kernel void getBackgroundImage2_kernel(__global uchar* modesUsed, __global float* weight, __global T_MEAN_VAR* mean, + __global T_FRAME* dst, float c_TB, int modesUsed_row, int modesUsed_col, int modesUsed_step, int weight_step, + int mean_step, int dst_step, int dst_x, int dst_y) +{ + int x = get_global_id(0); + int y = get_global_id(1); + + if(x < modesUsed_col && y < modesUsed_row) + { + int nmodes = modesUsed[y * modesUsed_step + x]; + + T_MEAN_VAR meanVal = (T_MEAN_VAR)F_ZERO; + + float totalWeight = 0.0f; + + for (int mode = 0; mode < nmodes; ++mode) + { + float _weight = weight[(mode * modesUsed_row + y) * weight_step + x]; + + T_MEAN_VAR _mean = mean[(mode * modesUsed_row + y) * mean_step + x]; + meanVal = meanVal + _weight * _mean; + + totalWeight += _weight; + + if(totalWeight > c_TB) + break; + } + + meanVal = meanVal * (1.f / totalWeight); + dst[(y + dst_y) * dst_step + x + dst_x] = CONVERT_TYPE(meanVal); + } +} diff --git a/modules/ocl/test/test_bgfg.cpp b/modules/ocl/test/test_bgfg.cpp new file mode 100644 index 000000000..f2bda321b --- /dev/null +++ b/modules/ocl/test/test_bgfg.cpp @@ -0,0 +1,232 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Jin Ma, jin@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +#ifdef HAVE_OPENCL + +using namespace cv; +using namespace cv::ocl; +using namespace cvtest; +using namespace testing; +using namespace std; + +extern string workdir; +////////////////////////////////////////////////////// +// MOG + +namespace +{ + IMPLEMENT_PARAM_CLASS(UseGray, bool) + IMPLEMENT_PARAM_CLASS(LearningRate, double) +} + +PARAM_TEST_CASE(mog, UseGray, LearningRate, bool) +{ + bool useGray; + double learningRate; + bool useRoi; + + virtual void SetUp() + { + useGray = GET_PARAM(0); + + learningRate = GET_PARAM(1); + + useRoi = GET_PARAM(2); + } +}; + +TEST_P(mog, Update) +{ + std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/768x576.avi"; + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::ocl::MOG mog; + cv::ocl::oclMat foreground = createMat_ocl(frame.size(), CV_8UC1, useRoi); + + cv::BackgroundSubtractorMOG mog_gold; + cv::Mat foreground_gold; + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (useGray) + { + cv::Mat temp; + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + cv::swap(temp, frame); + } + + mog(loadMat_ocl(frame, useRoi), foreground, (float)learningRate); + + mog_gold(frame, foreground_gold, learningRate); + + EXPECT_MAT_NEAR(foreground_gold, foreground, 0.0); + } +} +INSTANTIATE_TEST_CASE_P(OCL_Video, mog, testing::Combine( + testing::Values(UseGray(false), UseGray(true)), + testing::Values(LearningRate(0.0), LearningRate(0.01)), + Values(true, false))); + +////////////////////////////////////////////////////// +// MOG2 + +namespace +{ + IMPLEMENT_PARAM_CLASS(DetectShadow, bool) +} + +PARAM_TEST_CASE(mog2, UseGray, DetectShadow, bool) +{ + bool useGray; + bool detectShadow; + bool useRoi; + virtual void SetUp() + { + useGray = GET_PARAM(0); + detectShadow = GET_PARAM(1); + useRoi = GET_PARAM(2); + } +}; + +TEST_P(mog2, Update) +{ + std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/768x576.avi"; + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::ocl::MOG2 mog2; + mog2.bShadowDetection = detectShadow; + cv::ocl::oclMat foreground = createMat_ocl(frame.size(), CV_8UC1, useRoi); + + cv::BackgroundSubtractorMOG2 mog2_gold; + mog2_gold.set("detectShadows", detectShadow); + cv::Mat foreground_gold; + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + if (useGray) + { + cv::Mat temp; + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + cv::swap(temp, frame); + } + + mog2(loadMat_ocl(frame, useRoi), foreground); + + mog2_gold(frame, foreground_gold); + + if (detectShadow) + { + + EXPECT_MAT_SIMILAR(foreground_gold, foreground, 1e-2); + } + else + { + EXPECT_MAT_NEAR(foreground_gold, foreground, 0); + } + } +} + +TEST_P(mog2, getBackgroundImage) +{ + if (useGray) + return; + + std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/768x576.avi"; + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + + cv::ocl::MOG2 mog2; + mog2.bShadowDetection = detectShadow; + cv::ocl::oclMat foreground; + + cv::BackgroundSubtractorMOG2 mog2_gold; + mog2_gold.set("detectShadows", detectShadow); + cv::Mat foreground_gold; + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + mog2(loadMat_ocl(frame, useRoi), foreground); + + mog2_gold(frame, foreground_gold); + } + + 
cv::ocl::oclMat background = createMat_ocl(frame.size(), frame.type(), useRoi); + mog2.getBackgroundImage(background); + + cv::Mat background_gold; + mog2_gold.getBackgroundImage(background_gold); + + EXPECT_MAT_NEAR(background_gold, background, 1.0); +} + +INSTANTIATE_TEST_CASE_P(OCL_Video, mog2, testing::Combine( + testing::Values(UseGray(true), UseGray(false)), + testing::Values(DetectShadow(true), DetectShadow(false)), + Values(true, false))); + +#endif \ No newline at end of file diff --git a/modules/ocl/test/test_optflow.cpp b/modules/ocl/test/test_optflow.cpp index 4693d46dd..8fcc105a1 100644 --- a/modules/ocl/test/test_optflow.cpp +++ b/modules/ocl/test/test_optflow.cpp @@ -146,10 +146,10 @@ PARAM_TEST_CASE(TVL1, bool) TEST_P(TVL1, Accuracy) { - cv::Mat frame0 = readImage("gpu/opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE); + cv::Mat frame0 = readImage("F:/mcw/opencv/opencv/samples/gpu/rubberwhale1.png", cv::IMREAD_GRAYSCALE); ASSERT_FALSE(frame0.empty()); - cv::Mat frame1 = readImage("gpu/opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE); + cv::Mat frame1 = readImage("../../../opencv/samples/gpu/rubberwhale2.png", cv::IMREAD_GRAYSCALE); ASSERT_FALSE(frame1.empty()); cv::ocl::OpticalFlowDual_TVL1_OCL d_alg; @@ -168,7 +168,7 @@ TEST_P(TVL1, Accuracy) EXPECT_MAT_SIMILAR(gold[0], d_flowx, 3e-3); EXPECT_MAT_SIMILAR(gold[1], d_flowy, 3e-3); } -INSTANTIATE_TEST_CASE_P(OCL_Video, TVL1, Values(true, false)); +INSTANTIATE_TEST_CASE_P(OCL_Video, TVL1, Values(false, true)); ///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/modules/ocl/test/utility.cpp b/modules/ocl/test/utility.cpp index 440a89d4a..750c3c82b 100644 --- a/modules/ocl/test/utility.cpp +++ b/modules/ocl/test/utility.cpp @@ -100,6 +100,44 @@ Mat randomMat(Size size, int type, double minVal, double maxVal) return randomMat(TS::ptr()->get_rng(), size, type, minVal, maxVal, false); } +cv::ocl::oclMat createMat_ocl(Size size, int type, bool useRoi) +{ + Size size0 = size; + + if (useRoi) + { + size0.width += randomInt(5, 15); + size0.height += randomInt(5, 15); + } + + cv::ocl::oclMat d_m(size0, type); + + if (size0 != size) + d_m = d_m(Rect((size0.width - size.width) / 2, (size0.height - size.height) / 2, size.width, size.height)); + + return d_m; +} + +cv::ocl::oclMat loadMat_ocl(const Mat& m, bool useRoi) +{ + CV_Assert(m.type() == CV_8UC1 || m.type() == CV_8UC3); + cv::ocl::oclMat d_m; + d_m = createMat_ocl(m.size(), m.type(), useRoi); + + Size ls; + Point pt; + + d_m.locateROI(ls, pt); + + Rect roi(pt.x, pt.y, d_m.size().width, d_m.size().height); + + cv::ocl::oclMat m_ocl(m); + + cv::ocl::oclMat d_m_roi(d_m, roi); + + m_ocl.copyTo(d_m); + return d_m; +} /* void showDiff(InputArray gold_, InputArray actual_, double eps) { diff --git a/modules/ocl/test/utility.hpp b/modules/ocl/test/utility.hpp index 0b101ec50..1e17c6dbc 100644 --- a/modules/ocl/test/utility.hpp +++ b/modules/ocl/test/utility.hpp @@ -70,6 +70,9 @@ double checkNorm(const cv::Mat &m); double checkNorm(const cv::Mat &m1, const cv::Mat &m2); double checkSimilarity(const cv::Mat &m1, const cv::Mat &m2); +//oclMat create +cv::ocl::oclMat createMat_ocl(cv::Size size, int type, bool useRoi = false); +cv::ocl::oclMat loadMat_ocl(const cv::Mat& m, bool useRoi = false); #define EXPECT_MAT_NORM(mat, eps) \ { \ EXPECT_LE(checkNorm(cv::Mat(mat)), eps) \ diff --git a/samples/ocl/bgfg_segm.cpp b/samples/ocl/bgfg_segm.cpp new file mode 100644 index 000000000..410f34693 --- /dev/null +++ 
b/samples/ocl/bgfg_segm.cpp @@ -0,0 +1,135 @@ +#include +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/ocl/ocl.hpp" +#include "opencv2/highgui/highgui.hpp" + +using namespace std; +using namespace cv; +using namespace cv::ocl; + +#define M_MOG 1 +#define M_MOG2 2 + +int main(int argc, const char** argv) +{ + + cv::CommandLineParser cmd(argc, argv, + "{ c | camera | false | use camera }" + "{ f | file | 768x576.avi | input video file }" + "{ m | method | mog | method (mog, mog2) }" + "{ h | help | false | print help message }"); + + if (cmd.get("help")) + { + cout << "Usage : bgfg_segm [options]" << endl; + cout << "Avaible options:" << endl; + cmd.printParams(); + return 0; + } + + bool useCamera = cmd.get("camera"); + string file = cmd.get("file"); + string method = cmd.get("method"); + + if (method != "mog" && method != "mog2") + { + cerr << "Incorrect method" << endl; + return -1; + } + + int m = method == "mog" ? M_MOG : M_MOG2; + + VideoCapture cap; + + if (useCamera) + cap.open(0); + else + cap.open(file); + + if (!cap.isOpened()) + { + cerr << "can not open camera or video file" << endl; + return -1; + } + + std::vectorinfo; + cv::ocl::getDevice(info); + + Mat frame; + cap >> frame; + + oclMat d_frame(frame); + + cv::ocl::MOG mog; + cv::ocl::MOG2 mog2; + + oclMat d_fgmask; + oclMat d_fgimg; + oclMat d_bgimg; + + d_fgimg.create(d_frame.size(), d_frame.type()); + + Mat fgmask; + Mat fgimg; + Mat bgimg; + + switch (m) + { + case M_MOG: + mog(d_frame, d_fgmask, 0.01f); + break; + + case M_MOG2: + mog2(d_frame, d_fgmask); + break; + } + + for(;;) + { + cap >> frame; + if (frame.empty()) + break; + d_frame.upload(frame); + + int64 start = cv::getTickCount(); + + //update the model + switch (m) + { + case M_MOG: + mog(d_frame, d_fgmask, 0.01f); + mog.getBackgroundImage(d_bgimg); + break; + + case M_MOG2: + mog2(d_frame, d_fgmask); + mog2.getBackgroundImage(d_bgimg); + break; + } + + double fps = cv::getTickFrequency() / (cv::getTickCount() - start); + std::cout << "FPS : " << fps << std::endl; + + d_fgimg.setTo(Scalar::all(0)); + d_frame.copyTo(d_fgimg, d_fgmask); + + d_fgmask.download(fgmask); + d_fgimg.download(fgimg); + if (!d_bgimg.empty()) + d_bgimg.download(bgimg); + + imshow("image", frame); + imshow("foreground mask", fgmask); + imshow("foreground image", fgimg); + if (!bgimg.empty()) + imshow("mean background image", bgimg); + + int key = waitKey(30); + if (key == 27) + break; + } + + return 0; +} From 8feaadc69f5467201913d06eff678588034d9dce Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Fri, 16 Aug 2013 15:24:55 +0800 Subject: [PATCH 02/55] Resolved a compiling error under Linux. --- modules/ocl/src/bgfg_mog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ocl/src/bgfg_mog.cpp b/modules/ocl/src/bgfg_mog.cpp index 33bf143d2..0bdfe6f2f 100644 --- a/modules/ocl/src/bgfg_mog.cpp +++ b/modules/ocl/src/bgfg_mog.cpp @@ -69,7 +69,7 @@ namespace cv } } -#if _MSC_VER +#if defined _MSC_VER #define snprintf sprintf_s #endif From 916b92bc3b323a5e73aae18cef4a9aebfda11035 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Fri, 16 Aug 2013 15:33:43 +0800 Subject: [PATCH 03/55] Resolved a compiling warning under Windows. 
--- modules/ocl/perf/perf_bgfg.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp index e7aad759f..358b8ff38 100644 --- a/modules/ocl/perf/perf_bgfg.cpp +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -48,9 +48,9 @@ using namespace cv::ocl; void cvtFrameFmt(std::vector& input, std::vector& output, int output_cn) { - for(int i=0; i Date: Fri, 16 Aug 2013 16:07:53 +0800 Subject: [PATCH 04/55] Resolved compiling errors under Linux. --- modules/ocl/perf/perf_bgfg.cpp | 2 +- modules/ocl/test/test_bgfg.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp index 358b8ff38..a75d6b8b9 100644 --- a/modules/ocl/perf/perf_bgfg.cpp +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -46,7 +46,7 @@ using namespace cv; using namespace cv::ocl; -void cvtFrameFmt(std::vector& input, std::vector& output, int output_cn) +static void cvtFrameFmt(std::vector& input, std::vector& output, int output_cn) { for(int i = 0; i< (int)(input.size()); i++) { diff --git a/modules/ocl/test/test_bgfg.cpp b/modules/ocl/test/test_bgfg.cpp index f2bda321b..f5afd12ee 100644 --- a/modules/ocl/test/test_bgfg.cpp +++ b/modules/ocl/test/test_bgfg.cpp @@ -43,7 +43,7 @@ // //M*/ -#include "precomp.hpp" +#include "test_precomp.hpp" #ifdef HAVE_OPENCL From 1bcd1fd3a2db3c8741e63ff976aab17ac87b23d8 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Fri, 16 Aug 2013 17:39:17 +0800 Subject: [PATCH 05/55] Used perf_precomp.hpp instead of precomp.hpp for test. --- modules/ocl/perf/perf_bgfg.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp index a75d6b8b9..d507a3b5e 100644 --- a/modules/ocl/perf/perf_bgfg.cpp +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -42,7 +42,7 @@ // the use of this software, even if advised of the possibility of such damage. 
// //M*/ -#include "precomp.hpp" +#include "perf_precomp.hpp" using namespace cv; using namespace cv::ocl; From c1de14c27ac555fe6e60fff1f3fd307a26e79a24 Mon Sep 17 00:00:00 2001 From: kdrobnyh Date: Sat, 17 Aug 2013 20:31:03 +0400 Subject: [PATCH 06/55] Fix bilateralFilter function --- modules/imgproc/src/smooth.cpp | 68 ++++++++++++------- .../imgproc/test/test_bilateral_filter.cpp | 2 +- 2 files changed, 46 insertions(+), 24 deletions(-) diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 00be08618..3dad2c087 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -1875,6 +1875,41 @@ private: float *space_weight, *color_weight; }; +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +class IPPBilateralFilter_8u_Invoker : + public ParallelLoopBody +{ +public: + IPPBilateralFilter_8u_Invoker(Mat &_src, Mat &_dst, double _sigma_color, double _sigma_space, int _radius, bool *_ok) : + ParallelLoopBody(), src(_src), dst(_dst), sigma_color(_sigma_color), sigma_space(_sigma_space), radius(_radius), ok(_ok) + { + *ok = true; + } + + virtual void operator() (const Range& range) const + { + int d = radius * 2 + 1; + IppiSize kernel = {d, d}; + IppiSize roi={dst.cols, range.end - range.start}; + int bufsize=0; + ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize); + AutoBuffer buf(bufsize); + IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32); + ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, (Ipp32f)sigma_color, (Ipp32f)sigma_space, 1, pSpec ); + if( ippiFilterBilateral_8u_C1R( src.ptr(range.start) + radius * ((int)src.step[0] + 1), (int)src.step[0], dst.ptr(range.start), (int)dst.step[0], roi, kernel, pSpec ) < 0) + *ok = false; + } +private: + Mat &src; + Mat &dst; + double sigma_color; + double sigma_space; + int radius; + bool *ok; + const IPPBilateralFilter_8u_Invoker& operator= (const IPPBilateralFilter_8u_Invoker&); +}; +#endif + static void bilateralFilter_8u( const Mat& src, Mat& dst, int d, double sigma_color, double sigma_space, @@ -1904,32 +1939,19 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d, radius = MAX(radius, 1); d = radius*2 + 1; -#if 0 && defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7) - if(cn == 1) - { - IppiSize kernel = {d, d}; - IppiSize roi={src.cols, src.rows}; - int bufsize=0; - ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize); - AutoBuffer buf(bufsize+128); - IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32); - ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, sigma_color*sigma_color, sigma_space*sigma_space, 1, pSpec ); - Mat tsrc; - const Mat* psrc = &src; - if( src.data == dst.data ) - { - src.copyTo(tsrc); - psrc = &tsrc; - } - if( ippiFilterBilateral_8u_C1R(psrc->data, (int)psrc->step[0], - dst.data, (int)dst.step[0], - roi, kernel, pSpec) >= 0 ) - return; - } -#endif Mat temp; copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); +#if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7) + if( cn == 1 ) + { + bool ok; + IPPBilateralFilter_8u_Invoker body(temp, dst, sigma_color * sigma_color, sigma_space * sigma_space, radius, &ok ); + parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16)); + if( ok ) return; + } +#endif + vector _color_weight(cn*256); vector _space_weight(d*d); vector _space_ofs(d*d); diff --git a/modules/imgproc/test/test_bilateral_filter.cpp b/modules/imgproc/test/test_bilateral_filter.cpp 
index 2d45fdcf7..0bfc3dc4c 100644 --- a/modules/imgproc/test/test_bilateral_filter.cpp +++ b/modules/imgproc/test/test_bilateral_filter.cpp @@ -251,7 +251,7 @@ namespace cvtest int CV_BilateralFilterTest::validate_test_results(int test_case_index) { - static const double eps = 1; + static const double eps = 4; Mat reference_dst, reference_src; if (_src.depth() == CV_32F) From 1e8194fd3ccf37bb894f4af410f1fc62ff2c8b23 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Thu, 22 Aug 2013 15:42:07 +0800 Subject: [PATCH 07/55] Optimized mog and mog2, which have much better performance. --- modules/ocl/src/bgfg_mog.cpp | 15 ++- modules/ocl/src/opencl/bgfg_mog.cl | 207 ++++++++++++++--------------- modules/ocl/test/test_bgfg.cpp | 2 +- 3 files changed, 114 insertions(+), 110 deletions(-) diff --git a/modules/ocl/src/bgfg_mog.cpp b/modules/ocl/src/bgfg_mog.cpp index 0bdfe6f2f..c079c6b8f 100644 --- a/modules/ocl/src/bgfg_mog.cpp +++ b/modules/ocl/src/bgfg_mog.cpp @@ -254,7 +254,7 @@ static void mog_withoutLearning(const oclMat& frame, int cn, oclMat& fgmask, ocl } -static void mog_withLearning(const oclMat& frame, int cn, oclMat& fgmask, oclMat& weight, oclMat& sortKey, oclMat& mean, oclMat& var, +static void mog_withLearning(const oclMat& frame, int cn, oclMat& fgmask_raw, oclMat& weight, oclMat& sortKey, oclMat& mean, oclMat& var, int nmixtures, float varThreshold, float backgroundRatio, float learningRate, float minVar) { Context* clCxt = Context::getContext(); @@ -262,6 +262,8 @@ static void mog_withLearning(const oclMat& frame, int cn, oclMat& fgmask, oclMat size_t local_thread[] = {32, 8, 1}; size_t global_thread[] = {frame.cols, frame.rows, 1}; + oclMat fgmask(fgmask_raw.size(), CV_32SC1); + int frame_step = (int)(frame.step/frame.elemSize()); int fgmask_step = (int)(fgmask.step/fgmask.elemSize()); int weight_step = (int)(weight.step/weight.elemSize()); @@ -318,6 +320,8 @@ static void mog_withLearning(const oclMat& frame, int cn, oclMat& fgmask, oclMat args.push_back(make_pair(sizeof(cl_int), (void*)&frame_offset_y)); openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); + fgmask.convertTo(fgmask, CV_8U); + fgmask.copyTo(fgmask_raw); } void cv::ocl::device::mog::mog_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& weight, oclMat& sortKey, oclMat& mean, oclMat& var, @@ -392,9 +396,11 @@ void cv::ocl::device::mog::loadConstants(float Tb, float TB, float Tg, float var (void *)constants, sizeof(_contant_struct)); } -void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& modesUsed, oclMat& weight, oclMat& variance, +void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmaskRaw, oclMat& modesUsed, oclMat& weight, oclMat& variance, oclMat& mean, float alphaT, float prune, bool detectShadows, int nmixtures) { + oclMat fgmask(fgmaskRaw.size(), CV_32SC1); + Context* clCxt = Context::getContext(); const float alpha1 = 1.0f - alphaT; @@ -464,6 +470,9 @@ void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmask, args.push_back(make_pair(sizeof(cl_mem), (void*)&cl_constants)); openCLExecuteKernel(clCxt, &bgfg_mog, kernel_name, global_thread, local_thread, args, -1, -1, build_option); + + fgmask.convertTo(fgmask, CV_8U); + fgmask.copyTo(fgmaskRaw); } void cv::ocl::device::mog::getBackgroundImage2_ocl(int cn, const oclMat& modesUsed, const oclMat& weight, const oclMat& mean, oclMat& dst, int nmixtures) @@ -580,7 +589,7 @@ void cv::ocl::MOG2::initialize(cv::Size frameSize, int 
frameType) mean_.setTo(Scalar::all(0)); //make the array for keeping track of the used modes per pixel - all zeros at start - bgmodelUsedModes_.create(frameSize_, CV_8UC1); + bgmodelUsedModes_.create(frameSize_, CV_32FC1); bgmodelUsedModes_.setTo(cv::Scalar::all(0)); loadConstants(varThreshold, backgroundRatio, varThresholdGen, fVarInit, fVarMin, fVarMax, fTau, nShadowDetection); diff --git a/modules/ocl/src/opencl/bgfg_mog.cl b/modules/ocl/src/opencl/bgfg_mog.cl index 4ad6a52f7..77bdb9c2a 100644 --- a/modules/ocl/src/opencl/bgfg_mog.cl +++ b/modules/ocl/src/opencl/bgfg_mog.cl @@ -188,7 +188,7 @@ __kernel void mog_withoutLearning_kernel(__global T_FRAME* frame, __global uchar } } -__kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global uchar* fgmask, +__kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global int* fgmask, __global float* weight, __global float* sortKey, __global T_MEAN_VAR* mean, __global T_MEAN_VAR* var, int frame_row, int frame_col, int frame_step, int fgmask_step, int weight_step, int sortKey_step, int mean_step, int var_step, @@ -202,130 +202,125 @@ __kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global uchar* f int x = get_global_id(0); int y = get_global_id(1); - if(x < frame_col && y < frame_row) + if(x >= frame_col || y >= frame_row) return; + float wsum = 0.0f; + int kHit = -1; + int kForeground = -1; + int k = 0; + + T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + (x + frame_offset_x)]); + + for (; k < (NMIXTURES); ++k) { + float w = weight[(k * frame_row + y) * weight_step + x]; + wsum += w; - float wsum = 0.0f; - int kHit = -1; - int kForeground = -1; - int k = 0; + if (w < 1.192092896e-07f) + break; - T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + (x + frame_offset_x)]); - - for (; k < (NMIXTURES); ++k) + T_MEAN_VAR mu = mean[(k * frame_row + y) * mean_step + x]; + T_MEAN_VAR _var = var[(k * frame_row + y) * var_step + x]; + + float sortKey_prev, weight_prev; + T_MEAN_VAR mean_prev, var_prev; + if (sqr(pix - mu) < varThreshold * sum(_var)) { - float w = weight[(k * frame_row + y) * weight_step + x]; - wsum += w; + wsum -= w; + float dw = learningRate * (1.0f - w); - if (w < 1.192092896e-07f) - break; + _var = clamp1(_var, learningRate, pix - mu, minVar); - T_MEAN_VAR mu = mean[(k * frame_row + y) * mean_step + x]; - T_MEAN_VAR _var = var[(k * frame_row + y) * var_step + x]; + sortKey_prev = w / sqr(sum(_var)); + sortKey[(k * frame_row + y) * sortKey_step + x] = sortKey_prev; - T_MEAN_VAR diff = pix - mu; + weight_prev = w + dw; + weight[(k * frame_row + y) * weight_step + x] = weight_prev; - if (sqr(diff) < varThreshold * sum(_var)) + mean_prev = mu + learningRate * (pix - mu); + mean[(k * frame_row + y) * mean_step + x] = mean_prev; + + var_prev = _var; + var[(k * frame_row + y) * var_step + x] = var_prev; + } + + int k1 = k - 1; + + if (k1 >= 0 && sqr(pix - mu) < varThreshold * sum(_var)) + { + float sortKey_next = sortKey[(k1 * frame_row + y) * sortKey_step + x]; + float weight_next = weight[(k1 * frame_row + y) * weight_step + x]; + T_MEAN_VAR mean_next = mean[(k1 * frame_row + y) * mean_step + x]; + T_MEAN_VAR var_next = var[(k1 * frame_row + y) * var_step + x]; + + for (; sortKey_next < sortKey_prev && k1 >= 0; --k1) { - wsum -= w; - float dw = learningRate * (1.0f - w); + sortKey[(k1 * frame_row + y) * sortKey_step + x] = sortKey_prev; + sortKey[((k1 + 1) * frame_row + y) * sortKey_step + x] = sortKey_next; - _var = clamp1(_var, learningRate, diff, minVar); + weight[(k1 * 
frame_row + y) * weight_step + x] = weight_prev; + weight[((k1 + 1) * frame_row + y) * weight_step + x] = weight_next; - float sortKey_prev = w / sqr(sum(_var)); - sortKey[(k * frame_row + y) * sortKey_step + x] = sortKey_prev; + mean[(k1 * frame_row + y) * mean_step + x] = mean_prev; + mean[((k1 + 1) * frame_row + y) * mean_step + x] = mean_next; - float weight_prev = w + dw; - weight[(k * frame_row + y) * weight_step + x] = weight_prev; + var[(k1 * frame_row + y) * var_step + x] = var_prev; + var[((k1 + 1) * frame_row + y) * var_step + x] = var_next; - T_MEAN_VAR mean_prev = mu + learningRate * diff; - mean[(k * frame_row + y) * mean_step + x] = mean_prev; + sortKey_prev = sortKey_next; + sortKey_next = k1 > 0 ? sortKey[((k1 - 1) * frame_row + y) * sortKey_step + x] : 0.0f; - T_MEAN_VAR var_prev = _var; - var[(k * frame_row + y) * var_step + x] = var_prev; + weight_prev = weight_next; + weight_next = k1 > 0 ? weight[((k1 - 1) * frame_row + y) * weight_step + x] : 0.0f; - int k1 = k - 1; + mean_prev = mean_next; + mean_next = k1 > 0 ? mean[((k1 - 1) * frame_row + y) * mean_step + x] : (T_MEAN_VAR)F_ZERO; - if (k1 >= 0) - { - float sortKey_next = sortKey[(k1 * frame_row + y) * sortKey_step + x]; - float weight_next = weight[(k1 * frame_row + y) * weight_step + x]; - T_MEAN_VAR mean_next = mean[(k1 * frame_row + y) * mean_step + x]; - T_MEAN_VAR var_next = var[(k1 * frame_row + y) * var_step + x]; - - for (; sortKey_next < sortKey_prev && k1 >= 0; --k1) - { - sortKey[(k1 * frame_row + y) * sortKey_step + x] = sortKey_prev; - sortKey[((k1 + 1) * frame_row + y) * sortKey_step + x] = sortKey_next; - - weight[(k1 * frame_row + y) * weight_step + x] = weight_prev; - weight[((k1 + 1) * frame_row + y) * weight_step + x] = weight_next; - - mean[(k1 * frame_row + y) * mean_step + x] = mean_prev; - mean[((k1 + 1) * frame_row + y) * mean_step + x] = mean_next; - - var[(k1 * frame_row + y) * var_step + x] = var_prev; - var[((k1 + 1) * frame_row + y) * var_step + x] = var_next; - - sortKey_prev = sortKey_next; - sortKey_next = k1 > 0 ? sortKey[((k1 - 1) * frame_row + y) * sortKey_step + x] : 0.0f; - - weight_prev = weight_next; - weight_next = k1 > 0 ? weight[((k1 - 1) * frame_row + y) * weight_step + x] : 0.0f; - - mean_prev = mean_next; - mean_next = k1 > 0 ? mean[((k1 - 1) * frame_row + y) * mean_step + x] : (T_MEAN_VAR)F_ZERO; - - var_prev = var_next; - var_next = k1 > 0 ? var[((k1 - 1) * frame_row + y) * var_step + x] : (T_MEAN_VAR)F_ZERO; - } - } - - kHit = k1 + 1; - break; + var_prev = var_next; + var_next = k1 > 0 ? var[((k1 - 1) * frame_row + y) * var_step + x] : (T_MEAN_VAR)F_ZERO; } } - if (kHit < 0) - { - kHit = k = k < ((NMIXTURES) - 1) ? 
k : ((NMIXTURES) - 1); - wsum += w0 - weight[(k * frame_row + y) * weight_step + x]; - - weight[(k * frame_row + y) * weight_step + x] = w0; - mean[(k * frame_row + y) * mean_step + x] = pix; - #if defined (CN1) - var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0); - #else - var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0, var0, var0, var0); - #endif - sortKey[(k * frame_row + y) * sortKey_step + x] = sk0; - } - else - { - for( ; k < (NMIXTURES); k++) - wsum += weight[(k * frame_row + y) * weight_step + x]; - } - - float wscale = 1.0f / wsum; - wsum = 0; - for (k = 0; k < (NMIXTURES); ++k) - { - float w = weight[(k * frame_row + y) * weight_step + x]; - wsum += w *= wscale; - - weight[(k * frame_row + y) * weight_step + x] = w; - sortKey[(k * frame_row + y) * sortKey_step + x] *= wscale; - - if (wsum > backgroundRatio && kForeground < 0) - kForeground = k + 1; - } - if(kHit >= kForeground) - fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar)(-1); - else - fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar)(0); + kHit = k1 + 1; + break; } + + if (kHit < 0) + { + kHit = k = k < ((NMIXTURES) - 1) ? k : ((NMIXTURES) - 1); + wsum += w0 - weight[(k * frame_row + y) * weight_step + x]; + + weight[(k * frame_row + y) * weight_step + x] = w0; + mean[(k * frame_row + y) * mean_step + x] = pix; +#if defined (CN1) + var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0); +#else + var[(k * frame_row + y) * var_step + x] = (T_MEAN_VAR)(var0, var0, var0, var0); +#endif + sortKey[(k * frame_row + y) * sortKey_step + x] = sk0; + } + else + { + for( ; k < (NMIXTURES); k++) + wsum += weight[(k * frame_row + y) * weight_step + x]; + } + + float wscale = 1.0f / wsum; + wsum = 0; + for (k = 0; k < (NMIXTURES); ++k) + { + float w = weight[(k * frame_row + y) * weight_step + x]; + w *= wscale; + wsum += w; + + weight[(k * frame_row + y) * weight_step + x] = w; + sortKey[(k * frame_row + y) * sortKey_step + x] *= wscale; + + kForeground = select(kForeground, k + 1, wsum > backgroundRatio && kForeground < 0); + } + fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar)(-(kHit >= kForeground)); } + __kernel void getBackgroundImage_kernel(__global float* weight, __global T_MEAN_VAR* mean, __global T_FRAME* dst, int dst_row, int dst_col, int weight_step, int mean_step, int dst_step, float backgroundRatio) @@ -355,8 +350,8 @@ __kernel void getBackgroundImage_kernel(__global float* weight, __global T_MEAN_ } } -__kernel void mog2_kernel(__global T_FRAME * frame, __global uchar* fgmask, __global float* weight, __global T_MEAN_VAR * mean, - __global uchar* modesUsed, __global float* variance, int frame_row, int frame_col, int frame_step, +__kernel void mog2_kernel(__global T_FRAME * frame, __global int* fgmask, __global float* weight, __global T_MEAN_VAR * mean, + __global int* modesUsed, __global float* variance, int frame_row, int frame_col, int frame_step, int fgmask_step, int weight_step, int mean_step, int modesUsed_step, int var_step, float alphaT, float alpha1, float prune, int detectShadows_flag, int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y, __constant con_srtuct_t* constants) { @@ -509,7 +504,7 @@ __kernel void mog2_kernel(__global T_FRAME * frame, __global uchar* fgmask, __gl } } -__kernel void getBackgroundImage2_kernel(__global uchar* modesUsed, __global float* weight, __global T_MEAN_VAR* mean, +__kernel void getBackgroundImage2_kernel(__global int* modesUsed, __global 
float* weight, __global T_MEAN_VAR* mean, + __global T_FRAME* dst, float c_TB, int modesUsed_row, int modesUsed_col, int modesUsed_step, int weight_step, int mean_step, int dst_step, int dst_x, int dst_y) { diff --git a/modules/ocl/test/test_bgfg.cpp index f5afd12ee..e35f26e3b 100644 --- a/modules/ocl/test/test_bgfg.cpp +++ b/modules/ocl/test/test_bgfg.cpp @@ -191,7 +191,7 @@ TEST_P(mog2, getBackgroundImage) if (useGray) return; - std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "gpu/768x576.avi"; + std::string inputFile = string(cvtest::TS::ptr()->get_data_path()) + "video/768x576.avi"; cv::VideoCapture cap(inputFile); ASSERT_TRUE(cap.isOpened()); From 26b5eb3e3990b31be90ff3ca0afe18eaf68768da Mon Sep 17 00:00:00 2001 From: yao Date: Thu, 29 Aug 2013 10:48:15 +0800 Subject: [PATCH 08/55] add adaptive bilateral filter (cpp and ocl version) --- modules/imgproc/doc/filtering.rst | 22 + .../include/opencv2/imgproc/imgproc.hpp | 4 + modules/imgproc/src/smooth.cpp | 230 ++++ modules/ocl/doc/image_filtering.rst | 2 +- modules/ocl/include/opencv2/ocl/ocl.hpp | 10 +- modules/ocl/perf/perf_filters.cpp | 79 ++++ modules/ocl/src/filtering.cpp | 98 ++++ .../opencl/filtering_adaptive_bilateral.cl | 424 ++++++++++++++++++ modules/ocl/test/test_filters.cpp | 76 ++++ modules/ocl/test/test_imgproc.cpp | 65 --- samples/ocl/adaptive_bilateral_filter.cpp | 51 +++ 11 files changed, 994 insertions(+), 67 deletions(-) create mode 100644 modules/ocl/src/opencl/filtering_adaptive_bilateral.cl create mode 100644 samples/ocl/adaptive_bilateral_filter.cpp diff --git a/modules/imgproc/doc/filtering.rst b/modules/imgproc/doc/filtering.rst index 3d230d1ca..1816c6a43 100755 --- a/modules/imgproc/doc/filtering.rst +++ b/modules/imgproc/doc/filtering.rst @@ -412,6 +412,28 @@ http://www.dai.ed.ac.uk/CVonline/LOCAL\_COPIES/MANDUCHI1/Bilateral\_Filtering.ht This filter does not work inplace. +adaptiveBilateralFilter +----------------------- +Applies the adaptive bilateral filter to an image. + +.. ocv:function:: void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, double sigmaSpace, Point anchor=Point(-1, -1), int borderType=BORDER_DEFAULT ) + +.. ocv:pyfunction:: cv2.adaptiveBilateralFilter(src, ksize, sigmaSpace[, dst[, anchor[, borderType]]]) -> dst + + :param src: Source 8-bit, 1-channel or 3-channel image. + + :param dst: Destination image of the same size and type as ``src`` . + + :param ksize: filter kernel size. + + :param sigmaSpace: Filter sigma in the coordinate space. It has a meaning similar to ``sigmaSpace`` in ``bilateralFilter``. + + :param anchor: anchor point; the default value ``Point(-1,-1)`` means that the anchor is at the kernel center. Only the default value is supported now. + + :param borderType: border mode used to extrapolate pixels outside of the image. + +The function applies adaptive bilateral filtering to the input image. This filter is similar to ``bilateralFilter``, in that dissimilarity from and distance to the center pixel are penalized. Instead of using ``sigmaColor``, we employ the variance of pixel values in the neighbourhood.
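As a rough, self-contained illustration of this weighting (an editor's sketch, not code from the patch), the snippet below filters one interior pixel of an 8-bit single-channel image with an odd-sized window: the spatial term depends only on ``sigmaSpace``, while the range term uses the variance of the window where the classic bilateral filter would use ``sigmaColor``. The helper name, the full-window variance, the interior-pixel assumption and the flat-window guard are simplifications of the implementation added further down in this patch.

    #include "opencv2/core/core.hpp"

    // Sketch of the adaptive bilateral weighting for a single interior pixel of an
    // 8-bit, single-channel image; ksize is assumed odd and the pixel far enough
    // from the border. Helper name and flat-window guard are illustrative only.
    static uchar adaptiveBilateralPixel(const cv::Mat& src, int row, int col,
                                        cv::Size ksize, double sigmaSpace)
    {
        const int hw = ksize.width / 2, hh = ksize.height / 2;
        const double sigma2 = sigmaSpace * sigmaSpace;

        // Local variance of the window plays the role of sigmaColor.
        double sum = 0.0, sumSq = 0.0;
        const int n = ksize.width * ksize.height;
        for (int dy = -hh; dy <= hh; ++dy)
            for (int dx = -hw; dx <= hw; ++dx)
            {
                double v = src.at<uchar>(row + dy, col + dx);
                sum += v;
                sumSq += v * v;
            }
        double var = (sumSq * n - sum * sum) / (double(n) * n);
        if (var < 1e-6)
            var = 1e-6;   // avoid 0/0 on perfectly flat windows (illustrative guard)

        // Weighted average: spatial weight from sigmaSpace, range weight from var.
        const double center = src.at<uchar>(row, col);
        double acc = 0.0, wsum = 0.0;
        for (int dy = -hh; dy <= hh; ++dy)
            for (int dx = -hw; dx <= hw; ++dx)
            {
                double v = src.at<uchar>(row + dy, col + dx);
                double wSpace = sigma2 / (sigma2 + dx * dx + dy * dy);
                double d = v - center;
                double w = wSpace * var / (var + d * d);
                acc  += w * v;
                wsum += w;
            }
        return cv::saturate_cast<uchar>(acc / wsum);
    }

Because the range weight adapts to local contrast, high-variance (edge) neighbourhoods are smoothed less than flat ones; the ``samples/ocl/adaptive_bilateral_filter.cpp`` sample added later in this patch visualises that difference against the fixed-sigma ``bilateralFilter``.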
+ blur diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp index f51bbaab7..1981a61d9 100644 --- a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp @@ -398,6 +398,10 @@ CV_EXPORTS_W void GaussianBlur( InputArray src, CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT ); +//! smooths the image using adaptive bilateral filter +CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, + double sigmaSpace, Point anchor=Point(-1, -1), + int borderType=BORDER_DEFAULT ); //! smooths the image using the box filter. Each pixel is processed in O(1) time CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, Size ksize, Point anchor=Point(-1,-1), diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 00be08618..e38487aa5 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -2250,6 +2250,236 @@ void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d, "Bilateral filtering is only implemented for 8u and 32f images" ); } + +/****************************************************************************************\ + Adaptive Bilateral Filtering +\****************************************************************************************/ + +namespace cv +{ +#define CALCVAR 1 +#define FIXED_WEIGHT 0 + +class adaptiveBilateralFilter_8u_Invoker : + public ParallelLoopBody +{ +public: + adaptiveBilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, Size _ksize, double _sigma_space, Point _anchor) : + temp(&_temp), dest(&_dest), ksize(_ksize), sigma_space(_sigma_space), anchor(_anchor) + { + if( sigma_space <= 0 ) + sigma_space = 1; + CV_Assert((ksize.width & 1) && (ksize.height & 1)); + space_weight.resize(ksize.width * ksize.height); + double sigma2 = sigma_space * sigma_space; + int idx = 0; + int w = ksize.width / 2; + int h = ksize.height / 2; + for(int y=-h; y<=h; y++) + for(int x=-w; x<=w; x++) + { + space_weight[idx++] = (float)(sigma2 / (sigma2 + x * x + y * y)); + } + } + virtual void operator()(const Range& range) const + { + int cn = dest->channels(); + int anX = anchor.x; + + const uchar *tptr; + + for(int i = range.start;i < range.end; i++) + { + int startY = i; + if(cn == 1) + { + float var; + int currVal; + int sumVal = 0; + int sumValSqr = 0; + int currValCenter; + int currWRTCenter; + float weight; + float totalWeight = 0.; + float tmpSum = 0.; + + for(int j = 0;j < dest->cols *cn; j+=cn) + { + sumVal = 0; + sumValSqr= 0; + totalWeight = 0.; + tmpSum = 0.; + + // Top row: don't sum the very last element + int startLMJ = 0; + int endLMJ = ksize.width - 1; + int howManyAll = (anX *2 +1)*(ksize.width ); +#if CALCVAR + for(int x = startLMJ; x< endLMJ; x++) + { + tptr = temp->ptr(startY + x) +j; + for(int y=-anX; y<=anX; y++) + { + currVal = tptr[cn*(y+anX)]; + sumVal += currVal; + sumValSqr += (currVal *currVal); + } + } + var = ( (sumValSqr * howManyAll)- sumVal * sumVal ) / ( (float)(howManyAll*howManyAll)); +#else + var = 900.0; +#endif + startLMJ = 0; + endLMJ = ksize.width; + tptr = temp->ptr(startY + (startLMJ+ endLMJ)/2); + currValCenter =tptr[j+cn*anX]; + for(int x = startLMJ; x< endLMJ; x++) + { + tptr = temp->ptr(startY + x) +j; + for(int y=-anX; y<=anX; y++) + { +#if FIXED_WEIGHT + weight = 1.0; +#else + currVal = tptr[cn*(y+anX)]; + currWRTCenter = 
currVal - currValCenter; + + weight = var / ( var + (currWRTCenter * currWRTCenter) ) * space_weight[x*ksize.width+y+anX];; +#endif + tmpSum += ((float)tptr[cn*(y+anX)] * weight); + totalWeight += weight; + } + } + tmpSum /= totalWeight; + + dest->at(startY ,j)= static_cast(tmpSum); + } + } + else + { + assert(cn == 3); + float var_b, var_g, var_r; + int currVal_b, currVal_g, currVal_r; + int sumVal_b= 0, sumVal_g= 0, sumVal_r= 0; + int sumValSqr_b= 0, sumValSqr_g= 0, sumValSqr_r= 0; + int currValCenter_b= 0, currValCenter_g= 0, currValCenter_r= 0; + int currWRTCenter_b, currWRTCenter_g, currWRTCenter_r; + float weight_b, weight_g, weight_r; + float totalWeight_b= 0., totalWeight_g= 0., totalWeight_r= 0.; + float tmpSum_b = 0., tmpSum_g= 0., tmpSum_r = 0.; + + for(int j = 0;j < dest->cols *cn; j+=cn) + { + sumVal_b= 0, sumVal_g= 0, sumVal_r= 0; + sumValSqr_b= 0, sumValSqr_g= 0, sumValSqr_r= 0; + totalWeight_b= 0., totalWeight_g= 0., totalWeight_r= 0.; + tmpSum_b = 0., tmpSum_g= 0., tmpSum_r = 0.; + + // Top row: don't sum the very last element + int startLMJ = 0; + int endLMJ = ksize.width - 1; + int howManyAll = (anX *2 +1)*(ksize.width); +#if CALCVAR + for(int x = startLMJ; x< endLMJ; x++) + { + tptr = temp->ptr(startY + x) +j; + for(int y=-anX; y<=anX; y++) + { + currVal_b = tptr[cn*(y+anX)], currVal_g = tptr[cn*(y+anX)+1], currVal_r =tptr[cn*(y+anX)+2]; + sumVal_b += currVal_b; + sumVal_g += currVal_g; + sumVal_r += currVal_r; + sumValSqr_b += (currVal_b *currVal_b); + sumValSqr_g += (currVal_g *currVal_g); + sumValSqr_r += (currVal_r *currVal_r); + } + } + var_b = ( (sumValSqr_b * howManyAll)- sumVal_b * sumVal_b ) / ( (float)(howManyAll*howManyAll)); + var_g = ( (sumValSqr_g * howManyAll)- sumVal_g * sumVal_g ) / ( (float)(howManyAll*howManyAll)); + var_r = ( (sumValSqr_r * howManyAll)- sumVal_r * sumVal_r ) / ( (float)(howManyAll*howManyAll)); +#else + var_b = 900.0; var_g = 900.0;var_r = 900.0; +#endif + startLMJ = 0; + endLMJ = ksize.width; + tptr = temp->ptr(startY + (startLMJ+ endLMJ)/2) + j; + currValCenter_b =tptr[cn*anX], currValCenter_g =tptr[cn*anX+1], currValCenter_r =tptr[cn*anX+2]; + for(int x = startLMJ; x< endLMJ; x++) + { + tptr = temp->ptr(startY + x) +j; + for(int y=-anX; y<=anX; y++) + { +#if FIXED_WEIGHT + weight_b = 1.0; + weight_g = 1.0; + weight_r = 1.0; +#else + currVal_b = tptr[cn*(y+anX)];currVal_g=tptr[cn*(y+anX)+1];currVal_r=tptr[cn*(y+anX)+2]; + currWRTCenter_b = currVal_b - currValCenter_b; + currWRTCenter_g = currVal_g - currValCenter_g; + currWRTCenter_r = currVal_r - currValCenter_r; + + float cur_spw = space_weight[x*ksize.width+y+anX]; + weight_b = var_b / ( var_b + (currWRTCenter_b * currWRTCenter_b) ) * cur_spw; + weight_g = var_g / ( var_g + (currWRTCenter_g * currWRTCenter_g) ) * cur_spw; + weight_r = var_r / ( var_r + (currWRTCenter_r * currWRTCenter_r) ) * cur_spw; +#endif + tmpSum_b += ((float)tptr[cn*(y+anX)] * weight_b); + tmpSum_g += ((float)tptr[cn*(y+anX)+1] * weight_g); + tmpSum_r += ((float)tptr[cn*(y+anX)+2] * weight_r); + totalWeight_b += weight_b, totalWeight_g += weight_g, totalWeight_r += weight_r; + } + } + tmpSum_b /= totalWeight_b; + tmpSum_g /= totalWeight_g; + tmpSum_r /= totalWeight_r; + + dest->at(startY,j )= static_cast(tmpSum_b); + dest->at(startY,j+1)= static_cast(tmpSum_g); + dest->at(startY,j+2)= static_cast(tmpSum_r); + } + } + } + } +private: + const Mat *temp; + Mat *dest; + Size ksize; + double sigma_space; + Point anchor; + vector space_weight; +}; +static void adaptiveBilateralFilter_8u( const Mat& src, Mat& dst, 
Size ksize, double sigmaSpace, Point anchor, int borderType ) +{ + Size size = src.size(); + + CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && + src.type() == dst.type() && src.size() == dst.size() && + src.data != dst.data ); + Mat temp; + copyMakeBorder(src, temp, anchor.x, anchor.y, anchor.x, anchor.y, borderType); + + adaptiveBilateralFilter_8u_Invoker body(dst, temp, ksize, sigmaSpace, anchor); + parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16)); +} +} +void cv::adaptiveBilateralFilter( InputArray _src, OutputArray _dst, Size ksize, + double sigmaSpace, Point anchor, int borderType ) +{ + Mat src = _src.getMat(); + _dst.create(src.size(), src.type()); + Mat dst = _dst.getMat(); + + CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3); + + anchor = normalizeAnchor(anchor,ksize); + if( src.depth() == CV_8U ) + adaptiveBilateralFilter_8u( src, dst, ksize, sigmaSpace, anchor, borderType ); + else + CV_Error( CV_StsUnsupportedFormat, + "Adaptive Bilateral filtering is only implemented for 8u images" ); +} + ////////////////////////////////////////////////////////////////////////////////////////// CV_IMPL void diff --git a/modules/ocl/doc/image_filtering.rst b/modules/ocl/doc/image_filtering.rst index ce89e85de..1f90eedda 100644 --- a/modules/ocl/doc/image_filtering.rst +++ b/modules/ocl/doc/image_filtering.rst @@ -127,7 +127,7 @@ ocl::bilateralFilter -------------------- Returns void -.. ocv:function:: void ocl::bilateralFilter(const oclMat &src, oclMat &dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT) +.. ocv:function:: void ocl::bilateralFilter(const oclMat &src, oclMat &dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT) :param src: The source image diff --git a/modules/ocl/include/opencv2/ocl/ocl.hpp b/modules/ocl/include/opencv2/ocl/ocl.hpp index 5b3642d03..f2b858caa 100644 --- a/modules/ocl/include/opencv2/ocl/ocl.hpp +++ b/modules/ocl/include/opencv2/ocl/ocl.hpp @@ -520,7 +520,15 @@ namespace cv //! bilateralFilter // supports 8UC1 8UC4 - CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpave, int borderType=BORDER_DEFAULT); + CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT); + + //! Applies an adaptive bilateral filter to the input image + // This is not truly a bilateral filter. Instead of using user provided fixed parameters, + // the function calculates a constant at each window based on local standard deviation, + // and use this constant to do filtering. + // supports 8UC1 8UC3 + CV_EXPORTS void adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, Point anchor = Point(-1, -1), int borderType=BORDER_DEFAULT); + //! 
computes exponent of each matrix element (b = e**a) // supports only CV_32FC1 type CV_EXPORTS void exp(const oclMat &a, oclMat &b); diff --git a/modules/ocl/perf/perf_filters.cpp b/modules/ocl/perf/perf_filters.cpp index 28c290096..5f510d63f 100644 --- a/modules/ocl/perf/perf_filters.cpp +++ b/modules/ocl/perf/perf_filters.cpp @@ -321,3 +321,82 @@ PERF_TEST_P(filter2DFixture, filter2D, else OCL_PERF_ELSE } + +///////////// Bilateral//////////////////////// + +typedef Size_MatType BilateralFixture; + +PERF_TEST_P(BilateralFixture, Bilateral, + ::testing::Combine(OCL_TYPICAL_MAT_SIZES, + OCL_PERF_ENUM(CV_8UC1, CV_8UC3))) +{ + const Size_MatType_t params = GetParam(); + const Size srcSize = get<0>(params); + const int type = get<1>(params), d = 7; + double sigmacolor = 50.0, sigmaspace = 50.0; + + Mat src(srcSize, type), dst(srcSize, type); + declare.in(src, WARMUP_RNG).out(dst); + + if (srcSize == OCL_SIZE_4000 && type == CV_8UC3) + declare.time(8); + + if (RUN_OCL_IMPL) + { + ocl::oclMat oclSrc(src), oclDst(srcSize, type); + + OCL_TEST_CYCLE() cv::ocl::bilateralFilter(oclSrc, oclDst, d, sigmacolor, sigmaspace); + + oclDst.download(dst); + + SANITY_CHECK(dst); + } + else if (RUN_PLAIN_IMPL) + { + TEST_CYCLE() cv::bilateralFilter(src, dst, d, sigmacolor, sigmaspace); + + SANITY_CHECK(dst); + } + else + OCL_PERF_ELSE +} + +///////////// adaptiveBilateral//////////////////////// + +typedef Size_MatType adaptiveBilateralFixture; + +PERF_TEST_P(adaptiveBilateralFixture, adaptiveBilateral, + ::testing::Combine(OCL_TYPICAL_MAT_SIZES, + OCL_PERF_ENUM(CV_8UC1, CV_8UC3))) +{ + const Size_MatType_t params = GetParam(); + const Size srcSize = get<0>(params); + const int type = get<1>(params); + double sigmaspace = 10.0; + Size ksize(9,9); + + Mat src(srcSize, type), dst(srcSize, type); + declare.in(src, WARMUP_RNG).out(dst); + + if (srcSize == OCL_SIZE_4000) + declare.time(15); + + if (RUN_OCL_IMPL) + { + ocl::oclMat oclSrc(src), oclDst(srcSize, type); + + OCL_TEST_CYCLE() cv::ocl::adaptiveBilateralFilter(oclSrc, oclDst, ksize, sigmaspace); + + oclDst.download(dst); + + SANITY_CHECK(dst, 1.); + } + else if (RUN_PLAIN_IMPL) + { + TEST_CYCLE() cv::adaptiveBilateralFilter(src, dst, ksize, sigmaspace); + + SANITY_CHECK(dst); + } + else + OCL_PERF_ELSE +} diff --git a/modules/ocl/src/filtering.cpp b/modules/ocl/src/filtering.cpp index a08f0ed2b..c0557980b 100644 --- a/modules/ocl/src/filtering.cpp +++ b/modules/ocl/src/filtering.cpp @@ -64,6 +64,7 @@ extern const char *filter_sep_row; extern const char *filter_sep_col; extern const char *filtering_laplacian; extern const char *filtering_morph; +extern const char *filtering_adaptive_bilateral; } } @@ -1616,3 +1617,100 @@ void cv::ocl::GaussianBlur(const oclMat &src, oclMat &dst, Size ksize, double si Ptr f = createGaussianFilter_GPU(src.type(), ksize, sigma1, sigma2, bordertype); f->apply(src, dst); } + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Adaptive Bilateral Filter + +void cv::ocl::adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, Point anchor, int borderType) +{ + CV_Assert((ksize.width & 1) && (ksize.height & 1)); // ksize must be odd + CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3); // source must be 8bit RGB image + if( sigmaSpace <= 0 ) + sigmaSpace = 1; + Mat lut(Size(ksize.width, ksize.height), CV_32FC1); + double sigma2 = sigmaSpace * sigmaSpace; + int idx = 0; + int w = ksize.width / 2; + int h = ksize.height / 2; + for(int y=-h; y<=h; 
y++) + for(int x=-w; x<=w; x++) + { + lut.at(idx++) = sigma2 / (sigma2 + x * x + y * y); + } + oclMat dlut(lut); + int depth = src.depth(); + int cn = src.oclchannels(); + + normalizeAnchor(anchor, ksize); + const static String kernelName = "edgeEnhancingFilter"; + + dst.create(src.size(), src.type()); + + char btype[30]; + switch(borderType) + { + case BORDER_CONSTANT: + sprintf(btype, "BORDER_CONSTANT"); + break; + case BORDER_REPLICATE: + sprintf(btype, "BORDER_REPLICATE"); + break; + case BORDER_REFLECT: + sprintf(btype, "BORDER_REFLECT"); + break; + case BORDER_WRAP: + sprintf(btype, "BORDER_WRAP"); + break; + case BORDER_REFLECT101: + sprintf(btype, "BORDER_REFLECT_101"); + break; + default: + CV_Error(CV_StsBadArg, "This border type is not supported"); + break; + } + + //the following constants may be adjusted for performance concerns + const static size_t blockSizeX = 64, blockSizeY = 1, EXTRA = ksize.height - 1; + + //Normalize the result by default + const float alpha = ksize.height * ksize.width; + + const size_t gSize = blockSizeX - ksize.width / 2 * 2; + const size_t globalSizeX = (src.cols) % gSize == 0 ? + src.cols / gSize * blockSizeX : + (src.cols / gSize + 1) * blockSizeX; + const size_t rows_per_thread = 1 + EXTRA; + const size_t globalSizeY = ((src.rows + rows_per_thread - 1) / rows_per_thread) % blockSizeY == 0 ? + ((src.rows + rows_per_thread - 1) / rows_per_thread) : + (((src.rows + rows_per_thread - 1) / rows_per_thread) / blockSizeY + 1) * blockSizeY; + + size_t globalThreads[3] = { globalSizeX, globalSizeY, 1}; + size_t localThreads[3] = { blockSizeX, blockSizeY, 1}; + + char build_options[250]; + + //LDATATYPESIZE is sizeof local data store. This is to exemplify effect of LDS on kernel performance + sprintf(build_options, + "-D VAR_PER_CHANNEL=1 -D CALCVAR=1 -D FIXED_WEIGHT=0 -D EXTRA=%d" + " -D THREADS=%d -D anX=%d -D anY=%d -D ksX=%d -D ksY=%d -D %s", + static_cast(EXTRA), static_cast(blockSizeX), anchor.x, anchor.y, ksize.width, ksize.height, btype); + + std::vector > args; + args.push_back(std::make_pair(sizeof(cl_mem), &src.data)); + args.push_back(std::make_pair(sizeof(cl_mem), &dst.data)); + args.push_back(std::make_pair(sizeof(cl_float), (void *)&alpha)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.offset)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholerows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholecols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.step)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.offset)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.rows)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.cols)); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&dst.step)); + args.push_back(std::make_pair(sizeof(cl_mem), &dlut.data)); + int lut_step = dlut.step1(); + args.push_back(std::make_pair(sizeof(cl_int), (void *)&lut_step)); + + openCLExecuteKernel(Context::getContext(), &filtering_adaptive_bilateral, kernelName, + globalThreads, localThreads, args, cn, depth, build_options); +} \ No newline at end of file diff --git a/modules/ocl/src/opencl/filtering_adaptive_bilateral.cl b/modules/ocl/src/opencl/filtering_adaptive_bilateral.cl new file mode 100644 index 000000000..a8e0fd17e --- /dev/null +++ b/modules/ocl/src/opencl/filtering_adaptive_bilateral.cl @@ -0,0 +1,424 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, 
INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Harris Gasparakis, harris.gasparakis@amd.com +// Xiaopeng Fu, fuxiaopeng2222@163.com +// Yao Wang, bitwangyaoyao@gmail.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifdef BORDER_REPLICATE +//BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i)) +#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i)) +#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (b_edge)-1 :(addr)) +#endif + +#ifdef BORDER_REFLECT +//BORDER_REFLECT: fedcba|abcdefgh|hgfedcb +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i)) +#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i)) +#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr)) +#endif + +#ifdef BORDER_REFLECT_101 +//BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i)) +#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i)) +#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr)) +#endif + +//blur function does not support BORDER_WRAP +#ifdef BORDER_WRAP +//BORDER_WRAP: cdefgh|abcdefgh|abcdefg +#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i)) +#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? 
(i)-(r_edge) : (addr)) +#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i)) +#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (i)-(b_edge) : (addr)) +#endif + +__kernel void +edgeEnhancingFilter_C4_D0( + __global const uchar4 * restrict src, + __global uchar4 *dst, + float alpha, + int src_offset, + int src_whole_rows, + int src_whole_cols, + int src_step, + int dst_offset, + int dst_rows, + int dst_cols, + int dst_step, + __global const float* lut, + int lut_step) +{ + int col = get_local_id(0); + const int gX = get_group_id(0); + const int gY = get_group_id(1); + + int src_x_off = (src_offset % src_step) >> 2; + int src_y_off = src_offset / src_step; + int dst_x_off = (dst_offset % dst_step) >> 2; + int dst_y_off = dst_offset / dst_step; + + int startX = gX * (THREADS-ksX+1) - anX + src_x_off; + int startY = (gY * (1+EXTRA)) - anY + src_y_off; + + int dst_startX = gX * (THREADS-ksX+1) + dst_x_off; + int dst_startY = (gY * (1+EXTRA)) + dst_y_off; + + int posX = dst_startX - dst_x_off + col; + int posY = (gY * (1+EXTRA)) ; + + __local uchar4 data[ksY+EXTRA][THREADS]; + + float4 tmp_sum[1+EXTRA]; + for(int tmpint = 0; tmpint < 1+EXTRA; tmpint++) + { + tmp_sum[tmpint] = (float4)(0,0,0,0); + } + +#ifdef BORDER_CONSTANT + bool con; + uchar4 ss; + for(int j = 0; j < ksY+EXTRA; j++) + { + con = (startX+col >= 0 && startX+col < src_whole_cols && startY+j >= 0 && startY+j < src_whole_rows); + + int cur_col = clamp(startX + col, 0, src_whole_cols); + if(con) + { + ss = src[(startY+j)*(src_step>>2) + cur_col]; + } + + data[j][col] = con ? ss : (uchar4)0; + } +#else + for(int j= 0; j < ksY+EXTRA; j++) + { + int selected_row; + int selected_col; + selected_row = ADDR_H(startY+j, 0, src_whole_rows); + selected_row = ADDR_B(startY+j, src_whole_rows, selected_row); + + selected_col = ADDR_L(startX+col, 0, src_whole_cols); + selected_col = ADDR_R(startX+col, src_whole_cols, selected_col); + + data[j][col] = src[selected_row * (src_step>>2) + selected_col]; + } +#endif + + barrier(CLK_LOCAL_MEM_FENCE); + + float4 var[1+EXTRA]; + +#if VAR_PER_CHANNEL + float4 weight; + float4 totalWeight = (float4)(0,0,0,0); +#else + float weight; + float totalWeight = 0; +#endif + + int4 currValCenter; + int4 currWRTCenter; + + int4 sumVal = 0; + int4 sumValSqr = 0; + + if(col < (THREADS-(ksX-1))) + { + int4 currVal; + + int howManyAll = (2*anX+1)*(ksY); + + //find variance of all data + int startLMj; + int endLMj ; +#if CALCVAR + // Top row: don't sum the very last element + for(int extraCnt = 0; extraCnt <=EXTRA; extraCnt++) + { + startLMj = extraCnt; + endLMj = ksY+extraCnt-1; + sumVal =0; + sumValSqr=0; + for(int j = startLMj; j < endLMj; j++) + { + for(int i=-anX; i<=anX; i++) + { + currVal = convert_int4(data[j][col+anX+i]) ; + + sumVal += currVal; + sumValSqr += mul24(currVal, currVal); + } + } + var[extraCnt] = convert_float4( ( (sumValSqr * howManyAll)- mul24(sumVal , sumVal) ) ) / ( (float)(howManyAll*howManyAll) ) ; +#else + var[extraCnt] = (float4)(900.0, 900.0, 900.0, 0.0); +#endif + } + + for(int extraCnt = 0; extraCnt <= EXTRA; extraCnt++) + { + + // top row: include the very first element, even on first time + startLMj = extraCnt; + // go all the way, unless this is the last local mem chunk, + // then stay within limits - 1 + endLMj = extraCnt + ksY; + + // Top row: don't sum the very last element + currValCenter = convert_int4( data[ (startLMj + endLMj)/2][col+anX] ); + + for(int j = startLMj, lut_j = 0; j < endLMj; j++, lut_j++) + { + for(int i=-anX; i<=anX; i++) + { +#if 
FIXED_WEIGHT +#if VAR_PER_CHANNEL + weight.x = 1.0f; + weight.y = 1.0f; + weight.z = 1.0f; + weight.w = 1.0f; +#else + weight = 1.0f; +#endif +#else + currVal = convert_int4(data[j][col+anX+i]) ; + currWRTCenter = currVal-currValCenter; + +#if VAR_PER_CHANNEL + weight = var[extraCnt] / (var[extraCnt] + convert_float4(currWRTCenter * currWRTCenter)) * (float4)(lut[lut_j*lut_step+anX+i]); + //weight.x = var[extraCnt].x / ( var[extraCnt].x + (float) mul24(currWRTCenter.x , currWRTCenter.x) ) ; + //weight.y = var[extraCnt].y / ( var[extraCnt].y + (float) mul24(currWRTCenter.y , currWRTCenter.y) ) ; + //weight.z = var[extraCnt].z / ( var[extraCnt].z + (float) mul24(currWRTCenter.z , currWRTCenter.z) ) ; + //weight.w = 0; +#else + weight = 1.0f/(1.0f+( mul24(currWRTCenter.x, currWRTCenter.x) + mul24(currWRTCenter.y, currWRTCenter.y) + mul24(currWRTCenter.z, currWRTCenter.z))/(var.x+var.y+var.z)); +#endif +#endif + tmp_sum[extraCnt] += convert_float4(data[j][col+anX+i]) * weight; + totalWeight += weight; + } + } + + tmp_sum[extraCnt] /= totalWeight; + + if(posX >= 0 && posX < dst_cols && (posY+extraCnt) >= 0 && (posY+extraCnt) < dst_rows) + { + dst[(dst_startY+extraCnt) * (dst_step>>2)+ dst_startX + col] = convert_uchar4(tmp_sum[extraCnt]); + } + +#if VAR_PER_CHANNEL + totalWeight = (float4)(0,0,0,0); +#else + totalWeight = 0; +#endif + } + } +} + + +__kernel void +edgeEnhancingFilter_C1_D0( + __global const uchar * restrict src, + __global uchar *dst, + float alpha, + int src_offset, + int src_whole_rows, + int src_whole_cols, + int src_step, + int dst_offset, + int dst_rows, + int dst_cols, + int dst_step, + __global const float * lut, + int lut_step) +{ + int col = get_local_id(0); + const int gX = get_group_id(0); + const int gY = get_group_id(1); + + int src_x_off = (src_offset % src_step); + int src_y_off = src_offset / src_step; + int dst_x_off = (dst_offset % dst_step); + int dst_y_off = dst_offset / dst_step; + + int startX = gX * (THREADS-ksX+1) - anX + src_x_off; + int startY = (gY * (1+EXTRA)) - anY + src_y_off; + + int dst_startX = gX * (THREADS-ksX+1) + dst_x_off; + int dst_startY = (gY * (1+EXTRA)) + dst_y_off; + + int posX = dst_startX - dst_x_off + col; + int posY = (gY * (1+EXTRA)) ; + + __local uchar data[ksY+EXTRA][THREADS]; + + float tmp_sum[1+EXTRA]; + for(int tmpint = 0; tmpint < 1+EXTRA; tmpint++) + { + tmp_sum[tmpint] = (float)(0); + } + +#ifdef BORDER_CONSTANT + bool con; + uchar ss; + for(int j = 0; j < ksY+EXTRA; j++) + { + con = (startX+col >= 0 && startX+col < src_whole_cols && startY+j >= 0 && startY+j < src_whole_rows); + + int cur_col = clamp(startX + col, 0, src_whole_cols); + if(con) + { + ss = src[(startY+j)*(src_step) + cur_col]; + } + + data[j][col] = con ? 
ss : 0; + } +#else + for(int j= 0; j < ksY+EXTRA; j++) + { + int selected_row; + int selected_col; + selected_row = ADDR_H(startY+j, 0, src_whole_rows); + selected_row = ADDR_B(startY+j, src_whole_rows, selected_row); + + selected_col = ADDR_L(startX+col, 0, src_whole_cols); + selected_col = ADDR_R(startX+col, src_whole_cols, selected_col); + + data[j][col] = src[selected_row * (src_step) + selected_col]; + } +#endif + + barrier(CLK_LOCAL_MEM_FENCE); + + float var[1+EXTRA]; + + float weight; + float totalWeight = 0; + + int currValCenter; + int currWRTCenter; + + int sumVal = 0; + int sumValSqr = 0; + + if(col < (THREADS-(ksX-1))) + { + int currVal; + + int howManyAll = (2*anX+1)*(ksY); + + //find variance of all data + int startLMj; + int endLMj; +#if CALCVAR + // Top row: don't sum the very last element + for(int extraCnt=0; extraCnt<=EXTRA; extraCnt++) + { + startLMj = extraCnt; + endLMj = ksY+extraCnt-1; + sumVal = 0; + sumValSqr =0; + for(int j = startLMj; j < endLMj; j++) + { + for(int i=-anX; i<=anX; i++) + { + currVal = (uint)(data[j][col+anX+i]) ; + + sumVal += currVal; + sumValSqr += mul24(currVal, currVal); + } + } + var[extraCnt] = (float)( ( (sumValSqr * howManyAll)- mul24(sumVal , sumVal) ) ) / ( (float)(howManyAll*howManyAll) ) ; +#else + var[extraCnt] = (float)(900.0); +#endif + } + + for(int extraCnt = 0; extraCnt <= EXTRA; extraCnt++) + { + + // top row: include the very first element, even on first time + startLMj = extraCnt; + // go all the way, unless this is the last local mem chunk, + // then stay within limits - 1 + endLMj = extraCnt + ksY; + + // Top row: don't sum the very last element + currValCenter = (int)( data[ (startLMj + endLMj)/2][col+anX] ); + + for(int j = startLMj, lut_j = 0; j < endLMj; j++, lut_j++) + { + for(int i=-anX; i<=anX; i++) + { +#if FIXED_WEIGHT + weight = 1.0f; +#else + currVal = (int)(data[j][col+anX+i]) ; + currWRTCenter = currVal-currValCenter; + + weight = var[extraCnt] / (var[extraCnt] + (float)mul24(currWRTCenter,currWRTCenter)) * lut[lut_j*lut_step+anX+i] ; +#endif + tmp_sum[extraCnt] += (float)(data[j][col+anX+i] * weight); + totalWeight += weight; + } + } + + tmp_sum[extraCnt] /= totalWeight; + + + if(posX >= 0 && posX < dst_cols && (posY+extraCnt) >= 0 && (posY+extraCnt) < dst_rows) + { + dst[(dst_startY+extraCnt) * (dst_step)+ dst_startX + col] = (uchar)(tmp_sum[extraCnt]); + } + + totalWeight = 0; + } + } +} diff --git a/modules/ocl/test/test_filters.cpp b/modules/ocl/test/test_filters.cpp index c98c8f40d..4a22ec503 100644 --- a/modules/ocl/test/test_filters.cpp +++ b/modules/ocl/test/test_filters.cpp @@ -353,6 +353,69 @@ TEST_P(Filter2D, Mat) Near(1); } } +//////////////////////////////////////////////////////////////////////////////////////////////////// +// Bilateral +struct Bilateral : FilterTestBase +{ + int type; + cv::Size ksize; + int bordertype; + double sigmacolor, sigmaspace; + + virtual void SetUp() + { + type = GET_PARAM(0); + ksize = GET_PARAM(1); + bordertype = GET_PARAM(3); + Init(type); + cv::RNG &rng = TS::ptr()->get_rng(); + sigmacolor = rng.uniform(20, 100); + sigmaspace = rng.uniform(10, 40); + } +}; + +TEST_P(Bilateral, Mat) +{ + for(int j = 0; j < LOOP_TIMES; j++) + { + random_roi(); + cv::bilateralFilter(mat1_roi, dst_roi, ksize.width, sigmacolor, sigmaspace, bordertype); + cv::ocl::bilateralFilter(gmat1, gdst, ksize.width, sigmacolor, sigmaspace, bordertype); + Near(1); + } + +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// +// AdaptiveBilateral 
+struct AdaptiveBilateral : FilterTestBase +{ + int type; + cv::Size ksize; + int bordertype; + Point anchor; + virtual void SetUp() + { + type = GET_PARAM(0); + ksize = GET_PARAM(1); + bordertype = GET_PARAM(3); + Init(type); + anchor = Point(-1,-1); + } +}; + +TEST_P(AdaptiveBilateral, Mat) +{ + for(int j = 0; j < LOOP_TIMES; j++) + { + random_roi(); + cv::adaptiveBilateralFilter(mat1_roi, dst_roi, ksize, 5, anchor, bordertype); + cv::ocl::adaptiveBilateralFilter(gmat1, gdst, ksize, 5, anchor, bordertype); + Near(1); + } + +} + INSTANTIATE_TEST_CASE_P(Filter, Blur, Combine( Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC4), Values(cv::Size(3, 3), cv::Size(5, 5), cv::Size(7, 7)), @@ -400,4 +463,17 @@ INSTANTIATE_TEST_CASE_P(Filter, Filter2D, testing::Combine( Values(Size(0, 0)), //not use Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REFLECT101, (MatType)cv::BORDER_REPLICATE, (MatType)cv::BORDER_REFLECT))); +INSTANTIATE_TEST_CASE_P(Filter, Bilateral, Combine( + Values(CV_8UC1, CV_8UC3), + Values(Size(5, 5), Size(9, 9)), + Values(Size(0, 0)), //not use + Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE, + (MatType)cv::BORDER_REFLECT, (MatType)cv::BORDER_WRAP, (MatType)cv::BORDER_REFLECT_101))); + +INSTANTIATE_TEST_CASE_P(Filter, AdaptiveBilateral, Combine( + Values(CV_8UC1, CV_8UC3), + Values(Size(5, 5), Size(9, 9)), + Values(Size(0, 0)), //not use + Values((MatType)cv::BORDER_CONSTANT, (MatType)cv::BORDER_REPLICATE, + (MatType)cv::BORDER_REFLECT, (MatType)cv::BORDER_REFLECT_101))); #endif // HAVE_OPENCL diff --git a/modules/ocl/test/test_imgproc.cpp b/modules/ocl/test/test_imgproc.cpp index 46cd257c8..426fcef3f 100644 --- a/modules/ocl/test/test_imgproc.cpp +++ b/modules/ocl/test/test_imgproc.cpp @@ -475,56 +475,6 @@ TEST_P(equalizeHist, Mat) } - - - -////////////////////////////////bilateralFilter//////////////////////////////////////////// - -struct bilateralFilter : ImgprocTestBase {}; - -TEST_P(bilateralFilter, Mat) -{ - double sigmacolor = 50.0; - int radius = 9; - int d = 2 * radius + 1; - double sigmaspace = 20.0; - int bordertype[] = {cv::BORDER_CONSTANT, cv::BORDER_REPLICATE, cv::BORDER_REFLECT, cv::BORDER_WRAP, cv::BORDER_REFLECT_101}; - //const char *borderstr[] = {"BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101"}; - - if (mat1.depth() != CV_8U || mat1.type() != dst.type()) - { - cout << "Unsupported type" << endl; - EXPECT_DOUBLE_EQ(0.0, 0.0); - } - else - { - for(size_t i = 0; i < sizeof(bordertype) / sizeof(int); i++) - for(int j = 0; j < LOOP_TIMES; j++) - { - random_roi(); - if(((bordertype[i] != cv::BORDER_CONSTANT) && (bordertype[i] != cv::BORDER_REPLICATE) && (mat1_roi.cols <= radius)) || (mat1_roi.cols <= radius) || (mat1_roi.rows <= radius) || (mat1_roi.rows <= radius)) - { - continue; - } - //if((dstx>=radius) && (dsty >= radius) && (dstx+cldst_roi.cols+radius <=cldst_roi.wholecols) && (dsty+cldst_roi.rows+radius <= cldst_roi.wholerows)) - //{ - // dst_roi.adjustROI(radius, radius, radius, radius); - // cldst_roi.adjustROI(radius, radius, radius, radius); - //} - //else - //{ - // continue; - //} - - cv::bilateralFilter(mat1_roi, dst_roi, d, sigmacolor, sigmaspace, bordertype[i] | cv::BORDER_ISOLATED); - cv::ocl::bilateralFilter(clmat1_roi, cldst_roi, d, sigmacolor, sigmaspace, bordertype[i] | cv::BORDER_ISOLATED); - Near(1.); - } - } -} - - - ////////////////////////////////copyMakeBorder//////////////////////////////////////////// struct CopyMakeBorder : ImgprocTestBase {}; @@ -1622,21 
+1572,6 @@ INSTANTIATE_TEST_CASE_P(ImgprocTestBase, equalizeHist, Combine( NULL_TYPE, Values(false))); // Values(false) is the reserved parameter -//INSTANTIATE_TEST_CASE_P(ImgprocTestBase, bilateralFilter, Combine( -// ONE_TYPE(CV_8UC1), -// NULL_TYPE, -// ONE_TYPE(CV_8UC1), -// NULL_TYPE, -// NULL_TYPE, -// Values(false))); // Values(false) is the reserved parameter -INSTANTIATE_TEST_CASE_P(ImgprocTestBase, bilateralFilter, Combine( - Values(CV_8UC1, CV_8UC3), - NULL_TYPE, - Values(CV_8UC1, CV_8UC3), - NULL_TYPE, - NULL_TYPE, - Values(false))); // Values(false) is the reserved parameter - INSTANTIATE_TEST_CASE_P(ImgprocTestBase, CopyMakeBorder, Combine( Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4), diff --git a/samples/ocl/adaptive_bilateral_filter.cpp b/samples/ocl/adaptive_bilateral_filter.cpp new file mode 100644 index 000000000..df226b195 --- /dev/null +++ b/samples/ocl/adaptive_bilateral_filter.cpp @@ -0,0 +1,51 @@ +// This sample shows the difference of adaptive bilateral filter and bilateral filter. +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/ocl/ocl.hpp" + +using namespace cv; +using namespace std; + + +int main( int argc, const char** argv ) +{ + const char* keys = + "{ i | input | | specify input image }" + "{ k | ksize | 5 | specify kernel size }"; + CommandLineParser cmd(argc, argv, keys); + string src_path = cmd.get("i"); + int ks = cmd.get("k"); + const char * winName[] = {"input", "adaptive bilateral CPU", "adaptive bilateral OpenCL", "bilateralFilter OpenCL"}; + + Mat src = imread(src_path); + Mat abFilterCPU; + if(src.empty()){ + //cout << "error read image: " << src_path << endl; + return -1; + } + + std::vector infos; + ocl::getDevice(infos); + + ocl::oclMat dsrc(src), dABFilter, dBFilter; + + Size ksize(ks, ks); + adaptiveBilateralFilter(src,abFilterCPU, ksize, 10); + ocl::adaptiveBilateralFilter(dsrc, dABFilter, ksize, 10); + ocl::bilateralFilter(dsrc, dBFilter, ks, 30, 9); + + Mat abFilter = dABFilter; + Mat bFilter = dBFilter; + imshow(winName[0], src); + + imshow(winName[1], abFilterCPU); + + imshow(winName[2], abFilter); + + imshow(winName[3], bFilter); + + waitKey(); + return 0; + +} \ No newline at end of file From ab235cda74577aa93a65b0da959b4273e5498575 Mon Sep 17 00:00:00 2001 From: yao Date: Thu, 29 Aug 2013 11:59:19 +0800 Subject: [PATCH 09/55] fix warnings --- modules/ocl/perf/perf_filters.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ocl/perf/perf_filters.cpp b/modules/ocl/perf/perf_filters.cpp index 5f510d63f..aa562412b 100644 --- a/modules/ocl/perf/perf_filters.cpp +++ b/modules/ocl/perf/perf_filters.cpp @@ -333,7 +333,7 @@ PERF_TEST_P(BilateralFixture, Bilateral, const Size_MatType_t params = GetParam(); const Size srcSize = get<0>(params); const int type = get<1>(params), d = 7; - double sigmacolor = 50.0, sigmaspace = 50.0; + double sigmacolor = 50.0, sigmaspace = 50.0; Mat src(srcSize, type), dst(srcSize, type); declare.in(src, WARMUP_RNG).out(dst); @@ -372,8 +372,8 @@ PERF_TEST_P(adaptiveBilateralFixture, adaptiveBilateral, const Size_MatType_t params = GetParam(); const Size srcSize = get<0>(params); const int type = get<1>(params); - double sigmaspace = 10.0; - Size ksize(9,9); + double sigmaspace = 10.0; + Size ksize(9,9); Mat src(srcSize, type), dst(srcSize, type); declare.in(src, WARMUP_RNG).out(dst); From 5728612f95bf1fa19debdd7245b6f956cc70781f Mon Sep 17 00:00:00 
2001 From: Jin Ma Date: Thu, 29 Aug 2013 14:05:56 +0800 Subject: [PATCH 10/55] Removed the trailing whitespace --- modules/ocl/src/bgfg_mog.cpp | 13 ++++++------- modules/ocl/src/opencl/bgfg_mog.cl | 17 +++++++---------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/modules/ocl/src/bgfg_mog.cpp b/modules/ocl/src/bgfg_mog.cpp index c079c6b8f..d39f86394 100644 --- a/modules/ocl/src/bgfg_mog.cpp +++ b/modules/ocl/src/bgfg_mog.cpp @@ -46,7 +46,7 @@ #include "precomp.hpp" using namespace cv; using namespace cv::ocl; -namespace cv +namespace cv { namespace ocl { @@ -82,10 +82,10 @@ namespace cv { namespace ocl { namespace device void getBackgroundImage_ocl(int cn, const oclMat& weight, const oclMat& mean, oclMat& dst, int nmixtures, float backgroundRatio); - void loadConstants(float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, + void loadConstants(float Tb, float TB, float Tg, float varInit, float varMin, float varMax, float tau, unsigned char shadowVal); - void mog2_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& modesUsed, oclMat& weight, oclMat& variance, oclMat& mean, + void mog2_ocl(const oclMat& frame, int cn, oclMat& fgmask, oclMat& modesUsed, oclMat& weight, oclMat& variance, oclMat& mean, float alphaT, float prune, bool detectShadows, int nmixtures); void getBackgroundImage2_ocl(int cn, const oclMat& modesUsed, const oclMat& weight, const oclMat& mean, oclMat& dst, int nmixtures); @@ -392,11 +392,11 @@ void cv::ocl::device::mog::loadConstants(float Tb, float TB, float Tg, float var constants->c_tau = tau; constants->c_shadowVal = shadowVal; - cl_constants = load_constant(*((cl_context*)getoclContext()), *((cl_command_queue*)getoclCommandQueue()), + cl_constants = load_constant(*((cl_context*)getoclContext()), *((cl_command_queue*)getoclCommandQueue()), (void *)constants, sizeof(_contant_struct)); } -void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmaskRaw, oclMat& modesUsed, oclMat& weight, oclMat& variance, +void cv::ocl::device::mog::mog2_ocl(const oclMat& frame, int cn, oclMat& fgmaskRaw, oclMat& modesUsed, oclMat& weight, oclMat& variance, oclMat& mean, float alphaT, float prune, bool detectShadows, int nmixtures) { oclMat fgmask(fgmaskRaw.size(), CV_32SC1); @@ -635,5 +635,4 @@ void cv::ocl::MOG2::release() mean_.release(); bgmodelUsedModes_.release(); -} - +} \ No newline at end of file diff --git a/modules/ocl/src/opencl/bgfg_mog.cl b/modules/ocl/src/opencl/bgfg_mog.cl index 77bdb9c2a..2e269999a 100644 --- a/modules/ocl/src/opencl/bgfg_mog.cl +++ b/modules/ocl/src/opencl/bgfg_mog.cl @@ -134,7 +134,7 @@ __kernel void mog_withoutLearning_kernel(__global T_FRAME* frame, __global uchar __global float* weight, __global T_MEAN_VAR* mean, __global T_MEAN_VAR* var, int frame_row, int frame_col, int frame_step, int fgmask_step, int weight_step, int mean_step, int var_step, - float varThreshold, float backgroundRatio, int fgmask_offset_x, + float varThreshold, float backgroundRatio, int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y) { int x = get_global_id(0); @@ -142,7 +142,6 @@ __kernel void mog_withoutLearning_kernel(__global T_FRAME* frame, __global uchar if (x < frame_col && y < frame_row) { - T_MEAN_VAR pix = cvt(frame[(y + frame_offset_y) * frame_step + (x + frame_offset_x)]); int kHit = -1; @@ -179,20 +178,18 @@ __kernel void mog_withoutLearning_kernel(__global T_FRAME* frame, __global uchar } } } - if(kHit < 0 || kHit >= kForeground) fgmask[(y + fgmask_offset_y) * 
fgmask_step + (x + fgmask_offset_x)] = (uchar) (-1); else fgmask[(y + fgmask_offset_y) * fgmask_step + (x + fgmask_offset_x)] = (uchar) (0); - } } __kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global int* fgmask, - __global float* weight, __global float* sortKey, __global T_MEAN_VAR* mean, + __global float* weight, __global float* sortKey, __global T_MEAN_VAR* mean, __global T_MEAN_VAR* var, int frame_row, int frame_col, int frame_step, int fgmask_step, int weight_step, int sortKey_step, int mean_step, int var_step, - float varThreshold, float backgroundRatio, float learningRate, float minVar, + float varThreshold, float backgroundRatio, float learningRate, float minVar, int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y) { const float w0 = 0.05f; @@ -322,7 +319,7 @@ __kernel void mog_withLearning_kernel(__global T_FRAME* frame, __global int* fgm __kernel void getBackgroundImage_kernel(__global float* weight, __global T_MEAN_VAR* mean, __global T_FRAME* dst, - int dst_row, int dst_col, int weight_step, int mean_step, int dst_step, + int dst_row, int dst_col, int weight_step, int mean_step, int dst_step, float backgroundRatio) { int x = get_global_id(0); @@ -351,8 +348,8 @@ __kernel void getBackgroundImage_kernel(__global float* weight, __global T_MEAN_ } __kernel void mog2_kernel(__global T_FRAME * frame, __global int* fgmask, __global float* weight, __global T_MEAN_VAR * mean, - __global int* modesUsed, __global float* variance, int frame_row, int frame_col, int frame_step, - int fgmask_step, int weight_step, int mean_step, int modesUsed_step, int var_step, float alphaT, float alpha1, float prune, + __global int* modesUsed, __global float* variance, int frame_row, int frame_col, int frame_step, + int fgmask_step, int weight_step, int mean_step, int modesUsed_step, int var_step, float alphaT, float alpha1, float prune, int detectShadows_flag, int fgmask_offset_x, int fgmask_offset_y, int frame_offset_x, int frame_offset_y, __constant con_srtuct_t* constants) { int x = get_global_id(0); @@ -505,7 +502,7 @@ __kernel void mog2_kernel(__global T_FRAME * frame, __global int* fgmask, __glob } __kernel void getBackgroundImage2_kernel(__global int* modesUsed, __global float* weight, __global T_MEAN_VAR* mean, - __global T_FRAME* dst, float c_TB, int modesUsed_row, int modesUsed_col, int modesUsed_step, int weight_step, + __global T_FRAME* dst, float c_TB, int modesUsed_row, int modesUsed_col, int modesUsed_step, int weight_step, int mean_step, int dst_step, int dst_x, int dst_y) { int x = get_global_id(0); From 14e083f1e0736ef0119a2cb3588b179c7d06a5ee Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Thu, 29 Aug 2013 14:08:56 +0800 Subject: [PATCH 11/55] Removed trailing whitespace --- modules/ocl/test/utility.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/ocl/test/utility.cpp b/modules/ocl/test/utility.cpp index 750c3c82b..5d3195719 100644 --- a/modules/ocl/test/utility.cpp +++ b/modules/ocl/test/utility.cpp @@ -119,7 +119,7 @@ cv::ocl::oclMat createMat_ocl(Size size, int type, bool useRoi) } cv::ocl::oclMat loadMat_ocl(const Mat& m, bool useRoi) -{ +{ CV_Assert(m.type() == CV_8UC1 || m.type() == CV_8UC3); cv::ocl::oclMat d_m; d_m = createMat_ocl(m.size(), m.type(), useRoi); @@ -130,11 +130,11 @@ cv::ocl::oclMat loadMat_ocl(const Mat& m, bool useRoi) d_m.locateROI(ls, pt); Rect roi(pt.x, pt.y, d_m.size().width, d_m.size().height); - + cv::ocl::oclMat m_ocl(m); cv::ocl::oclMat d_m_roi(d_m, roi); - + m_ocl.copyTo(d_m); 
return d_m; } @@ -289,4 +289,3 @@ double checkRectSimilarity(Size sz, std::vector& ob1, std::vector& o } return final_test_result; } - From 0233c4c198a359014307e7da11b66b6f5f1f14ea Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Thu, 29 Aug 2013 15:49:02 +0800 Subject: [PATCH 12/55] Removed whitespace. --- modules/ocl/src/bgfg_mog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ocl/src/bgfg_mog.cpp b/modules/ocl/src/bgfg_mog.cpp index d39f86394..3051ac82f 100644 --- a/modules/ocl/src/bgfg_mog.cpp +++ b/modules/ocl/src/bgfg_mog.cpp @@ -330,7 +330,7 @@ void cv::ocl::device::mog::mog_ocl(const oclMat& frame, int cn, oclMat& fgmask, const float minVar = noiseSigma * noiseSigma; if(learningRate > 0.0f) - mog_withLearning(frame, cn, fgmask, weight, sortKey, mean, var, nmixtures, + mog_withLearning(frame, cn, fgmask, weight, sortKey, mean, var, nmixtures, varThreshold, backgroundRatio, learningRate, minVar); else mog_withoutLearning(frame, cn, fgmask, weight, mean, var, nmixtures, varThreshold, backgroundRatio); From 4f3349ffe43a8b486f6e7358350dcedc8b31ab88 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Thu, 29 Aug 2013 16:24:26 +0800 Subject: [PATCH 13/55] Added perf namespace. --- modules/ocl/perf/perf_bgfg.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp index d507a3b5e..5bf406daf 100644 --- a/modules/ocl/perf/perf_bgfg.cpp +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -43,6 +43,7 @@ // //M*/ #include "perf_precomp.hpp" +using namespace perf; using namespace cv; using namespace cv::ocl; From 114f3266d801a23dcdbbda70b5cbb99026ac4fd4 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Thu, 29 Aug 2013 17:35:47 +0800 Subject: [PATCH 14/55] Removed performance test. --- modules/ocl/perf/perf_bgfg.cpp | 334 --------------------------------- 1 file changed, 334 deletions(-) delete mode 100644 modules/ocl/perf/perf_bgfg.cpp diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp deleted file mode 100644 index 5bf406daf..000000000 --- a/modules/ocl/perf/perf_bgfg.cpp +++ /dev/null @@ -1,334 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2010-2013, Multicoreware, Inc., all rights reserved. -// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// @Authors -// Jin Ma, jin@multicorewareinc.com -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other oclMaterials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// This software is provided by the copyright holders and contributors as is and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ -#include "perf_precomp.hpp" -using namespace perf; -using namespace cv; -using namespace cv::ocl; - -static void cvtFrameFmt(std::vector& input, std::vector& output, int output_cn) -{ - for(int i = 0; i< (int)(input.size()); i++) - { - if(output_cn == 1) - cvtColor(input[i], output[i], COLOR_RGB2GRAY); - else - cvtColor(input[i], output[i], COLOR_RGB2RGBA); - } -} -///////////// MOG//////////////////////// -PERFTEST(mog) -{ - const string inputFile[] = {"768x576.avi", "1920x1080.avi"}; - int cn[] = {1, 3}; - - float learningRate[] = {0.0f, 0.01f}; - - for(unsigned int idx = 0; idx < sizeof(inputFile)/sizeof(string); idx++) - { - VideoCapture cap(inputFile[idx]); - ASSERT_TRUE(cap.isOpened()); - - Mat frame; - int nframe = 5; - Mat foreground_cpu; - oclMat foreground_ocl; - std::vector frame_buffer_init; - std::vector frame_buffer(nframe); - std::vector frame_buffer_ocl; - std::vector foreground_buf_ocl; - std::vector foreground_buf_cpu; - BackgroundSubtractorMOG mog_cpu; - cv::ocl::MOG d_mog; - for(int i = 0; i < nframe; i++) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - frame_buffer_init.push_back(frame); - } - - for(unsigned int i = 0; i < sizeof(learningRate)/sizeof(float); i++) - { - for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) - { - SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "< frame_buffer_init; - std::vector frame_buffer(nframe); - std::vector frame_buffer_ocl; - std::vector foreground_buf_ocl; - std::vector foreground_buf_cpu; - cv::ocl::oclMat foreground_ocl; - - for(int i = 0; i < nframe; i++) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - frame_buffer_init.push_back(frame); - } - cv::ocl::MOG2 d_mog; - - for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) - { - SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "<> frame; - ASSERT_FALSE(frame.empty()); - - int nframe = 5; - std::vector frame_buffer_init; - std::vector frame_buffer(nframe); - std::vector frame_buffer_ocl; - std::vector foreground_buf_ocl; - std::vector foreground_buf_cpu; - - for(int i = 0; i < nframe; i++) - { - cap >> frame; - ASSERT_FALSE(frame.empty()); - frame_buffer_init.push_back(frame); - } - - for(unsigned int j = 0; j < sizeof(cn)/sizeof(int); j++) - { - SUBTEST << frame.cols << 'x' << frame.rows << ".avi; "<<"channels: "< Date: Thu, 1 Aug 2013 20:42:59 +0800 Subject: [PATCH 15/55] Prepare Downhill Simplex for pull request This is an implementation of so-called downhill simplex method (https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method) Please, let me know if you have any comments, whoever you'd be. 
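For reviewers, a minimal usage sketch of the interface this patch introduces, modelled on the Sphere case in modules/optim/test/test_downhill_simplex.cpp. The body of SphereF below is an assumption (the test only implies a minimum at the origin), and the Ptr template arguments follow the declarations added further down in this patch:

    #include "opencv2/optim.hpp"

    // Assumed cost function: f(x) = x0^2 + x1^2, minimum at the origin.
    class SphereF : public cv::optim::Solver::Function
    {
    public:
        double calc(const double* x) const { return x[0]*x[0] + x[1]*x[1]; }
    };

    int main()
    {
        cv::Ptr<cv::optim::DownhillSolver> solver = cv::optim::createDownhillSolver();
        solver->setFunction(cv::Ptr<cv::optim::Solver::Function>(new SphereF()));

        // The step controls the spread (size in each dimension) of the initial simplex.
        cv::Mat step = (cv::Mat_<double>(1, 2) << 0.5, 0.5);
        solver->setInitStep(step);

        // x is the centroid of the initial simplex; on return it holds the located minimum.
        cv::Mat x = (cv::Mat_<double>(1, 2) << 1.0, 1.0);
        double res = solver->minimize(x);
        return (res < 1e-6) ? 0 : 1;
    }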
--- modules/optim/doc/downhill_simplex_method.rst | 164 ++++++++++ modules/optim/doc/optim.rst | 1 + modules/optim/include/opencv2/optim.hpp | 39 +++ modules/optim/src/debug.hpp | 18 ++ modules/optim/src/lpsolver.cpp | 8 +- modules/optim/src/simplex.cpp | 289 ++++++++++++++++++ modules/optim/test/test_downhill_simplex.cpp | 63 ++++ 7 files changed, 575 insertions(+), 7 deletions(-) create mode 100644 modules/optim/doc/downhill_simplex_method.rst create mode 100644 modules/optim/src/debug.hpp create mode 100644 modules/optim/src/simplex.cpp create mode 100644 modules/optim/test/test_downhill_simplex.cpp diff --git a/modules/optim/doc/downhill_simplex_method.rst b/modules/optim/doc/downhill_simplex_method.rst new file mode 100644 index 000000000..8bb9be8ba --- /dev/null +++ b/modules/optim/doc/downhill_simplex_method.rst @@ -0,0 +1,164 @@ +Downhill Simplex Method +======================= + +.. highlight:: cpp + +optim::DownhillSolver +--------------------------------- + +.. ocv:class:: optim::DownhillSolver + +This class is used to perform the non-linear non-constrained *minimization* of a function, given on an *n*-dimensional Euclidean space, +using the **Nelder-Mead method**, also known as **downhill simplex method**. The basic idea about the method can be obtained from +(`http://en.wikipedia.org/wiki/Nelder-Mead\_method `_). It should be noted, that +this method, although deterministic, is rather a heuristic and therefore may converge to a local minima, not necessary a global one. +It is iterative optimization technique, which at each step uses an information about the values of a function evaluated only at +*n+1* points, arranged as a *simplex* in *n*-dimensional space (hence the second name of the method). At each step new point is +chosen to evaluate function at, obtained value is compared with previous ones and based on this information simplex changes it's shape +, slowly moving to the local minimum. + +Algorithm stops when the number of function evaluations done exceeds ``termcrit.maxCount``, when the function values at the +vertices of simplex are within ``termcrit.epsilon`` range or simplex becomes so small that it +can enclosed in a box with ``termcrit.epsilon`` sides, whatever comes first, for some defined by user +positive integer ``termcrit.maxCount`` and positive non-integer ``termcrit.epsilon``. + +:: + + class CV_EXPORTS Solver : public Algorithm + { + public: + class CV_EXPORTS Function + { + public: + virtual ~Function() {} + //! ndim - dimensionality + virtual double calc(const double* x) const = 0; + }; + + virtual Ptr getFunction() const = 0; + virtual void setFunction(const Ptr& f) = 0; + + virtual TermCriteria getTermCriteria() const = 0; + virtual void setTermCriteria(const TermCriteria& termcrit) = 0; + + // x contain the initial point before the call and the minima position (if algorithm converged) after. x is assumed to be (something that + // after getMat() will return) row-vector or column-vector. *It's size and should + // be consisted with previous dimensionality data given, if any (otherwise, it determines dimensionality)* + virtual double minimize(InputOutputArray x) = 0; + }; + + class CV_EXPORTS DownhillSolver : public Solver + { + public: + //! returns row-vector, even if the column-vector was given + virtual void getInitStep(OutputArray step) const=0; + //!This should be called at least once before the first call to minimize() and step is assumed to be (something that + //! after getMat() will return) row-vector or column-vector. 
*It's dimensionality determines the dimensionality of a problem.* + virtual void setInitStep(InputArray step)=0; + }; + +It should be noted, that ``optim::DownhillSolver`` is a derivative of the abstract interface ``optim::Solver``, which in +turn is derived from the ``Algorithm`` interface and is used to encapsulate the functionality, common to all non-linear optimization +algorithms in the ``optim`` module. + +optim::DownhillSolver::getFunction +-------------------------------------------- + +Getter for the optimized function. The optimized function is represented by ``Solver::Function`` interface, which requires +derivatives to implement the sole method ``calc(double*)`` to evaluate the function. + +.. ocv:function:: Ptr optim::DownhillSolver::getFunction() + + :return: Smart-pointer to an object that implements ``Solver::Function`` interface - it represents the function that is being optimized. It can be empty, if no function was given so far. + +optim::DownhillSolver::setFunction +----------------------------------------------- + +Setter for the optimized function. *It should be called at least once before the call to* ``DownhillSolver::minimize()``, as +default value is not usable. + +.. ocv:function:: void optim::DownhillSolver::setFunction(const Ptr& f) + + :param f: The new function to optimize. + +optim::DownhillSolver::getTermCriteria +---------------------------------------------------- + +Getter for the previously set terminal criteria for this algorithm. + +.. ocv:function:: TermCriteria optim::DownhillSolver::getTermCriteria() + + :return: Deep copy of the terminal criteria used at the moment. + +optim::DownhillSolver::setTermCriteria +------------------------------------------ + +Set terminal criteria for downhill simplex method. Two things should be noted. First, this method *is not necessary* to be called +before the first call to ``DownhillSolver::minimize()``, as the default value is sensible. Second, the method will raise an error +if ``termcrit.type!=(TermCriteria::MAX_ITER+TermCriteria::EPS)``, ``termcrit.epsilon<=0`` or ``termcrit.maxCount<=0``. That is, +both ``epsilon`` and ``maxCount`` should be set to positive values (non-integer and integer respectively) and they represent +tolerance and maximal number of function evaluations that is allowed. + +Algorithm stops when the number of function evaluations done exceeds ``termcrit.maxCount``, when the function values at the +vertices of simplex are within ``termcrit.epsilon`` range or simplex becomes so small that it +can enclosed in a box with ``termcrit.epsilon`` sides, whatever comes first. + +.. ocv:function:: void optim::DownhillSolver::setTermCriteria(const TermCriteria& termcrit) + + :param termcrit: Terminal criteria to be used, represented as ``TermCriteria`` structure (defined elsewhere in openCV). Mind you, that it should meet ``(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0)``, otherwise the error will be raised. + +optim::DownhillSolver::getInitStep +----------------------------------- + +Returns the initial step that will be used in downhill simplex algorithm. See the description +of corresponding setter (follows next) for the meaning of this parameter. + +.. ocv:function:: void optim::getInitStep(OutputArray step) + + :param step: Initial step that will be used in algorithm. Note, that although corresponding setter accepts column-vectors as well as row-vectors, this method will return a row-vector. 
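As a small illustration of the ``setTermCriteria`` requirements described above (``solver`` is assumed to be a ``Ptr<DownhillSolver>`` obtained from ``createDownhillSolver``; the values mirror the defaults used elsewhere in this patch)::

    // Both MAX_ITER and EPS must be requested, with maxCount > 0 and epsilon > 0;
    // any other combination makes setTermCriteria() raise an error.
    cv::TermCriteria termcrit(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 5000, 0.000001);
    solver->setTermCriteria(termcrit);

    cv::TermCriteria current = solver->getTermCriteria();   // returns a deep copy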
+ +optim::DownhillSolver::setInitStep +---------------------------------- + +Sets the initial step that will be used in downhill simplex algorithm. Step, together with initial point (givin in ``DownhillSolver::minimize``) +are two *n*-dimensional vectors that are used to determine the shape of initial simplex. Roughly said, initial point determines the position +of a simplex (it will become simplex's centroid), while step determines the spread (size in each dimension) of a simplex. To be more precise, +if :math:`s,x_0\in\mathbb{R}^n` are the initial step and initial point respectively, the vertices of a simplex will be: :math:`v_0:=x_0-\frac{1}{2} +s` and :math:`v_i:=x_0+s_i` for :math:`i=1,2,\dots,n` where :math:`s_i` denotes projections of the initial step of *n*-th coordinate (the result +of projection is treated to be vector given by :math:`s_i:=e_i\cdot\left`, where :math:`e_i` form canonical basis) + +.. ocv:function:: void optim::setInitStep(InputArray step) + + :param step: Initial step that will be used in algorithm. Roughly said, it determines the spread (size in each dimension) of an initial simplex. + +optim::DownhillSolver::minimize +----------------------------------- + +The main method of the ``DownhillSolver``. It actually runs the algorithm and performs the minimization. The sole input parameter determines the +centroid of the starting simplex (roughly, it tells where to start), all the others (terminal criteria, initial step, function to be minimized) +are supposed to be set via the setters before the call to this method or the default values (not always sensible) will be used. + +.. ocv:function:: double optim::DownhillSolver::minimize(InputOutputArray x) + + :param x: The initial point, that will become a centroid of an initial simplex. After the algorithm will terminate, it will be setted to the + point where the algorithm stops, the point of possible minimum. + + :return: The value of a function at the point found. + +Explain parameters. + +optim::createDownhillSolver +------------------------------------ + +This function returns the reference to the ready-to-use ``DownhillSolver`` object. All the parameters are optional, so this procedure can be called +even without parameters at all. In this case, the default values will be used. As default value for terminal criteria are the only sensible ones, +``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()`` should be called upon the obtained object, if the respective parameters +were not given to ``createDownhillSolver()``. Otherwise, the two ways (give parameters to ``createDownhillSolver()`` or miss the out and call the +``DownhillSolver::setFunction()`` and ``DownhillSolver::setInitStep()``) are absolutely equivalent (and will drop the same errors in the same way, +should invalid input be detected). + +.. ocv:function:: Ptr optim::createDownhillSolver(const Ptr& f,InputArray initStep, TermCriteria termcrit) + + :param f: Pointer to the function that will be minimized, similarly to the one you submit via ``DownhillSolver::setFunction``. + :param step: Initial step, that will be used to construct the initial simplex, similarly to the one you submit via ``DownhillSolver::setInitStep``. + :param termcrit: Terminal criteria to the algorithm, similarly to the one you submit via ``DownhillSolver::setTermCriteria``. diff --git a/modules/optim/doc/optim.rst b/modules/optim/doc/optim.rst index bead2122a..cdbaeac25 100644 --- a/modules/optim/doc/optim.rst +++ b/modules/optim/doc/optim.rst @@ -8,3 +8,4 @@ optim. 
Generic numerical optimization :maxdepth: 2 linear_programming + downhill_simplex_method diff --git a/modules/optim/include/opencv2/optim.hpp b/modules/optim/include/opencv2/optim.hpp index 0bbedad38..ad26a09e9 100644 --- a/modules/optim/include/opencv2/optim.hpp +++ b/modules/optim/include/opencv2/optim.hpp @@ -47,6 +47,45 @@ namespace cv{namespace optim { +class CV_EXPORTS Solver : public Algorithm +{ +public: + class CV_EXPORTS Function + { + public: + virtual ~Function() {} + //! ndim - dimensionality + virtual double calc(const double* x) const = 0; + }; + + virtual Ptr getFunction() const = 0; + virtual void setFunction(const Ptr& f) = 0; + + virtual TermCriteria getTermCriteria() const = 0; + virtual void setTermCriteria(const TermCriteria& termcrit) = 0; + + // x contain the initial point before the call and the minima position (if algorithm converged) after. x is assumed to be (something that + // after getMat() will return) row-vector or column-vector. *It's size and should + // be consisted with previous dimensionality data given, if any (otherwise, it determines dimensionality)* + virtual double minimize(InputOutputArray x) = 0; +}; + +//! downhill simplex class +class CV_EXPORTS DownhillSolver : public Solver +{ +public: + //! returns row-vector, even if the column-vector was given + virtual void getInitStep(OutputArray step) const=0; + //!This should be called at least once before the first call to minimize() and step is assumed to be (something that + //! after getMat() will return) row-vector or column-vector. *It's dimensionality determines the dimensionality of a problem.* + virtual void setInitStep(InputArray step)=0; +}; + +// both minRange & minError are specified by termcrit.epsilon; In addition, user may specify the number of iterations that the algorithm does. 
+CV_EXPORTS_W Ptr createDownhillSolver(const Ptr& f=Ptr(), + InputArray initStep=Mat_(1,1,0.0), + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001)); + //!the return codes for solveLP() function enum { diff --git a/modules/optim/src/debug.hpp b/modules/optim/src/debug.hpp new file mode 100644 index 000000000..fe5d00e87 --- /dev/null +++ b/modules/optim/src/debug.hpp @@ -0,0 +1,18 @@ +namespace cv{namespace optim{ +#ifdef ALEX_DEBUG +#define dprintf(x) printf x +static void print_matrix(const Mat& x){ + printf("\ttype:%d vs %d,\tsize: %d-on-%d\n",x.type(),CV_64FC1,x.rows,x.cols); + for(int i=0;i(i,j)); + } + printf("]\n"); + } +} +#else +#define dprintf(x) +#define print_matrix(x) +#endif +}} diff --git a/modules/optim/src/lpsolver.cpp b/modules/optim/src/lpsolver.cpp index 924b756a9..a046ddae1 100644 --- a/modules/optim/src/lpsolver.cpp +++ b/modules/optim/src/lpsolver.cpp @@ -2,16 +2,12 @@ #include #include #include +#include namespace cv{namespace optim{ using std::vector; #ifdef ALEX_DEBUG -#define dprintf(x) printf x -static void print_matrix(const Mat& x){ - print(x); - printf("\n"); -} static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::vector N,const std::vector B){ printf("\tprint simplex state\n"); @@ -32,8 +28,6 @@ static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::ve printf("\n"); } #else -#define dprintf(x) -#define print_matrix(x) #define print_simplex_state(c,b,v,N,B) #endif diff --git a/modules/optim/src/simplex.cpp b/modules/optim/src/simplex.cpp new file mode 100644 index 000000000..eeb23c5c4 --- /dev/null +++ b/modules/optim/src/simplex.cpp @@ -0,0 +1,289 @@ +#include "precomp.hpp" +#include "debug.hpp" + +namespace cv{namespace optim{ + + class DownhillSolverImpl : public DownhillSolver + { + public: + void getInitStep(OutputArray step) const; + void setInitStep(InputArray step); + Ptr getFunction() const; + void setFunction(const Ptr& f); + TermCriteria getTermCriteria() const; + DownhillSolverImpl(); + void setTermCriteria(const TermCriteria& termcrit); + double minimize(InputOutputArray x); + protected: + Ptr _Function; + TermCriteria _termcrit; + Mat _step; + private: + inline void compute_coord_sum(Mat_& points,Mat_& coord_sum); + inline void create_initial_simplex(Mat_& simplex,Mat& step); + inline double inner_downhill_simplex(cv::Mat_& p,double MinRange,double MinError,int& nfunk, + const Ptr& f,int nmax); + inline double try_new_point(Mat_& p,Mat_& y,Mat_& coord_sum,const Ptr& f,int ihi, + double fac,Mat_& ptry); + }; + + double DownhillSolverImpl::try_new_point( + Mat_& p, + Mat_& y, + Mat_& coord_sum, + const Ptr& f, + int ihi, + double fac, + Mat_& ptry + ) + { + int ndim=p.cols; + int j; + double fac1,fac2,ytry; + + fac1=(1.0-fac)/ndim; + fac2=fac1-fac; + for (j=0;jcalc((double*)ptry.data); + if (ytry < y(ihi)) + { + y(ihi)=ytry; + for (j=0;j& p, + double MinRange, + double MinError, + int& nfunk, + const Ptr& f, + int nmax + ) + { + int ndim=p.cols; + double res; + int i,ihi,ilo,inhi,j,mpts=ndim+1; + double error, range,ysave,ytry; + Mat_ coord_sum(1,ndim,0.0),buf(1,ndim,0.0),y(1,ndim,0.0); + + nfunk = 0; + + for(i=0;icalc(p[i]); + } + + nfunk = ndim+1; + + compute_coord_sum(p,coord_sum); + + for (;;) + { + ilo=0; + /* find highest (worst), next-to-worst, and lowest + (best) points by going through all of them. */ + ihi = y(0)>y(1) ? 
(inhi=1,0) : (inhi=0,1); + for (i=0;i y(ihi)) + { + inhi=ihi; + ihi=i; + } + else if (y(i) > y(inhi) && i != ihi) + inhi=i; + } + + /* check stop criterion */ + error=fabs(y(ihi)-y(ilo)); + range=0; + for(i=0;i p(j,i) ) min = p(j,i); + if( max < p(j,i) ) max = p(j,i); + } + d = fabs(max-min); + if(range < d) range = d; + } + + if(range <= MinRange || error <= MinError) + { /* Put best point and value in first slot. */ + std::swap(y(0),y(ilo)); + for (i=0;i= nmax){ + dprintf(("nmax exceeded\n")); + return y(ilo); + } + nfunk += 2; + /*Begin a new iteration. First, reflect the worst point about the centroid of others */ + ytry = try_new_point(p,y,coord_sum,f,ihi,-1.0,buf); + if (ytry <= y(ilo)) + { /*If that's better than the best point, go twice as far in that direction*/ + ytry = try_new_point(p,y,coord_sum,f,ihi,2.0,buf); + } + else if (ytry >= y(inhi)) + { /* The new point is worse than the second-highest, but better + than the worst so do not go so far in that direction */ + ysave = y(ihi); + ytry = try_new_point(p,y,coord_sum,f,ihi,0.5,buf); + if (ytry >= ysave) + { /* Can't seem to improve things. Contract the simplex to good point + in hope to find a simplex landscape. */ + for (i=0;icalc((double*)coord_sum.data); + } + } + nfunk += ndim; + compute_coord_sum(p,coord_sum); + } + } else --(nfunk); /* correct nfunk */ + dprintf(("this is simplex on iteration %d\n",nfunk)); + print_matrix(p); + } /* go to next iteration. */ + res = y(0); + + return res; + } + + void DownhillSolverImpl::compute_coord_sum(Mat_& points,Mat_& coord_sum){ + for (int j=0;j& simplex,Mat& step){ + for(int i=1;i<=step.cols;++i) + { + simplex.row(0).copyTo(simplex.row(i)); + simplex(i,i-1)+= 0.5*step.at(0,i-1); + } + simplex.row(0) -= 0.5*step; + + dprintf(("this is simplex\n")); + print_matrix(simplex); + } + + double DownhillSolverImpl::minimize(InputOutputArray x){ + dprintf(("hi from minimize\n")); + CV_Assert(_Function.empty()==false); + dprintf(("termcrit:\n\ttype: %d\n\tmaxCount: %d\n\tEPS: %g\n",_termcrit.type,_termcrit.maxCount,_termcrit.epsilon)); + dprintf(("step\n")); + print_matrix(_step); + + Mat x_mat=x.getMat(); + CV_Assert(MIN(x_mat.rows,x_mat.cols)==1); + CV_Assert(MAX(x_mat.rows,x_mat.cols)==_step.cols); + CV_Assert(x_mat.type()==CV_64FC1); + + Mat_ proxy_x; + + if(x_mat.rows>1){ + proxy_x=x_mat.t(); + }else{ + proxy_x=x_mat; + } + + int count=0; + int ndim=_step.cols; + Mat_ simplex=Mat_(ndim+1,ndim,0.0); + + simplex.row(0).copyTo(proxy_x); + create_initial_simplex(simplex,_step); + double res = inner_downhill_simplex( + simplex,_termcrit.epsilon, _termcrit.epsilon, count,_Function,_termcrit.maxCount); + simplex.row(0).copyTo(proxy_x); + + dprintf(("%d iterations done\n",count)); + + if(x_mat.rows>1){ + Mat(x_mat.rows, 1, CV_64F, (double*)proxy_x.data).copyTo(x); + } + return res; + } + DownhillSolverImpl::DownhillSolverImpl(){ + _Function=Ptr(); + _step=Mat_(); + } + Ptr DownhillSolverImpl::getFunction()const{ + return _Function; + } + void DownhillSolverImpl::setFunction(const Ptr& f){ + _Function=f; + } + TermCriteria DownhillSolverImpl::getTermCriteria()const{ + return _termcrit; + } + void DownhillSolverImpl::setTermCriteria(const TermCriteria& termcrit){ + CV_Assert(termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0); + _termcrit=termcrit; + } + // both minRange & minError are specified by termcrit.epsilon; In addition, user may specify the number of iterations that the algorithm does. 
+ Ptr createDownhillSolver(const Ptr& f, InputArray initStep, TermCriteria termcrit){ + DownhillSolver *DS=new DownhillSolverImpl(); + DS->setFunction(f); + DS->setInitStep(initStep); + DS->setTermCriteria(termcrit); + return Ptr(DS); + } + void DownhillSolverImpl::getInitStep(OutputArray step)const{ + step.create(1,_step.cols,CV_64FC1); + _step.copyTo(step); + } + void DownhillSolverImpl::setInitStep(InputArray step){ + //set dimensionality and make a deep copy of step + Mat m=step.getMat(); + dprintf(("m.cols=%d\nm.rows=%d\n",m.cols,m.rows)); + CV_Assert(MIN(m.cols,m.rows)==1 && m.type()==CV_64FC1); + int ndim=MAX(m.cols,m.rows); + if(ndim!=_step.cols){ + _step=Mat_(1,ndim); + } + if(m.rows==1){ + m.copyTo(_step); + }else{ + Mat step_t=Mat_(ndim,1,(double*)_step.data); + m.copyTo(step_t); + } + } +}} diff --git a/modules/optim/test/test_downhill_simplex.cpp b/modules/optim/test/test_downhill_simplex.cpp new file mode 100644 index 000000000..cd2f099f0 --- /dev/null +++ b/modules/optim/test/test_downhill_simplex.cpp @@ -0,0 +1,63 @@ +#include "test_precomp.hpp" +#include +#include +#include + +static void mytest(cv::Ptr solver,cv::Ptr ptr_F,cv::Mat& x,cv::Mat& step, + cv::Mat& etalon_x,double etalon_res){ + solver->setFunction(ptr_F); + int ndim=MAX(step.cols,step.rows); + solver->setInitStep(step); + cv::Mat settedStep; + solver->getInitStep(settedStep); + ASSERT_TRUE(settedStep.rows==1 && settedStep.cols==ndim); + ASSERT_TRUE(std::equal(step.begin(),step.end(),settedStep.begin())); + std::cout<<"step setted:\n\t"<minimize(x); + std::cout<<"res:\n\t"<getTermCriteria().epsilon; + ASSERT_TRUE(std::abs(res-etalon_res)::iterator it1=x.begin(),it2=etalon_x.begin();it1!=x.end();it1++,it2++){ + ASSERT_TRUE(std::abs((*it1)-(*it2)) solver=cv::optim::createDownhillSolver(); +#if 1 + { + cv::Ptr ptr_F(new SphereF()); + cv::Mat x=(cv::Mat_(1,2)<<1.0,1.0), + step=(cv::Mat_(2,1)<<-0.5,-0.5), + etalon_x=(cv::Mat_(1,2)<<-0.0,0.0); + double etalon_res=0.0; + mytest(solver,ptr_F,x,step,etalon_x,etalon_res); + } +#endif +#if 1 + { + cv::Ptr ptr_F(new RosenbrockF()); + cv::Mat x=(cv::Mat_(2,1)<<0.0,0.0), + step=(cv::Mat_(2,1)<<0.5,+0.5), + etalon_x=(cv::Mat_(2,1)<<1.0,1.0); + double etalon_res=0.0; + mytest(solver,ptr_F,x,step,etalon_x,etalon_res); + } +#endif +} From b1f029ccc55adb4597751e4aeaf67122046482c3 Mon Sep 17 00:00:00 2001 From: Alex Leontiev Date: Tue, 20 Aug 2013 17:54:14 +0800 Subject: [PATCH 16/55] Removed trailing spaces This caused warnings. --- modules/optim/doc/downhill_simplex_method.rst | 6 +-- modules/optim/src/simplex.cpp | 50 +++++++++---------- modules/optim/test/test_downhill_simplex.cpp | 2 +- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/modules/optim/doc/downhill_simplex_method.rst b/modules/optim/doc/downhill_simplex_method.rst index 8bb9be8ba..5661a5cfc 100644 --- a/modules/optim/doc/downhill_simplex_method.rst +++ b/modules/optim/doc/downhill_simplex_method.rst @@ -32,7 +32,7 @@ positive integer ``termcrit.maxCount`` and positive non-integer ``termcrit.epsil public: virtual ~Function() {} //! ndim - dimensionality - virtual double calc(const double* x) const = 0; + virtual double calc(const double* x) const = 0; }; virtual Ptr getFunction() const = 0; @@ -64,7 +64,7 @@ algorithms in the ``optim`` module. optim::DownhillSolver::getFunction -------------------------------------------- -Getter for the optimized function. The optimized function is represented by ``Solver::Function`` interface, which requires +Getter for the optimized function. 
The optimized function is represented by ``Solver::Function`` interface, which requires derivatives to implement the sole method ``calc(double*)`` to evaluate the function. .. ocv:function:: Ptr optim::DownhillSolver::getFunction() @@ -134,7 +134,7 @@ of projection is treated to be vector given by :math:`s_i:=e_i\cdot\left& p, + Mat_& p, Mat_& y, - Mat_& coord_sum, + Mat_& coord_sum, const Ptr& f, - int ihi, + int ihi, double fac, Mat_& ptry ) @@ -40,24 +40,24 @@ namespace cv{namespace optim{ int ndim=p.cols; int j; double fac1,fac2,ytry; - + fac1=(1.0-fac)/ndim; fac2=fac1-fac; - for (j=0;jcalc((double*)ptry.data); - if (ytry < y(ihi)) + if (ytry < y(ihi)) { y(ihi)=ytry; - for (j=0;j& p, + double DownhillSolverImpl::inner_downhill_simplex( + cv::Mat_& p, double MinRange, double MinError, int& nfunk, @@ -84,32 +84,32 @@ namespace cv{namespace optim{ Mat_ coord_sum(1,ndim,0.0),buf(1,ndim,0.0),y(1,ndim,0.0); nfunk = 0; - + for(i=0;icalc(p[i]); } nfunk = ndim+1; - + compute_coord_sum(p,coord_sum); - - for (;;) + + for (;;) { ilo=0; /* find highest (worst), next-to-worst, and lowest (best) points by going through all of them. */ ihi = y(0)>y(1) ? (inhi=1,0) : (inhi=0,1); - for (i=0;i y(ihi)) + if (y(i) > y(ihi)) { inhi=ihi; ihi=i; - } - else if (y(i) > y(inhi) && i != ihi) + } + else if (y(i) > y(inhi) && i != ihi) inhi=i; } @@ -130,10 +130,10 @@ namespace cv{namespace optim{ if(range < d) range = d; } - if(range <= MinRange || error <= MinError) + if(range <= MinRange || error <= MinError) { /* Put best point and value in first slot. */ std::swap(y(0),y(ilo)); - for (i=0;i= y(inhi)) + else if (ytry >= y(inhi)) { /* The new point is worse than the second-highest, but better than the worst so do not go so far in that direction */ ysave = y(ihi); ytry = try_new_point(p,y,coord_sum,f,ihi,0.5,buf); - if (ytry >= ysave) + if (ytry >= ysave) { /* Can't seem to improve things. Contract the simplex to good point in hope to find a simplex landscape. */ - for (i=0;i #include -static void mytest(cv::Ptr solver,cv::Ptr ptr_F,cv::Mat& x,cv::Mat& step, +static void mytest(cv::Ptr solver,cv::Ptr ptr_F,cv::Mat& x,cv::Mat& step, cv::Mat& etalon_x,double etalon_res){ solver->setFunction(ptr_F); int ndim=MAX(step.cols,step.rows); From b92c88ddd1470a9f226c218a35b55ae533b5a57e Mon Sep 17 00:00:00 2001 From: Alex Leontiev Date: Wed, 21 Aug 2013 17:23:40 +0800 Subject: [PATCH 17/55] Removed trailing spaces Continuation of work done in previous commit. --- modules/optim/include/opencv2/optim.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/optim/include/opencv2/optim.hpp b/modules/optim/include/opencv2/optim.hpp index ad26a09e9..9acd95d02 100644 --- a/modules/optim/include/opencv2/optim.hpp +++ b/modules/optim/include/opencv2/optim.hpp @@ -55,7 +55,7 @@ public: public: virtual ~Function() {} //! ndim - dimensionality - virtual double calc(const double* x) const = 0; + virtual double calc(const double* x) const = 0; }; virtual Ptr getFunction() const = 0; From f2fd0ad15395fae7427723f9d8a6b663e3c8f0e7 Mon Sep 17 00:00:00 2001 From: Alex Leontiev Date: Wed, 21 Aug 2013 18:16:37 +0800 Subject: [PATCH 18/55] Fixed .rst indentation This caused warnings. 
--- .gitignore | 1 - modules/optim/doc/downhill_simplex_method.rst | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 3161cea79..1f3a89a9f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,5 +7,4 @@ tegra/ .sw[a-z] .*.swp tags -build/ Thumbs.db diff --git a/modules/optim/doc/downhill_simplex_method.rst b/modules/optim/doc/downhill_simplex_method.rst index 5661a5cfc..94d084c23 100644 --- a/modules/optim/doc/downhill_simplex_method.rst +++ b/modules/optim/doc/downhill_simplex_method.rst @@ -140,13 +140,10 @@ are supposed to be set via the setters before the call to this method or the def .. ocv:function:: double optim::DownhillSolver::minimize(InputOutputArray x) - :param x: The initial point, that will become a centroid of an initial simplex. After the algorithm will terminate, it will be setted to the - point where the algorithm stops, the point of possible minimum. + :param x: The initial point, that will become a centroid of an initial simplex. After the algorithm will terminate, it will be setted to the point where the algorithm stops, the point of possible minimum. :return: The value of a function at the point found. -Explain parameters. - optim::createDownhillSolver ------------------------------------ From af74ec60447edd134d5aa38baf43d1ce9c01ac67 Mon Sep 17 00:00:00 2001 From: Alex Leontiev Date: Wed, 28 Aug 2013 00:27:59 +0800 Subject: [PATCH 19/55] Minor fixes In response to the pull request comments by Vadim Pisarevsky. In particular, the following was done: *)cv::reduce use instead of custom code for calculating per-coordinate sum *) naming style of private methods is made consisted with overall -- mixed-case style *) irrelevant create()'s were removed -- I did not know that copyTo() method itself calls create --- modules/optim/src/simplex.cpp | 46 ++++++++++++----------------------- 1 file changed, 15 insertions(+), 31 deletions(-) diff --git a/modules/optim/src/simplex.cpp b/modules/optim/src/simplex.cpp index dd49234b5..f45d0ce0b 100644 --- a/modules/optim/src/simplex.cpp +++ b/modules/optim/src/simplex.cpp @@ -1,5 +1,6 @@ #include "precomp.hpp" #include "debug.hpp" +#include "opencv2/core/core_c.h" namespace cv{namespace optim{ @@ -19,15 +20,14 @@ namespace cv{namespace optim{ TermCriteria _termcrit; Mat _step; private: - inline void compute_coord_sum(Mat_& points,Mat_& coord_sum); - inline void create_initial_simplex(Mat_& simplex,Mat& step); - inline double inner_downhill_simplex(cv::Mat_& p,double MinRange,double MinError,int& nfunk, + inline void createInitialSimplex(Mat_& simplex,Mat& step); + inline double innerDownhillSimplex(cv::Mat_& p,double MinRange,double MinError,int& nfunk, const Ptr& f,int nmax); - inline double try_new_point(Mat_& p,Mat_& y,Mat_& coord_sum,const Ptr& f,int ihi, + inline double tryNewPoint(Mat_& p,Mat_& y,Mat_& coord_sum,const Ptr& f,int ihi, double fac,Mat_& ptry); }; - double DownhillSolverImpl::try_new_point( + double DownhillSolverImpl::tryNewPoint( Mat_& p, Mat_& y, Mat_& coord_sum, @@ -68,7 +68,7 @@ namespace cv{namespace optim{ form a simplex - each row is an ndim vector. On output, nfunk gives the number of function evaluations taken. 
*/ - double DownhillSolverImpl::inner_downhill_simplex( + double DownhillSolverImpl::innerDownhillSimplex( cv::Mat_& p, double MinRange, double MinError, @@ -92,7 +92,7 @@ namespace cv{namespace optim{ nfunk = ndim+1; - compute_coord_sum(p,coord_sum); + reduce(p,coord_sum,0,CV_REDUCE_SUM); for (;;) { @@ -146,16 +146,16 @@ namespace cv{namespace optim{ } nfunk += 2; /*Begin a new iteration. First, reflect the worst point about the centroid of others */ - ytry = try_new_point(p,y,coord_sum,f,ihi,-1.0,buf); + ytry = tryNewPoint(p,y,coord_sum,f,ihi,-1.0,buf); if (ytry <= y(ilo)) { /*If that's better than the best point, go twice as far in that direction*/ - ytry = try_new_point(p,y,coord_sum,f,ihi,2.0,buf); + ytry = tryNewPoint(p,y,coord_sum,f,ihi,2.0,buf); } else if (ytry >= y(inhi)) { /* The new point is worse than the second-highest, but better than the worst so do not go so far in that direction */ ysave = y(ihi); - ytry = try_new_point(p,y,coord_sum,f,ihi,0.5,buf); + ytry = tryNewPoint(p,y,coord_sum,f,ihi,0.5,buf); if (ytry >= ysave) { /* Can't seem to improve things. Contract the simplex to good point in hope to find a simplex landscape. */ @@ -171,7 +171,7 @@ namespace cv{namespace optim{ } } nfunk += ndim; - compute_coord_sum(p,coord_sum); + reduce(p,coord_sum,0,CV_REDUCE_SUM); } } else --(nfunk); /* correct nfunk */ dprintf(("this is simplex on iteration %d\n",nfunk)); @@ -182,17 +182,7 @@ namespace cv{namespace optim{ return res; } - void DownhillSolverImpl::compute_coord_sum(Mat_& points,Mat_& coord_sum){ - for (int j=0;j& simplex,Mat& step){ + void DownhillSolverImpl::createInitialSimplex(Mat_& simplex,Mat& step){ for(int i=1;i<=step.cols;++i) { simplex.row(0).copyTo(simplex.row(i)); @@ -229,8 +219,8 @@ namespace cv{namespace optim{ Mat_ simplex=Mat_(ndim+1,ndim,0.0); simplex.row(0).copyTo(proxy_x); - create_initial_simplex(simplex,_step); - double res = inner_downhill_simplex( + createInitialSimplex(simplex,_step); + double res = innerDownhillSimplex( simplex,_termcrit.epsilon, _termcrit.epsilon, count,_Function,_termcrit.maxCount); simplex.row(0).copyTo(proxy_x); @@ -267,7 +257,6 @@ namespace cv{namespace optim{ return Ptr(DS); } void DownhillSolverImpl::getInitStep(OutputArray step)const{ - step.create(1,_step.cols,CV_64FC1); _step.copyTo(step); } void DownhillSolverImpl::setInitStep(InputArray step){ @@ -275,15 +264,10 @@ namespace cv{namespace optim{ Mat m=step.getMat(); dprintf(("m.cols=%d\nm.rows=%d\n",m.cols,m.rows)); CV_Assert(MIN(m.cols,m.rows)==1 && m.type()==CV_64FC1); - int ndim=MAX(m.cols,m.rows); - if(ndim!=_step.cols){ - _step=Mat_(1,ndim); - } if(m.rows==1){ m.copyTo(_step); }else{ - Mat step_t=Mat_(ndim,1,(double*)_step.data); - m.copyTo(step_t); + transpose(m,_step); } } }} From 9d78b8003ebf9877a3bf47be73d52e5c26551f9a Mon Sep 17 00:00:00 2001 From: Daniel Angelov Date: Sat, 31 Aug 2013 13:39:38 +0300 Subject: [PATCH 20/55] Update on LSD no to use any Mat* (IOArrays instead). Updated to new license. --- modules/imgproc/include/opencv2/imgproc.hpp | 13 +++---- modules/imgproc/src/lsd.cpp | 41 ++++++++++----------- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 24d9f4463..581ba1443 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -904,7 +904,7 @@ class LineSegmentDetector : public Algorithm { public: /** - * Detect lines in the input image with the specified ROI. 
+ * Detect lines in the input image. * * @param _image A grayscale(CV_8UC1) input image. * If only a roi needs to be selected, use @@ -913,8 +913,6 @@ public: * @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. * Returned lines are strictly oriented depending on the gradient. - * @param _roi Return: ROI of the image, where lines are to be found. If specified, the returning - * lines coordinates are image wise. * @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param prec Return: Vector of precisions with which the lines are found. * @param nfa Return: Vector containing number of false alarms in the line region, with precision of 10%. @@ -935,18 +933,19 @@ public: * Should have the size of the image, where the lines were found * @param lines The lines that need to be drawn */ - virtual void drawSegments(InputOutputArray image, InputArray lines) = 0; + virtual void drawSegments(InputOutputArray _image, const InputArray lines) = 0; /** * Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2. * - * @param image The image, where lines will be drawn. - * Should have the size of the image, where the lines were found + * @param size The size of the image, where lines were found. * @param lines1 The first lines that need to be drawn. Color - Blue. * @param lines2 The second lines that need to be drawn. Color - Red. + * @param image Optional image, where lines will be drawn. + * Should have the size of the image, where the lines were found * @return The number of mismatching pixels between lines1 and lines2. */ - virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, Mat* image = 0) = 0; + virtual int compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()) = 0; virtual ~LineSegmentDetector() {}; }; diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index 58d226f0c..8cfe2b2b2 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -1,5 +1,6 @@ /*M/////////////////////////////////////////////////////////////////////////////////////// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, @@ -9,8 +10,7 @@ // License Agreement // For Open Source Computer Vision Library // -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, @@ -185,7 +185,7 @@ public: double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024); /** - * Detect lines in the input image with the specified ROI. + * Detect lines in the input image. * * @param _image A grayscale(CV_8UC1) input image. * If only a roi needs to be selected, use @@ -194,8 +194,6 @@ public: * @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line. * Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. 
* Returned lines are strictly oriented depending on the gradient. - * @param _roi Return: ROI of the image, where lines are to be found. If specified, the returning - * lines coordinates are image wise. * @param width Return: Vector of widths of the regions, where the lines are found. E.g. Width of line. * @param prec Return: Vector of precisions with which the lines are found. * @param nfa Return: Vector containing number of false alarms in the line region, with precision of 10%. @@ -216,18 +214,19 @@ public: * Should have the size of the image, where the lines were found * @param lines The lines that need to be drawn */ - void drawSegments(InputOutputArray image, InputArray lines); + void drawSegments(InputOutputArray _image, const InputArray lines); /** * Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2. * - * @param image The image, where lines will be drawn. - * Should have the size of the image, where the lines were found + * @param size The size of the image, where lines1 and lines2 were found. * @param lines1 The first lines that need to be drawn. Color - Blue. * @param lines2 The second lines that need to be drawn. Color - Red. + * @param image An optional image, where lines will be drawn. + * Should have the size of the image, where the lines were found * @return The number of mismatching pixels between lines1 and lines2. */ - int compareSegments(const Size& size, InputArray lines1, InputArray lines2, Mat* image = 0); + int compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()); private: Mat image; @@ -1186,10 +1185,10 @@ void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, const InputA } -int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, Mat* _image) +int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image) { Size sz = size; - if (_image && _image->size() != size) sz = _image->size(); + if (_image.needed() && _image.size() != size) sz = _image.size(); CV_Assert(sz.area()); Mat_ I1 = Mat_::zeros(sz); @@ -1219,14 +1218,14 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray bitwise_xor(I1, I2, Ixor); int N = countNonZero(Ixor); - if (_image) + if (_image.needed()) { Mat Ig; - if (_image->channels() == 1) + if (_image.channels() == 1) { - cvtColor(*_image, *_image, CV_GRAY2BGR); + cvtColor(_image, _image, CV_GRAY2BGR); } - CV_Assert(_image->isContinuous() && I1.isContinuous() && I2.isContinuous()); + CV_Assert(_image.getMatRef().isContinuous() && I1.isContinuous() && I2.isContinuous()); for (unsigned int i = 0; i < I1.total(); ++i) { @@ -1234,11 +1233,11 @@ int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray uchar i2 = I2.data[i]; if (i1 || i2) { - _image->data[3*i + 1] = 0; - if (i1) _image->data[3*i] = 255; - else _image->data[3*i] = 0; - if (i2) _image->data[3*i + 2] = 255; - else _image->data[3*i + 2] = 0; + _image.getMatRef().data[3*i + 1] = 0; + if (i1) _image.getMatRef().data[3*i] = 255; + else _image.getMatRef().data[3*i] = 0; + if (i2) _image.getMatRef().data[3*i + 2] = 255; + else _image.getMatRef().data[3*i + 2] = 0; } } } From 24e916059fd45123c7e5c93dbbb6060853e0a088 Mon Sep 17 00:00:00 2001 From: Daniel Angelov Date: Sat, 31 Aug 2013 13:46:10 +0300 Subject: [PATCH 21/55] Added LineSegmentDetector documentation and an output image. 
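A short usage sketch of the LineSegmentDetector interface as it stands after the previous patch (drawSegments/compareSegments now take InputOutputArray instead of Mat*). The factory name is taken from the documentation added below; the Ptr template argument and the input file name are assumptions::

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/highgui/highgui.hpp"

    int main()
    {
        cv::Mat image = cv::imread("building.jpg", cv::IMREAD_GRAYSCALE);  // assumed input file

        // Default parameters with standard refinement.
        cv::Ptr<cv::LineSegmentDetector> lsd = cv::createLineSegmentDetectorPtr(cv::LSD_REFINE_STD);

        std::vector<cv::Vec4i> lines;
        lsd->detect(image, lines);                 // each line is (x1, y1, x2, y2)

        // drawSegments draws in place on the passed InputOutputArray.
        cv::Mat drawn;
        cv::cvtColor(image, drawn, cv::COLOR_GRAY2BGR);
        lsd->drawSegments(drawn, lines);

        cv::imshow("detected lines", drawn);
        cv::waitKey();
        return 0;
    }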
--- modules/imgproc/doc/feature_detection.rst | 106 ++++++++++++++++++++++ modules/imgproc/doc/pics/building_lsd.png | Bin 0 -> 50285 bytes 2 files changed, 106 insertions(+) create mode 100644 modules/imgproc/doc/pics/building_lsd.png diff --git a/modules/imgproc/doc/feature_detection.rst b/modules/imgproc/doc/feature_detection.rst index 1c5d29c16..b23675171 100644 --- a/modules/imgproc/doc/feature_detection.rst +++ b/modules/imgproc/doc/feature_detection.rst @@ -496,6 +496,110 @@ And this is the output of the above program in case of the probabilistic Hough t .. image:: pics/houghp.png +.. seealso:: + + :ocv:class:`LineSegmentDetector` + + + +LineSegmentDetector +------------------- +Line segment detector class, following the algorithm described at [Rafael12]_. + +.. ocv:class:: LineSegmentDetector : public Algorithm + + +createLineSegmentDetectorPtr +---------------------------- +Creates a smart pointer to a LineSegmentDetector object and initializes it. + +.. ocv:function:: Ptr createLineSegmentDetectorPtr(int _refine = LSD_REFINE_STD, double _scale = 0.8, double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5, double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024) + + :param _refine: The way found lines will be refined: + + * **LSD_REFINE_NONE** - No refinement applied. + + * **LSD_REFINE_STD** - Standard refinement is applied. E.g. breaking arches into smaller straighter line approximations. + + * **LSD_REFINE_ADV** - Advanced refinement. Number of false alarms is calculated, lines are refined through increase of precision, decrement in size, etc. + + :param scale: The scale of the image that will be used to find the lines. Range (0..1]. + + :param sigma_scale: Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale. + + :param quant: Bound to the quantization error on the gradient norm. + + :param ang_th: Gradient angle tolerance in degrees. + + :param log_eps: Detection threshold: -log10(NFA) > log_eps. Used only when advancent refinement is chosen. + + :param density_th: Minimal density of aligned region points in the enclosing rectangle. + + :param n_bins: Number of bins in pseudo-ordering of gradient modulus. + +The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want to edit those, as to tailor it for their own application. + + +LineSegmentDetector::detect +--------------------------- +Finds lines in the input image. See the lsd_lines.cpp sample for possible usage. + +.. ocv:function:: void LineSegmentDetector::detect(const InputArray _image, OutputArray _lines, OutputArray width = noArray(), OutputArray prec = noArray(), OutputArray nfa = noArray()) + + :param _image A grayscale (CV_8UC1) input image. + If only a roi needs to be selected, use :: + lsd_ptr->detect(image(roi), lines, ...); + lines += Scalar(roi.x, roi.y, roi.x, roi.y); + + :param lines: A vector of Vec4i elements specifying the beginning and ending point of a line. Where Vec4i is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly oriented depending on the gradient. + + :param width: Vector of widths of the regions, where the lines are found. E.g. Width of line. + + :param prec: Vector of precisions with which the lines are found. + + :param nfa: Vector containing number of false alarms in the line region, with precision of 10%. The bigger the value, logarithmically better the detection. 
+ + * -1 corresponds to 10 mean false alarms + + * 0 corresponds to 1 mean false alarm + + * 1 corresponds to 0.1 mean false alarms + + This vector will be calculated only when the object's refinement type is LSD_REFINE_ADV. + +This is the output of the default parameters of the algorithm on the image shown above. + +.. image:: pics/building_lsd.png + +.. note:: + + * An example using the LineSegmentDetector can be found at opencv_source_code/samples/cpp/lsd_lines.cpp + +LineSegmentDetector::drawSegments +--------------------------------- +Draws the line segments on a given image. + +.. ocv:function:: void LineSegmentDetector::drawSegments(InputOutputArray _image, const InputArray lines) + + :param image: The image, where the lines will be drawn. Should be bigger than or equal to the image where the lines were found. + + :param lines: A vector of the lines that need to be drawn. + + +LineSegmentDetector::compareSegments +------------------------------------ +Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels. + +.. ocv:function:: int LineSegmentDetector::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()) + + :param size: The size of the image, where lines1 and lines2 were found. + + :param lines1: The first group of lines that needs to be drawn. It is visualized in blue color. + + :param lines2: The second group of lines. They are visualized in red color. + + :param image: Optional image, where the lines will be drawn. If the image has one channel, it is converted to a color (3-channel) image so that lines1 and lines2 can be drawn in the above mentioned colors. + preCornerDetect @@ -542,3 +646,5 @@ The corners can be found as local maximums of the functions, as shown below: :: .. [Shi94] J. Shi and C. Tomasi. *Good Features to Track*. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 593-600, June 1994. .. [Yuen90] Yuen, H. K. and Princen, J. and Illingworth, J. and Kittler, J., *Comparative study of Hough transform methods for circle finding*. Image Vision Comput. 8 1, pp 71–77 (1990) + +.. [Rafael12] Rafael Grompone von Gioi, Jérémie Jakubowicz, Jean-Michel Morel, and Gregory Randall, LSD: a Line Segment Detector, Image Processing On Line, vol. 2012. http://dx.doi.org/10.5201/ipol.2012.gjmr-lsd diff --git a/modules/imgproc/doc/pics/building_lsd.png b/modules/imgproc/doc/pics/building_lsd.png new file mode 100644 index 0000000000000000000000000000000000000000..747029a65d914ac2a4e05621b45587a13c7b3947 GIT binary patch literal 50285 [50285 bytes of base85-encoded PNG data omitted]
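The patch that follows switches the face-detection tutorial's loop counters from int to size_t. As a brief illustration (a hypothetical helper, not taken from the tutorial): std::vector::size() returns size_t, so indexing with size_t keeps the comparison free of signed/unsigned warnings.

    #include <vector>
    #include <opencv2/core/core.hpp>

    // Hypothetical helper that iterates over detectMultiScale results.
    static void processFaces(const std::vector<cv::Rect>& faces)
    {
        // faces.size() returns size_t (unsigned); using size_t for the index
        // avoids the signed/unsigned comparison the tutorial previously had.
        for (size_t i = 0; i < faces.size(); i++)
        {
            const cv::Rect& face = faces[i];
            (void)face; // drawing / further processing omitted in this sketch
        }
    }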
z_hh3SV14j?-%^|`7u2oSop)rE?0TIDlRQH{ZuwDRoWxTbMPpi!^sIpei7C;3b@fie zKp*u&y>{^1C=)hHDFK2?vvFXud-Fi0Cyg^^iOBwbi8L!&=Dy~MTskLQBj9{yFwFl} zntKOlV7FLfKDBN0I-T`|U0!=}&*f{S?#01lB9z5lWgjhL0LLeV{984LNm06hjd5-c=(<%e8{`Ia;LGZ6d|0 z=>qVZzf^3A%zwN188jS~G))K&-)*|Q>N%Nb#$Fiyn`52E$EV>e7J;Jyypy4>JTQ=?%@bOUloh99+fLDR~F2xC$A*RzGOo@VT%c|7FkA%ybtZnYyQl&I!(9>o)+|1u zWSEX>*)<$E{G7kb3tpBa05*R8{YWg$BJO1({FZ74fLw~~tdL$3k~Qq{eB_N4X30M( zQZW$L$MOWa%o5>>qbR{JvX@F?1iu-G&c%db_fV>nV!6KqLq)G!)lVR>pf;O*$6HyMFn)>|691Xs z+(VGBM>;P_O%foKvEMA?B!C%}UM;}yAsQxZkdWbDtm}2j(TS9>!WG2m9lY55ldju=cZH(H3>>i1~ro|hpZ=0c84VLb&$yNUQkGt?JF|WXhLAIx9FP&y!)OO2?C5IA zXv)A~b4{JkP=s2L7m1N`2o7!}WztP-GmQt*#CcdGOe3!Y$MaJoBJwEdCYP=l6 zAW4Q84yE_F6!-?-q-!ot7T5kbnBB+e2HE%&{UuyDi@DB*ULb{V88Hbp4&=gkXmsDY zwlaR@$t9&n+a(BY5Cu;JjWFTKIo6zyWa~y;TO1kzt=N*;ZO(j=96sE`N5nS3H~xx) zj1s^)lNeQUIz*%E6lyU^%5D?-Sm}9_7BmumJez?s(Rxt0!4Y?E(W%#dtsngvin0ea zaQ~pIoUEH*+#Zf-nO(2b~O(GigV0@PL=7lsMwOP8VOoUcC{l5xD6ktpr2T# zUv~Kg3{E?f^Vq!6vXj9?Mom{-Z`uR3jOz!3pCRQzht1Y@&zBU}tM+_3o9x=BWjhD^ zNIaZr5bKkG3qUF8_)l#eD8rKfdeSwnou5{uu8tF<1kUcSvmpva=vJ=xCaR_BEa)KmMwG%{0P z>cDe@;|U5cxUi0CocS_Aecas<=Dv07wC?U7b!VPXK0eZ|hKp2szKk36QLjBzR3512 zXZ+*EPyhVzRyEo8d;fm^tErC}79F)Xx>w(swCMS6BemR{?L)8W&ZGypeQ#}8xo($# zQhI=K^yuh}r1W>4E5M9tjp+py+==E0v-Bk`UZ3lFp~%lR*h7A46rj7+7I?RLak+cL zW5MlLzppOn4?L5ho;Hn8fQoQ%SJ~~1kIU_*k&H>V+M+6Tg+ZnYW2wXWyxl`pr6ybP z!dq=wt*dM{-4CmyxCC+AlIH$v!Cg}Y#e6gM{)Wz3`qI>b5=SXVq3T>0OcfKW0}J1H z;aG3kR|eyw957Hm3+v2M?6RLX%T!_eGMw&pgpiCA^b4i)Roz_E!A!+vYE9={C1!Z2 zPB;9pzTZ>z!upk&4?GcWW)FJo32xgkvEO^bE9;$X&&_2c-*oSL8vU8`Z5{T&v*~ly zjcSOg0*jSV5NGo1j`aBzt|-b`dA!d*^Va;t`BRQMIX9fC?|(rS zmIo$!l5(BJ`TBkzQ^oDneaggvP1e=cdHR0xA7RG7$}=?-(+@Cu*EBp}S<$&a>UG^P zo@0pj`!;)Th;jRICovi1ND2rr_J8F4lFiUZIrrimdP7%Oal+t>vyjB`um?|hE4Rd{m*XX-xVWrPI z?0iAr|FBH8f3utcEezUFG0{Fm;aK}nwsOl<(GcoB;tX7xtSY7o>#vM+(hGiw5EX2e z0~b-%Le}U|BQSs8*DyOxzAiqdGqAB~51eY?P(?ouVT4N&f9F$f@+!k}X0v~?{`F8J z|EF-ek$7sIl@6w91uGC3b)LurcxyLRM4Bp=XB1T6@T4B31gSksYzL1-)%@Rs!w)^= zb3Oj?HR(bXT#2W?`S{uDEOBLsfTziMv19alE6epm9U?Z z-8)kYUegus)(!8nM>Xq)v-2JAV>laS-p6)?(#UmHHPi7pbi-}m{cNR<%gV3e zdhzUNS^;H*(+d{r`%f9TNZU{6U3oLL3DOGQ&a1obns*@tj|0Y>5Vbq3%Rzo9yVG1yLIp^DKP_2kNh>r$WK+PS>q*zsa$gZPae~%h z?4u4RheQk5X{>N_C)?qC-#LZ1x!ZKq_(AF)d}60Qv4tnv2Vyq@T$A+u5U4Zt;oiw$@2_hJg8R}IWa?YvthL0_AlSMeQ^P>~yEEz$SmRqh`DmV!=iBRQ(Cse(Sxjn2T$g0;f~1wEX2b?!w5 z=xD|F)MfoQ%20j%eVyc!9-+0Kle*hB-EfBqVUS<*_tOR1x7a4!!0~VS?YsZv+a&F` ze7j#ad{hlWXfYt$8%Ad!7c?A%C;dN>QgKlYfoD>2|1PKR@-IX!Va0=A^hb3ex&=To z_x`Ov7i$s-rUOUd1#}#KuAsvBXo`yBcc#jn`S+n^VFgU;kN)k;=F{aT9iKnM|Kej4 LA8-G~lfV7{ Date: Sat, 31 Aug 2013 23:35:03 +1000 Subject: [PATCH 22/55] changed int -> size_t when accessing std::vector --- .../objdetect/cascade_classifier/cascade_classifier.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst index 03080fec5..8a4f25cc2 100644 --- a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst +++ b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.rst @@ -90,7 +90,7 @@ This tutorial code's is shown lines below. 
You can also download it from `here < //-- Detect faces face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) ); - for( int i = 0; i < faces.size(); i++ ) + for( size_t i = 0; i < faces.size(); i++ ) { Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 ); ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 ); @@ -101,7 +101,7 @@ This tutorial code's is shown lines below. You can also download it from `here < //-- In each face, detect eyes eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) ); - for( int j = 0; j < eyes.size(); j++ ) + for( size_t j = 0; j < eyes.size(); j++ ) { Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 ); int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 ); From 3c2a8912ee3b239ac8b22de4233de82eb7f7007f Mon Sep 17 00:00:00 2001 From: peng xiao Date: Mon, 2 Sep 2013 10:06:01 +0800 Subject: [PATCH 23/55] Let clAmdBlas library initialize once during program lifetime. --- modules/ocl/src/gemm.cpp | 47 ++++++++++++++++++++++++++++-- modules/ocl/src/initialization.cpp | 3 ++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/modules/ocl/src/gemm.cpp b/modules/ocl/src/gemm.cpp index 44f23da69..6e04baca4 100644 --- a/modules/ocl/src/gemm.cpp +++ b/modules/ocl/src/gemm.cpp @@ -46,16 +46,59 @@ #include #include "precomp.hpp" +namespace cv { namespace ocl { + +// used for clAmdBlas library to avoid redundant setup/teardown +void clBlasSetup(); +void clBlasTeardown(); + +}} /* namespace cv { namespace ocl */ + + #if !defined HAVE_CLAMDBLAS void cv::ocl::gemm(const oclMat&, const oclMat&, double, const oclMat&, double, oclMat&, int) { CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented"); } + +void cv::ocl::clBlasSetup() +{ + CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented"); +} + +void cv::ocl::clBlasTeardown() +{ + CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented"); +} + #else #include "clAmdBlas.h" using namespace cv; +static bool clBlasInitialized = false; +static Mutex cs; + +void cv::ocl::clBlasSetup() +{ + AutoLock al(cs); + if(!clBlasInitialized) + { + openCLSafeCall(clAmdBlasSetup()); + clBlasInitialized = true; + } +} + +void cv::ocl::clBlasTeardown() +{ + AutoLock al(cs); + if(clBlasInitialized) + { + clAmdBlasTeardown(); + clBlasInitialized = false; + } +} + void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha, const oclMat &src3, double beta, oclMat &dst, int flags) { @@ -71,7 +114,8 @@ void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha, dst.create(src1.rows, src2.cols, src1.type()); dst.setTo(Scalar::all(0)); } - openCLSafeCall( clAmdBlasSetup() ); + + clBlasSetup(); const clAmdBlasTranspose transA = (cv::GEMM_1_T & flags) ? clAmdBlasTrans : clAmdBlasNoTrans; const clAmdBlasTranspose transB = (cv::GEMM_2_T & flags) ? 
clAmdBlasTrans : clAmdBlasNoTrans; @@ -156,6 +200,5 @@ void cv::ocl::gemm(const oclMat &src1, const oclMat &src2, double alpha, } break; } - clAmdBlasTeardown(); } #endif diff --git a/modules/ocl/src/initialization.cpp b/modules/ocl/src/initialization.cpp index b990e09fe..564b40357 100644 --- a/modules/ocl/src/initialization.cpp +++ b/modules/ocl/src/initialization.cpp @@ -68,6 +68,7 @@ namespace cv namespace ocl { extern void fft_teardown(); + extern void clBlasTeardown(); /* * The binary caching system to eliminate redundant program source compilation. * Strictly, this is not a cache because we do not implement evictions right now. @@ -1050,6 +1051,7 @@ namespace cv void Info::release() { fft_teardown(); + clBlasTeardown(); impl->release(); impl = new Impl; DeviceName.clear(); @@ -1058,6 +1060,7 @@ namespace cv Info::~Info() { fft_teardown(); + clBlasTeardown(); impl->release(); } From 7e638cb0b54b4c37bcfdbcc8a8191fa5f986fbfb Mon Sep 17 00:00:00 2001 From: StevenPuttemans Date: Fri, 30 Aug 2013 14:21:11 +0200 Subject: [PATCH 24/55] Bugfix 3115: Added not to documentation for python version for facerecognizer interface + white + whitespaces remove --- modules/contrib/doc/facerec/facerec_api.rst | 2 ++ samples/python2/facerec_demo.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/modules/contrib/doc/facerec/facerec_api.rst b/modules/contrib/doc/facerec/facerec_api.rst index 9e8170d48..3100cfd8f 100644 --- a/modules/contrib/doc/facerec/facerec_api.rst +++ b/modules/contrib/doc/facerec/facerec_api.rst @@ -70,6 +70,8 @@ Moreover every :ocv:class:`FaceRecognizer` supports the: * **Loading/Saving** the model state from/to a given XML or YAML. +.. note:: When using the FaceRecognizer interface in combination with Python, please stick to Python 2. Some underlying scripts like create_csv will not work in other versions, like Python 3. + Setting the Thresholds +++++++++++++++++++++++ diff --git a/samples/python2/facerec_demo.py b/samples/python2/facerec_demo.py index 1b0adcc21..9eeb04e0b 100755 --- a/samples/python2/facerec_demo.py +++ b/samples/python2/facerec_demo.py @@ -31,6 +31,11 @@ # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +# ------------------------------------------------------------------------------------------------ +# Note: +# When using the FaceRecognizer interface in combination with Python, please stick to Python 2. +# Some underlying scripts like create_csv will not work in other versions, like Python 3. 
+# ------------------------------------------------------------------------------------------------ import os import sys From e6ec3dd17f9fe5165de49106c935fc1117f90615 Mon Sep 17 00:00:00 2001 From: kdrobnyh Date: Sun, 18 Aug 2013 02:13:44 +0400 Subject: [PATCH 25/55] Add IPP support in resize, warpAffine, warpPerspective functions --- modules/imgproc/src/imgwarp.cpp | 311 ++++++++++++++++++++++++++++++++ 1 file changed, 311 insertions(+) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index e6c189421..4c9063da5 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -50,9 +50,73 @@ #include #include +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +static IppStatus sts = ippInit(); +#endif + namespace cv { +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + typedef IppStatus (CV_STDCALL* ippiSetFunc)(const void*, void *, int, IppiSize); + typedef IppStatus (CV_STDCALL* ippiWarpPerspectiveBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [3][3], int); + typedef IppStatus (CV_STDCALL* ippiWarpAffineBackFunc)(const void*, IppiSize, int, IppiRect, void *, int, IppiRect, double [2][3], int); + typedef IppStatus (CV_STDCALL* ippiResizeSqrPixelFunc)(const void*, IppiSize, int, IppiRect, void*, int, IppiRect, double, double, double, double, int, Ipp8u *); + + template + bool IPPSetSimple(cv::Scalar value, void *dataPointer, int step, IppiSize &size, ippiSetFunc func) + { + Type values[channels]; + for( int i = 0; i < channels; i++ ) + values[i] = (Type)value[i]; + return func(values, dataPointer, step, size) >= 0; + } + + bool IPPSet(const cv::Scalar &value, void *dataPointer, int step, IppiSize &size, int channels, int depth) + { + if( channels == 1 ) + { + switch( depth ) + { + case CV_8U: + return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0; + case CV_16U: + return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0; + case CV_32F: + return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0; + } + } + else + { + if( channels == 3 ) + { + switch( depth ) + { + case CV_8U: + return IPPSetSimple<3, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C3R); + case CV_16U: + return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R); + case CV_32F: + return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R); + } + } + else if( channels == 4 ) + { + switch( depth ) + { + case CV_8U: + return IPPSetSimple<4, Ipp8u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_8u_C4R); + case CV_16U: + return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R); + case CV_32F: + return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R); + } + } + } + return false; + } +#endif + /************** interpolation formulas and tables ***************/ const int INTER_RESIZE_COEF_BITS=11; @@ -1604,6 +1668,45 @@ static int computeResizeAreaTab( int ssize, int dsize, int cn, double scale, Dec return k; } +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +class IPPresizeInvoker : + public ParallelLoopBody +{ +public: + IPPresizeInvoker(Mat &_src, Mat &_dst, double &_inv_scale_x, double &_inv_scale_y, int _mode, ippiResizeSqrPixelFunc _func, bool *_ok) : + ParallelLoopBody(), src(_src), dst(_dst), inv_scale_x(_inv_scale_x), inv_scale_y(_inv_scale_y), mode(_mode), func(_func), ok(_ok) + { + *ok = true; + } + + 
virtual void operator() (const Range& range) const + { + int cn = src.channels(); + IppiRect srcroi = { 0, range.start, src.cols, range.end - range.start }; + int dsty = CV_IMIN(cvRound(range.start * inv_scale_y), dst.rows); + int dstwidth = CV_IMIN(cvRound(src.cols * inv_scale_x), dst.cols); + int dstheight = CV_IMIN(cvRound(range.end * inv_scale_y), dst.rows); + IppiRect dstroi = { 0, dsty, dstwidth, dstheight - dsty }; + int bufsize; + ippiResizeGetBufSize( srcroi, dstroi, cn, mode, &bufsize ); + Ipp8u *buf; + buf = ippsMalloc_8u( bufsize ); + IppStatus sts; + if( func( src.data, ippiSize(src.cols, src.rows), (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, inv_scale_x, inv_scale_y, 0, 0, mode, buf ) < 0 ) + *ok = false; + ippsFree(buf); + } +private: + Mat &src; + Mat &dst; + double inv_scale_x; + double inv_scale_y; + int mode; + ippiResizeSqrPixelFunc func; + bool *ok; + const IPPresizeInvoker& operator= (const IPPresizeInvoker&); +}; +#endif } @@ -1745,6 +1848,39 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, int depth = src.depth(), cn = src.channels(); double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y; int k, sx, sy, dx, dy; + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + int mode = + interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : + interpolation == INTER_NEAREST ? IPPI_INTER_NN : + interpolation == INTER_CUBIC ? IPPI_INTER_CUBIC : + interpolation == INTER_AREA && inv_scale_x * inv_scale_y > 1 ? IPPI_INTER_NN : + 0; + int type = src.type(); + ippiResizeSqrPixelFunc ippFunc = + type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R : + type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R : + type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R : + type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R : + type == CV_16UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C3R : + type == CV_16UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C4R : + type == CV_16SC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C1R : + type == CV_16SC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C3R : + type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R : + type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R : + type == CV_32FC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R : + type == CV_32FC4 ? 
(ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R : + 0; + if( ippFunc && mode ) + { + bool ok; + Range range(0, src.rows); + IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, ippFunc, &ok); + parallel_for_(range, invoker, dst.total()/(double)(1<<16)); + if( ok ) + return; + } +#endif if( interpolation == INTER_NEAREST ) { @@ -3257,6 +3393,49 @@ private: double *M; }; +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +class IPPwarpAffineInvoker : + public ParallelLoopBody +{ +public: + IPPwarpAffineInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[2][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpAffineBackFunc _func, bool *_ok) : + ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok) + { + *ok = true; + } + + virtual void operator() (const Range& range) const + { + IppiSize srcsize = { src.cols, src.rows }; + IppiRect srcroi = { 0, 0, src.cols, src.rows }; + IppiRect dstroi = { 0, range.start, dst.cols, range.end - range.start }; + int cnn = src.channels(); + if( borderType == BORDER_CONSTANT ) + { + IppiSize setSize = { dst.cols, range.end - range.start }; + void *dataPointer = dst.data + dst.step[0] * range.start; + if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) ) + { + *ok = false; + return; + } + } + if( func( src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode ) < 0) ////Aug 2013: problem in IPP 7.1, 8.0 : sometimes function return ippStsCoeffErr + *ok = false; + } +private: + Mat &src; + Mat &dst; + double (&coeffs)[2][3]; + int mode; + int borderType; + Scalar borderValue; + ippiWarpAffineBackFunc func; + bool *ok; + const IPPwarpAffineInvoker& operator= (const IPPwarpAffineInvoker&); +}; +#endif + } @@ -3303,6 +3482,50 @@ void cv::warpAffine( InputArray _src, OutputArray _dst, const int AB_BITS = MAX(10, (int)INTER_BITS); const int AB_SCALE = 1 << AB_BITS; +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + int depth = src.depth(); + int channels = src.channels(); + if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && + ( channels == 1 || channels == 3 || channels == 4 ) && + ( borderType == cv::BORDER_TRANSPARENT || ( borderType == cv::BORDER_CONSTANT ) ) ) + { + int type = src.type(); + ippiWarpAffineBackFunc ippFunc = + type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R : + type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R : + type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R : + type == CV_16UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C1R : + type == CV_16UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C3R : + type == CV_16UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_16u_C4R : + type == CV_32FC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C1R : + type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R : + type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R : + 0; + int mode = + flags == INTER_LINEAR ? IPPI_INTER_LINEAR : + flags == INTER_NEAREST ? IPPI_INTER_NN : + flags == INTER_CUBIC ? 
IPPI_INTER_CUBIC : + 0; + if( mode && ippFunc ) + { + double coeffs[2][3]; + for( int i = 0; i < 2; i++ ) + { + for( int j = 0; j < 3; j++ ) + { + coeffs[i][j] = matM.at(i, j); + } + } + bool ok; + Range range(0, dst.rows); + IPPwarpAffineInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok); + parallel_for_(range, invoker, dst.total()/(double)(1<<16)); + if( ok ) + return; + } + } +#endif + for( x = 0; x < dst.cols; x++ ) { adelta[x] = saturate_cast(M[0]*x*AB_SCALE); @@ -3410,6 +3633,50 @@ private: Scalar borderValue; }; +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +class IPPwarpPerspectiveInvoker : + public ParallelLoopBody +{ +public: + IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) : + ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok) + { + *ok = true; + } + + virtual void operator() (const Range& range) const + { + IppiSize srcsize = {src.cols, src.rows}; + IppiRect srcroi = {0, 0, src.cols, src.rows}; + IppiRect dstroi = {0, range.start, dst.cols, range.end - range.start}; + int cnn = src.channels(); + + if( borderType == BORDER_CONSTANT ) + { + IppiSize setSize = {dst.cols, range.end - range.start}; + void *dataPointer = dst.data + dst.step[0] * range.start; + if( !IPPSet( borderValue, dataPointer, (int)dst.step[0], setSize, cnn, src.depth() ) ) + { + *ok = false; + return; + } + } + if( func(src.data, srcsize, (int)src.step[0], srcroi, dst.data, (int)dst.step[0], dstroi, coeffs, mode) < 0) + *ok = false; + } +private: + Mat &src; + Mat &dst; + double (&coeffs)[3][3]; + int mode; + int borderType; + const Scalar borderValue; + ippiWarpPerspectiveBackFunc func; + bool *ok; + const IPPwarpPerspectiveInvoker& operator= (const IPPwarpPerspectiveInvoker&); +}; +#endif + } void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, @@ -3439,6 +3706,50 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, if( !(flags & WARP_INVERSE_MAP) ) invert(matM, matM); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + int depth = src.depth(); + int channels = src.channels(); + if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && + ( channels == 1 || channels == 3 || channels == 4 ) && + ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) ) + { + int type = src.type(); + ippiWarpPerspectiveBackFunc ippFunc = + type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R : + type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R : + type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R : + type == CV_16UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C1R : + type == CV_16UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C3R : + type == CV_16UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_16u_C4R : + type == CV_32FC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C1R : + type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R : + type == CV_32FC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R : + 0; + int mode = + flags == INTER_LINEAR ? IPPI_INTER_LINEAR : + flags == INTER_NEAREST ? IPPI_INTER_NN : + flags == INTER_CUBIC ? 
IPPI_INTER_CUBIC : + 0; + if( mode && ippFunc ) + { + double coeffs[3][3]; + for( int i = 0; i < 3; i++ ) + { + for( int j = 0; j < 3; j++ ) + { + coeffs[i][j] = matM.at(i, j); + } + } + bool ok; + Range range(0, dst.rows); + IPPwarpPerspectiveInvoker invoker(src, dst, coeffs, mode, borderType, borderValue, ippFunc, &ok); + parallel_for_(range, invoker, dst.total()/(double)(1<<16)); + if( ok ) + return; + } + } +#endif Range range(0, dst.rows); warpPerspectiveInvoker invoker(src, dst, M, interpolation, borderType, borderValue); From e85e4d3ab90da55d0461c63f1502278ee34f4143 Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Mon, 2 Sep 2013 18:34:50 +0400 Subject: [PATCH 26/55] fixed bug in IPP-accelerated morphology; added several IPP imgwarp functions (by Klim) --- modules/imgproc/src/imgwarp.cpp | 57 +++++++++++++++------------------ modules/imgproc/src/morph.cpp | 29 ++++++++--------- 2 files changed, 39 insertions(+), 47 deletions(-) diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 4c9063da5..a4fda282d 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -52,7 +52,7 @@ #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) static IppStatus sts = ippInit(); -#endif +#endif namespace cv { @@ -76,15 +76,15 @@ namespace cv { if( channels == 1 ) { - switch( depth ) - { - case CV_8U: + switch( depth ) + { + case CV_8U: return ippiSet_8u_C1R((Ipp8u)value[0], (Ipp8u *)dataPointer, step, size) >= 0; case CV_16U: return ippiSet_16u_C1R((Ipp16u)value[0], (Ipp16u *)dataPointer, step, size) >= 0; case CV_32F: return ippiSet_32f_C1R((Ipp32f)value[0], (Ipp32f *)dataPointer, step, size) >= 0; - } + } } else { @@ -98,7 +98,7 @@ namespace cv return IPPSetSimple<3, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C3R); case CV_32F: return IPPSetSimple<3, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C3R); - } + } } else if( channels == 4 ) { @@ -110,7 +110,7 @@ namespace cv return IPPSetSimple<4, Ipp16u>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_16u_C4R); case CV_32F: return IPPSetSimple<4, Ipp32f>(value, dataPointer, step, size, (ippiSetFunc)ippiSet_32f_C4R); - } + } } } return false; @@ -1848,17 +1848,12 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, int depth = src.depth(), cn = src.channels(); double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y; int k, sx, sy, dx, dy; - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - int mode = - interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : - interpolation == INTER_NEAREST ? IPPI_INTER_NN : - interpolation == INTER_CUBIC ? IPPI_INTER_CUBIC : - interpolation == INTER_AREA && inv_scale_x * inv_scale_y > 1 ? IPPI_INTER_NN : - 0; + int mode = interpolation == INTER_LINEAR ? IPPI_INTER_LINEAR : 0; int type = src.type(); - ippiResizeSqrPixelFunc ippFunc = - type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R : + ippiResizeSqrPixelFunc ippFunc = + type == CV_8UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C1R : type == CV_8UC3 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C3R : type == CV_8UC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_8u_C4R : type == CV_16UC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16u_C1R : @@ -1869,9 +1864,9 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, type == CV_16SC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_16s_C4R : type == CV_32FC1 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C1R : type == CV_32FC3 ? 
(ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C3R : - type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R : + type == CV_32FC4 ? (ippiResizeSqrPixelFunc)ippiResizeSqrPixel_32f_C4R : 0; - if( ippFunc && mode ) + if( ippFunc && mode != 0 ) { bool ok; Range range(0, src.rows); @@ -3485,12 +3480,12 @@ void cv::warpAffine( InputArray _src, OutputArray _dst, #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) int depth = src.depth(); int channels = src.channels(); - if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && - ( channels == 1 || channels == 3 || channels == 4 ) && + if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && + ( channels == 1 || channels == 3 || channels == 4 ) && ( borderType == cv::BORDER_TRANSPARENT || ( borderType == cv::BORDER_CONSTANT ) ) ) { int type = src.type(); - ippiWarpAffineBackFunc ippFunc = + ippiWarpAffineBackFunc ippFunc = type == CV_8UC1 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C1R : type == CV_8UC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C3R : type == CV_8UC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_8u_C4R : @@ -3501,10 +3496,10 @@ void cv::warpAffine( InputArray _src, OutputArray _dst, type == CV_32FC3 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C3R : type == CV_32FC4 ? (ippiWarpAffineBackFunc)ippiWarpAffineBack_32f_C4R : 0; - int mode = + int mode = flags == INTER_LINEAR ? IPPI_INTER_LINEAR : flags == INTER_NEAREST ? IPPI_INTER_NN : - flags == INTER_CUBIC ? IPPI_INTER_CUBIC : + flags == INTER_CUBIC ? IPPI_INTER_CUBIC : 0; if( mode && ippFunc ) { @@ -3525,7 +3520,7 @@ void cv::warpAffine( InputArray _src, OutputArray _dst, } } #endif - + for( x = 0; x < dst.cols; x++ ) { adelta[x] = saturate_cast(M[0]*x*AB_SCALE); @@ -3638,7 +3633,7 @@ class IPPwarpPerspectiveInvoker : public ParallelLoopBody { public: - IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) : + IPPwarpPerspectiveInvoker(Mat &_src, Mat &_dst, double (&_coeffs)[3][3], int &_interpolation, int &_borderType, const Scalar &_borderValue, ippiWarpPerspectiveBackFunc _func, bool *_ok) : ParallelLoopBody(), src(_src), dst(_dst), mode(_interpolation), coeffs(_coeffs), borderType(_borderType), borderValue(_borderValue), func(_func), ok(_ok) { *ok = true; @@ -3706,16 +3701,16 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, if( !(flags & WARP_INVERSE_MAP) ) invert(matM, matM); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) int depth = src.depth(); int channels = src.channels(); - if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && - ( channels == 1 || channels == 3 || channels == 4 ) && + if( ( depth == CV_8U || depth == CV_16U || depth == CV_32F ) && + ( channels == 1 || channels == 3 || channels == 4 ) && ( borderType == cv::BORDER_TRANSPARENT || borderType == cv::BORDER_CONSTANT ) ) { int type = src.type(); - ippiWarpPerspectiveBackFunc ippFunc = + ippiWarpPerspectiveBackFunc ippFunc = type == CV_8UC1 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C1R : type == CV_8UC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C3R : type == CV_8UC4 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_8u_C4R : @@ -3726,10 +3721,10 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, type == CV_32FC3 ? (ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C3R : type == CV_32FC4 ? 
(ippiWarpPerspectiveBackFunc)ippiWarpPerspectiveBack_32f_C4R : 0; - int mode = + int mode = flags == INTER_LINEAR ? IPPI_INTER_LINEAR : flags == INTER_NEAREST ? IPPI_INTER_NN : - flags == INTER_CUBIC ? IPPI_INTER_CUBIC : + flags == INTER_CUBIC ? IPPI_INTER_CUBIC : 0; if( mode && ippFunc ) { diff --git a/modules/imgproc/src/morph.cpp b/modules/imgproc/src/morph.cpp index b8bb7cf38..19636bc96 100644 --- a/modules/imgproc/src/morph.cpp +++ b/modules/imgproc/src/morph.cpp @@ -1213,11 +1213,10 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne } static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, - InputArray _kernel, - const Point &anchor, int iterations, + const Mat& _kernel, Point anchor, int iterations, int borderType, const Scalar &borderValue) { - Mat src = _src.getMat(), kernel = _kernel.getMat(); + Mat src = _src.getMat(), kernel = _kernel; if( !( src.depth() == CV_8U || src.depth() == CV_32F ) || ( iterations > 1 ) || !( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue()) ) || !( op == MORPH_DILATE || op == MORPH_ERODE) ) @@ -1248,9 +1247,6 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, } Size ksize = kernel.data ? kernel.size() : Size(3,3); - Point normanchor = normalizeAnchor(anchor, ksize); - - CV_Assert( normanchor.inside(Rect(0, 0, ksize.width, ksize.height)) ); _dst.create( src.size(), src.type() ); Mat dst = _dst.getMat(); @@ -1265,7 +1261,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, if( !kernel.data ) { ksize = Size(1+iterations*2,1+iterations*2); - normanchor = Point(iterations, iterations); + anchor = Point(iterations, iterations); rectKernel = true; iterations = 1; } @@ -1273,7 +1269,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, { ksize = Size(ksize.width + (iterations-1)*(ksize.width-1), ksize.height + (iterations-1)*(ksize.height-1)), - normanchor = Point(normanchor.x*iterations, normanchor.y*iterations); + anchor = Point(anchor.x*iterations, anchor.y*iterations); kernel = Mat(); rectKernel = true; iterations = 1; @@ -1283,7 +1279,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, if( iterations > 1 ) return false; - return IPPMorphReplicate( op, src, dst, kernel, ksize, normanchor, rectKernel ); + return IPPMorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel ); } #endif @@ -1292,18 +1288,19 @@ static void morphOp( int op, InputArray _src, OutputArray _dst, Point anchor, int iterations, int borderType, const Scalar& borderValue ) { - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - if( IPPMorphOp(op, _src, _dst, _kernel, anchor, iterations, borderType, borderValue) ) - return; -#endif - - Mat src = _src.getMat(), kernel = _kernel.getMat(); + Mat kernel = _kernel.getMat(); Size ksize = kernel.data ? kernel.size() : Size(3,3); anchor = normalizeAnchor(anchor, ksize); CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) ); +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if( IPPMorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue) ) + return; +#endif + + Mat src = _src.getMat(); + _dst.create( src.size(), src.type() ); Mat dst = _dst.getMat(); From 7b3e3f69fb8600a43d51a464a812f3c00d770194 Mon Sep 17 00:00:00 2001 From: peng xiao Date: Tue, 3 Sep 2013 09:31:13 +0800 Subject: [PATCH 27/55] Modify according to @alalek. 
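The change below guards clAmdBlasSetup() with a double-checked lock, so the mutex is only taken until the first initialization completes. A generic sketch of the pattern, with placeholder names (illustrative only; strictly speaking the flag would ideally be atomic for full portability):

    #include <opencv2/core/core.hpp>

    static bool g_initialized = false;   // one-time flag (placeholder name)
    static cv::Mutex g_mutex;

    static void setupOnce()
    {
        if (!g_initialized)              // fast path: no locking after first init
        {
            cv::AutoLock lock(g_mutex);  // slow path: serialize the first call
            if (!g_initialized)          // re-check while holding the lock
            {
                // ... expensive one-time initialization goes here ...
                g_initialized = true;
            }
        }
    }

The clBlasSetup() change in gemm.cpp below follows this same shape.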
--- modules/ocl/src/gemm.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/ocl/src/gemm.cpp b/modules/ocl/src/gemm.cpp index 6e04baca4..a9533b5d8 100644 --- a/modules/ocl/src/gemm.cpp +++ b/modules/ocl/src/gemm.cpp @@ -81,11 +81,14 @@ static Mutex cs; void cv::ocl::clBlasSetup() { - AutoLock al(cs); if(!clBlasInitialized) { - openCLSafeCall(clAmdBlasSetup()); - clBlasInitialized = true; + AutoLock al(cs); + if(!clBlasInitialized) + { + openCLSafeCall(clAmdBlasSetup()); + clBlasInitialized = true; + } } } From ea165394484460f6907c32d447003baab89eaeb4 Mon Sep 17 00:00:00 2001 From: pengxiao Date: Tue, 3 Sep 2013 10:30:37 +0800 Subject: [PATCH 28/55] Fix a crash of ocl program if clAmdBlas is not linked. --- modules/ocl/src/gemm.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ocl/src/gemm.cpp b/modules/ocl/src/gemm.cpp index a9533b5d8..7e31cdbf4 100644 --- a/modules/ocl/src/gemm.cpp +++ b/modules/ocl/src/gemm.cpp @@ -69,7 +69,7 @@ void cv::ocl::clBlasSetup() void cv::ocl::clBlasTeardown() { - CV_Error(CV_StsNotImplemented, "OpenCL BLAS is not implemented"); + //intentionally do nothing } #else From a70bdfc13f8279ee86be1a0281fbd9ee357245b7 Mon Sep 17 00:00:00 2001 From: yao Date: Tue, 3 Sep 2013 15:02:18 +0800 Subject: [PATCH 29/55] a little fix to tests and sample --- modules/ocl/test/test_imgproc.cpp | 24 ++++++++++-------------- samples/ocl/clahe.cpp | 4 ++++ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/modules/ocl/test/test_imgproc.cpp b/modules/ocl/test/test_imgproc.cpp index 46cd257c8..f723e13bc 100644 --- a/modules/ocl/test/test_imgproc.cpp +++ b/modules/ocl/test/test_imgproc.cpp @@ -1396,14 +1396,10 @@ TEST_P(calcHist, Mat) } /////////////////////////////////////////////////////////////////////////////////////////////////////// // CLAHE -namespace -{ - IMPLEMENT_PARAM_CLASS(ClipLimit, double) -} -PARAM_TEST_CASE(CLAHE, cv::Size, ClipLimit) +PARAM_TEST_CASE(CLAHE, cv::Size, double) { - cv::Size size; + cv::Size gridSize; double clipLimit; cv::Mat src; @@ -1414,22 +1410,22 @@ PARAM_TEST_CASE(CLAHE, cv::Size, ClipLimit) virtual void SetUp() { - size = GET_PARAM(0); + gridSize = GET_PARAM(0); clipLimit = GET_PARAM(1); cv::RNG &rng = TS::ptr()->get_rng(); - src = randomMat(rng, size, CV_8UC1, 0, 256, false); + src = randomMat(rng, cv::Size(MWIDTH, MHEIGHT), CV_8UC1, 0, 256, false); g_src.upload(src); } }; TEST_P(CLAHE, Accuracy) { - cv::Ptr clahe = cv::ocl::createCLAHE(clipLimit); + cv::Ptr clahe = cv::ocl::createCLAHE(clipLimit, gridSize); clahe->apply(g_src, g_dst); cv::Mat dst(g_dst); - cv::Ptr clahe_gold = cv::createCLAHE(clipLimit); + cv::Ptr clahe_gold = cv::createCLAHE(clipLimit, gridSize); clahe_gold->apply(src, dst_gold); EXPECT_MAT_NEAR(dst_gold, dst, 1.0); @@ -1725,10 +1721,10 @@ INSTANTIATE_TEST_CASE_P(histTestBase, calcHist, Combine( ONE_TYPE(CV_32SC1) //no use )); -INSTANTIATE_TEST_CASE_P(ImgProc, CLAHE, Combine( - Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(1300, 1300)), - Values(0.0, 40.0))); +INSTANTIATE_TEST_CASE_P(Imgproc, CLAHE, Combine( + Values(cv::Size(4, 4), cv::Size(32, 8), cv::Size(8, 64)), + Values(0.0, 10.0, 62.0, 300.0))); -INSTANTIATE_TEST_CASE_P(OCL_ImgProc, ColumnSum, DIFFERENT_SIZES); +INSTANTIATE_TEST_CASE_P(Imgproc, ColumnSum, DIFFERENT_SIZES); #endif // HAVE_OPENCL diff --git a/samples/ocl/clahe.cpp b/samples/ocl/clahe.cpp index c2f4b27bf..1fbf49fac 100644 --- a/samples/ocl/clahe.cpp +++ b/samples/ocl/clahe.cpp @@ -44,6 +44,10 @@ int main(int argc, char** 
argv) namedWindow("CLAHE"); createTrackbar("Tile Size", "CLAHE", &tilesize, 32, (TrackbarCallback)TSize_Callback); createTrackbar("Clip Limit", "CLAHE", &cliplimit, 20, (TrackbarCallback)Clip_Callback); + + vector info; + CV_Assert(ocl::getDevice(info)); + Mat frame, outframe; ocl::oclMat d_outframe; From e528f39deff8f1b31ff53c9e3676e07b8227d0ed Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Wed, 4 Sep 2013 14:37:46 +0800 Subject: [PATCH 30/55] Added the performance test for mog and mog2. --- modules/ocl/perf/perf_bgfg.cpp | 337 +++++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) create mode 100644 modules/ocl/perf/perf_bgfg.cpp diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp new file mode 100644 index 000000000..188dffa13 --- /dev/null +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -0,0 +1,337 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Fangfang Bai, fangfang@multicorewareinc.com +// Jin Ma, jin@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other oclMaterials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "perf_precomp.hpp" + +///////////// PyrLKOpticalFlow //////////////////////// + +using namespace perf; +using std::tr1::get; +using std::tr1::tuple; +using std::tr1::make_tuple; + +#if defined(HAVE_XINE) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_FFMPEG) || \ + defined(WIN32) /* assume that we have ffmpeg */ + +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1 +#else +# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0 +#endif + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +typedef tuple VideoMOGParamType; +typedef TestBaseWithParam VideoMOGFixture; + +PERF_TEST_P(VideoMOGFixture, Video_MOG, + ::testing::Combine(::testing::Values("768x576.avi", "1920x1080.avi"), + ::testing::Values(1, 3), + ::testing::Values(0.0, 0.01))) +{ + VideoMOGParamType params = GetParam(); + + const string inputFile = perf::TestBase::getDataPath(get<0>(params)); + const int cn = get<1>(params); + const float learningRate = static_cast(get<2>(params)); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + if(RUN_PLAIN_IMPL) + { + cv::BackgroundSubtractorMOG mog; + cv::Mat foreground; + + mog(frame, foreground, learningRate); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + + cv::swap(temp, frame); + + TEST_CYCLE() + mog(frame, foreground, learningRate); + + SANITY_CHECK(foreground); + } + }else if(RUN_OCL_IMPL) + { + cv::ocl::oclMat d_frame(frame); + cv::ocl::MOG d_mog; + cv::ocl::oclMat foreground; + cv::Mat foreground_h; + + d_mog(d_frame, foreground, learningRate); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + d_frame.upload(frame); + + OCL_TEST_CYCLE() + d_mog(d_frame, foreground, learningRate); + + foreground.download(foreground_h); + SANITY_CHECK(foreground_h); + } + }else + OCL_PERF_ELSE +} +#endif + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +typedef tuple VideoMOG2ParamType; +typedef TestBaseWithParam VideoMOG2Fixture; + +PERF_TEST_P(VideoMOG2Fixture, Video_MOG2, + ::testing::Combine(::testing::Values("768x576.avi", "1920x1080.avi"), + ::testing::Values(1, 3))) +{ + VideoMOG2ParamType params = GetParam(); + + const string inputFile = perf::TestBase::getDataPath(get<0>(params)); + const int cn = get<1>(params); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + if(RUN_PLAIN_IMPL) + { + cv::BackgroundSubtractorMOG2 mog2; + cv::Mat foreground; + + mog2.set("detectShadows", false); + mog2(frame, foreground); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + + cv::swap(temp, frame); + + TEST_CYCLE() + mog2(frame, 
foreground); + + SANITY_CHECK(foreground); + } + }else if(RUN_OCL_IMPL) + { + cv::ocl::oclMat d_frame(frame); + cv::ocl::MOG2 d_mog2; + cv::ocl::oclMat foreground; + cv::Mat foreground_h; + + d_mog2(d_frame, foreground); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + d_frame.upload(frame); + + OCL_TEST_CYCLE() + d_mog2(d_frame, foreground); + + foreground.download(foreground_h); + SANITY_CHECK(foreground_h); + } + }else + OCL_PERF_ELSE +} +#endif + +#if BUILD_WITH_VIDEO_INPUT_SUPPORT + +typedef TestBaseWithParam Video_MOG2GetBackgroundImage; + +PERF_TEST_P(Video_MOG2GetBackgroundImage, Video_MOG2, + ::testing::Combine(::testing::Values("768x576.avi", "1920x1080.avi"), + ::testing::Values(1, 3))) +{ + VideoMOG2ParamType params = GetParam(); + + const string inputFile = perf::TestBase::getDataPath(get<0>(params)); + const int cn = get<1>(params); + + cv::VideoCapture cap(inputFile); + ASSERT_TRUE(cap.isOpened()); + + cv::Mat frame; + + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + if(RUN_PLAIN_IMPL) + { + cv::BackgroundSubtractorMOG2 mog2; + cv::Mat foreground; + + mog2.set("detectShadows", false); + mog2(frame, foreground); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + + cv::swap(temp, frame); + + TEST_CYCLE() + mog2(frame, foreground); + } + cv::Mat background; + TEST_CYCLE() + mog2.getBackgroundImage(background); + + SANITY_CHECK(background); + }else if(RUN_OCL_IMPL) + { + cv::ocl::oclMat d_frame(frame); + cv::ocl::MOG2 d_mog2; + cv::ocl::oclMat foreground; + cv::Mat background_h; + + d_mog2(d_frame, foreground); + + for (int i = 0; i < 10; ++i) + { + cap >> frame; + ASSERT_FALSE(frame.empty()); + + cv::Mat temp; + if (cn == 1) + cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY); + else + cv::cvtColor(frame, temp, cv::COLOR_BGR2BGRA); + cv::swap(temp, frame); + + d_frame.upload(frame); + d_mog2(d_frame, foreground); + } + cv::ocl::oclMat background; + OCL_TEST_CYCLE() + d_mog2.getBackgroundImage(background); + + background.download(background_h); + SANITY_CHECK(background_h); + }else + OCL_PERF_ELSE +} +#endif + From dd73016c8ba1b70dd371c44995f51f3c206c2636 Mon Sep 17 00:00:00 2001 From: Jin Ma Date: Wed, 4 Sep 2013 15:00:36 +0800 Subject: [PATCH 31/55] Removed whitespace. 
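Stripped of the benchmarking harness, the per-frame loop that the MOG/MOG2 perf tests above exercise boils down to the following usage pattern. This is a condensed, hypothetical sketch, not part of the patch: the clip name and frame count are placeholders taken from the tests, and error handling is omitted.

    #include "opencv2/opencv.hpp"
    #include "opencv2/ocl/ocl.hpp"

    int main()
    {
        cv::VideoCapture cap("768x576.avi");        // placeholder clip, as in the perf tests
        if (!cap.isOpened()) return -1;

        cv::ocl::MOG2 mog2;                         // GPU mixture-of-Gaussians model
        cv::ocl::oclMat d_frame, d_fgmask, d_background;

        cv::Mat frame, gray, fgmask, background;
        for (int i = 0; i < 10 && cap.read(frame); ++i)
        {
            cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);  // tests feed GRAY or BGRA frames
            d_frame.upload(gray);                   // host -> device
            mog2(d_frame, d_fgmask);                // update the model, produce the foreground mask
            d_fgmask.download(fgmask);              // device -> host, if the mask is consumed on CPU
        }

        mog2.getBackgroundImage(d_background);      // mean of the background Gaussians
        d_background.download(background);
        return 0;
    }

The perf tests wrap exactly this upload/operator()/download sequence in OCL_TEST_CYCLE() and sanity-check the downloaded mask against the CPU implementation.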
--- modules/ocl/perf/perf_bgfg.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/ocl/perf/perf_bgfg.cpp b/modules/ocl/perf/perf_bgfg.cpp index 188dffa13..185f704b3 100644 --- a/modules/ocl/perf/perf_bgfg.cpp +++ b/modules/ocl/perf/perf_bgfg.cpp @@ -79,7 +79,7 @@ PERF_TEST_P(VideoMOGFixture, Video_MOG, const string inputFile = perf::TestBase::getDataPath(get<0>(params)); const int cn = get<1>(params); const float learningRate = static_cast(get<2>(params)); - + cv::VideoCapture cap(inputFile); ASSERT_TRUE(cap.isOpened()); @@ -296,7 +296,7 @@ PERF_TEST_P(Video_MOG2GetBackgroundImage, Video_MOG2, mog2(frame, foreground); } cv::Mat background; - TEST_CYCLE() + TEST_CYCLE() mog2.getBackgroundImage(background); SANITY_CHECK(background); @@ -333,5 +333,4 @@ PERF_TEST_P(Video_MOG2GetBackgroundImage, Video_MOG2, }else OCL_PERF_ELSE } -#endif - +#endif \ No newline at end of file From f538e503853b42c43ddf5ffb59338e2c3e38c805 Mon Sep 17 00:00:00 2001 From: ilya-lavrenov Date: Wed, 4 Sep 2013 11:56:22 +0400 Subject: [PATCH 32/55] updated .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 4fd406edd..0bcffd726 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ tegra/ .sw[a-z] .*.swp tags +*.autosave From a9975b144a22e76228125eb0a25f78ec13db6815 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Wed, 4 Sep 2013 14:06:34 +0400 Subject: [PATCH 33/55] Fixed a bug in FLANN resulting in uninitialized accesses. This is fixed upstream in mariusmuja/flann@b615f26, but that fix would break binary compatibility, so I had to make a different one. Since the bug isn't quite obvious, here's an explanation. In the const version of any::cast, if policy is a small_any_policy, its get_value returns its input argument. So r becomes a pointer to obj, and the return value is a reference to a local variable, which is invalidated when the function exits. --- modules/flann/include/opencv2/flann/any.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/flann/include/opencv2/flann/any.h b/modules/flann/include/opencv2/flann/any.h index 89189c64e..7140b2a08 100644 --- a/modules/flann/include/opencv2/flann/any.h +++ b/modules/flann/include/opencv2/flann/any.h @@ -255,8 +255,7 @@ public: const T& cast() const { if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); - void* obj = const_cast(object); - T* r = reinterpret_cast(policy->get_value(&obj)); + T* r = reinterpret_cast(policy->get_value(const_cast(&object))); return *r; } From b5e1eb7d48f2aa4d0b2347f3d24c0ea90ac943ff Mon Sep 17 00:00:00 2001 From: Daniel Angelov Date: Wed, 4 Sep 2013 14:59:57 +0300 Subject: [PATCH 34/55] Removed IOArray constness. --- modules/imgproc/doc/feature_detection.rst | 4 ++-- modules/imgproc/include/opencv2/imgproc.hpp | 4 ++-- modules/imgproc/src/lsd.cpp | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/imgproc/doc/feature_detection.rst b/modules/imgproc/doc/feature_detection.rst index b23675171..ef44c05c3 100644 --- a/modules/imgproc/doc/feature_detection.rst +++ b/modules/imgproc/doc/feature_detection.rst @@ -579,7 +579,7 @@ LineSegmentDetector::drawSegments --------------------------------- Draws the line segments on a given image. -.. ocv:function:: void LineSegmentDetector::drawSegments(InputOutputArray _image, const InputArray lines) +.. 
ocv:function:: void LineSegmentDetector::drawSegments(InputOutputArray _image, InputArray lines) :param image: The image, where the liens will be drawn. Should be bigger or equal to the image, where the lines were found. @@ -590,7 +590,7 @@ LineSegmentDetector::compareSegments ------------------------------------ Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels. -.. ocv:function:: int LineSegmentDetector::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()) +.. ocv:function:: int LineSegmentDetector::compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) :param size: The size of the image, where lines1 and lines2 were found. diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 581ba1443..76d500248 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -933,7 +933,7 @@ public: * Should have the size of the image, where the lines were found * @param lines The lines that need to be drawn */ - virtual void drawSegments(InputOutputArray _image, const InputArray lines) = 0; + virtual void drawSegments(InputOutputArray _image, InputArray lines) = 0; /** * Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2. @@ -945,7 +945,7 @@ public: * Should have the size of the image, where the lines were found * @return The number of mismatching pixels between lines1 and lines2. */ - virtual int compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()) = 0; + virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) = 0; virtual ~LineSegmentDetector() {}; }; diff --git a/modules/imgproc/src/lsd.cpp b/modules/imgproc/src/lsd.cpp index 8cfe2b2b2..9b57a3411 100644 --- a/modules/imgproc/src/lsd.cpp +++ b/modules/imgproc/src/lsd.cpp @@ -214,7 +214,7 @@ public: * Should have the size of the image, where the lines were found * @param lines The lines that need to be drawn */ - void drawSegments(InputOutputArray _image, const InputArray lines); + void drawSegments(InputOutputArray _image, InputArray lines); /** * Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2. @@ -226,7 +226,7 @@ public: * Should have the size of the image, where the lines were found * @return The number of mismatching pixels between lines1 and lines2. */ - int compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image = noArray()); + int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()); private: Mat image; @@ -335,7 +335,7 @@ private: * @param rec Return: The generated rectangle. */ void region2rect(const std::vector& reg, const int reg_size, const double reg_angle, - const double prec, const double p, rect& rec) const; + const double prec, const double p, rect& rec) const; /** * Compute region's angle as the principal inertia axis of the region. 
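The drawSegments()/compareSegments() documentation touched above describes the usual detector flow: detect segments on a grayscale image, overlay them on a canvas, and optionally count mismatching pixels between two segment sets. A short hypothetical usage sketch, assuming the createLineSegmentDetector() factory declared alongside the class; the input file name is a placeholder and the signatures follow the ones shown in this patch:

    #include <vector>
    #include "opencv2/imgproc.hpp"
    #include "opencv2/highgui.hpp"

    int main()
    {
        cv::Mat image = cv::imread("building.jpg", cv::IMREAD_GRAYSCALE);  // placeholder input
        if (image.empty()) return -1;

        cv::Ptr<cv::LineSegmentDetector> lsd =
            cv::createLineSegmentDetector(cv::LSD_REFINE_STD);

        std::vector<cv::Vec4f> lines;
        lsd->detect(image, lines);                  // each entry is (x1, y1, x2, y2)

        cv::Mat drawn = image.clone();
        lsd->drawSegments(drawn, lines);            // overlays the detected segments

        // count non-overlapping pixels between two segment sets (identical sets -> 0)
        int mismatched = lsd->compareSegments(image.size(), lines, lines);

        cv::imshow("segments", drawn);
        cv::waitKey();
        return mismatched == 0 ? 0 : 1;
    }

Dropping the const on the InputArray parameters, as this patch does, leaves these call sites unchanged; top-level const on a by-value proxy argument has no effect for callers.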
@@ -409,7 +409,7 @@ LineSegmentDetectorImpl::LineSegmentDetectorImpl(int _refine, double _scale, dou _n_bins > 0); } -void LineSegmentDetectorImpl::detect(const InputArray _image, OutputArray _lines, +void LineSegmentDetectorImpl::detect(InputArray _image, OutputArray _lines, OutputArray _width, OutputArray _prec, OutputArray _nfa) { Mat_ img = _image.getMat(); @@ -1149,7 +1149,7 @@ inline bool LineSegmentDetectorImpl::isAligned(const int& address, const double& } -void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, const InputArray lines) +void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, InputArray lines) { CV_Assert(!_image.empty() && (_image.channels() == 1 || _image.channels() == 3)); @@ -1185,7 +1185,7 @@ void LineSegmentDetectorImpl::drawSegments(InputOutputArray _image, const InputA } -int LineSegmentDetectorImpl::compareSegments(const Size& size, const InputArray lines1, const InputArray lines2, InputOutputArray _image) +int LineSegmentDetectorImpl::compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image) { Size sz = size; if (_image.needed() && _image.size() != size) sz = _image.size(); From 6ebfa87181af66511098c49b317493bdf854abaf Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Wed, 4 Sep 2013 16:13:27 +0400 Subject: [PATCH 35/55] Delete a bunch more trailing whitespace that slipped through the cracks. --- modules/imgproc/src/color.cpp | 38 +++++++++---------- modules/imgproc/src/smooth.cpp | 2 +- modules/java/android_test/AndroidManifest.xml | 4 +- modules/java/android_test/res/layout/main.xml | 6 +-- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index 9469925ec..bac63ad6f 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -398,7 +398,7 @@ private: struct IPPColor2GrayFunctor { - IPPColor2GrayFunctor(ippiColor2GrayFunc _func) : func(_func) + IPPColor2GrayFunctor(ippiColor2GrayFunc _func) : func(_func) { coeffs[0] = 0.114f; coeffs[1] = 0.587f; @@ -454,7 +454,7 @@ struct IPPReorderGeneralFunctor } bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const { - Mat temp; + Mat temp; temp.create(rows, cols, CV_MAKETYPE(depth, 3)); if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows), order) < 0) return false; @@ -478,7 +478,7 @@ struct IPPGeneralReorderFunctor } bool operator()(const void *src, int srcStep, void *dst, int dstStep, int cols, int rows) const { - Mat temp; + Mat temp; temp.create(rows, cols, CV_MAKETYPE(depth, 3)); if(func1(src, srcStep, temp.data, (int)temp.step[0], ippiSize(cols, rows)) < 0) return false; @@ -3651,8 +3651,8 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create( sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( code == CV_BGR2BGRA || code == CV_RGB2RGBA) { if ( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 0, 1, 2)) ) @@ -3737,7 +3737,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 3 || scn == 4 ); _dst.create(sz, CV_MAKETYPE(depth, 1)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( code == CV_BGR2GRAY ) { @@ -3789,13 +3789,13 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) CV_Assert( scn == 1 && (dcn == 3 || dcn == 4)); 
_dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( code == CV_GRAY2BGR ) { if( CvtColorIPPLoop(src, dst, IPPGray2BGRFunctor(ippiCopyP3C3RTab[depth])) ) return; - } + } else if( code == CV_GRAY2BGRA ) { if( CvtColorIPPLoop(src, dst, IPPGray2BGRAFunctor(ippiCopyP3C3RTab[depth], ippiSwapChannelsC3C4RTab[depth], depth)) ) @@ -3882,7 +3882,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( code == CV_BGR2XYZ && scn == 3 ) { @@ -3898,7 +3898,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2XYZTab[depth])) ) return; - } + } else if( code == CV_RGB2XYZ && scn == 4 ) { if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 0, 1, 2, depth)) ) @@ -3921,7 +3921,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( code == CV_XYZ2BGR && dcn == 3 ) { @@ -3964,7 +3964,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( depth == CV_8U || depth == CV_16U ) { @@ -3982,12 +3982,12 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HSVTab[depth])) ) return; - } + } else if( code == CV_RGB2HSV_FULL && scn == 4 ) { if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 0, 1, 2, depth)) ) return; - } + } else if( code == CV_BGR2HLS_FULL && scn == 3 ) { if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) @@ -4002,7 +4002,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HLSTab[depth])) ) return; - } + } else if( code == CV_RGB2HLS_FULL && scn == 4 ) { if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 0, 1, 2, depth)) ) @@ -4045,7 +4045,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); - + #if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) if( depth == CV_8U || depth == CV_16U ) { @@ -4063,12 +4063,12 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHSV2RGBTab[depth])) ) return; - } + } else if( code == CV_HSV2RGB_FULL && dcn == 4 ) { if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) return; - } + } else if( code == CV_HLS2BGR_FULL && dcn == 3 ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) @@ -4083,7 +4083,7 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) { if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHLS2RGBTab[depth])) ) return; - } + } else if( code == CV_HLS2RGB_FULL && dcn == 4 ) { if( CvtColorIPPLoop(src, dst, 
IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 3dad2c087..3d42e3be5 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -1951,7 +1951,7 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d, if( ok ) return; } #endif - + vector _color_weight(cn*256); vector _space_weight(d*d); vector _space_ofs(d*d); diff --git a/modules/java/android_test/AndroidManifest.xml b/modules/java/android_test/AndroidManifest.xml index dfe25fff0..81f2bc134 100644 --- a/modules/java/android_test/AndroidManifest.xml +++ b/modules/java/android_test/AndroidManifest.xml @@ -3,7 +3,7 @@ package="org.opencv.test" android:versionCode="1" android:versionName="1.0"> - +