diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index a72ef09c7..5e5773385 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -43,6 +43,7 @@ #ifndef __OPENCV_CUDALEGACY_HPP__ #define __OPENCV_CUDALEGACY_HPP__ +#include "opencv2/core/cuda.hpp" #include "opencv2/cudalegacy/NCV.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVPyramid.hpp" @@ -56,4 +57,16 @@ @} */ +namespace cv { namespace cuda { + +class CV_EXPORTS ImagePyramid : public Algorithm +{ +public: + virtual void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const = 0; +}; + +CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null()); + +}} + #endif /* __OPENCV_CUDALEGACY_HPP__ */ diff --git a/modules/cudalegacy/src/image_pyramid.cpp b/modules/cudalegacy/src/image_pyramid.cpp new file mode 100644 index 000000000..938ffea5d --- /dev/null +++ b/modules/cudalegacy/src/image_pyramid.cpp @@ -0,0 +1,147 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +using namespace cv; +using namespace cv::cuda; + +#if !defined HAVE_CUDA || defined(CUDA_DISABLER) + +Ptr<ImagePyramid> cv::cuda::createImagePyramid(InputArray, int, Stream&) { throw_no_cuda(); return Ptr<ImagePyramid>(); } + +#else // HAVE_CUDA + +namespace +{ + class ImagePyramidImpl : public ImagePyramid + { + public: + ImagePyramidImpl(InputArray img, int nLayers, Stream& stream); + + void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const; + + private: + GpuMat layer0_; + std::vector<GpuMat> pyramid_; + int nLayers_; + }; + + ImagePyramidImpl::ImagePyramidImpl(InputArray _img, int numLayers, Stream& stream) + { + GpuMat img = _img.getGpuMat(); + + CV_Assert( img.depth() <= CV_32F && img.channels() <= 4 ); + + img.copyTo(layer0_, stream); + + Size szLastLayer = img.size(); + nLayers_ = 1; + + if (numLayers <= 0) + numLayers = 255; // it will cut-off when any of the dimensions goes 1 + + pyramid_.resize(numLayers); + + for (int i = 0; i < numLayers - 1; ++i) + { + Size szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2); + + if (szCurLayer.width == 0 || szCurLayer.height == 0) + break; + + ensureSizeIsEnough(szCurLayer, img.type(), pyramid_[i]); + nLayers_++; + + const GpuMat& prevLayer = i == 0 ? 
layer0_ : pyramid_[i - 1]; + + cv::cuda::device::pyramid::downsampleX2(prevLayer, pyramid_[i], img.depth(), img.channels(), StreamAccessor::getStream(stream)); + + szLastLayer = szCurLayer; + } + } + + void ImagePyramidImpl::getLayer(OutputArray _outImg, Size outRoi, Stream& stream) const + { + CV_Assert( outRoi.width <= layer0_.cols && outRoi.height <= layer0_.rows && outRoi.width > 0 && outRoi.height > 0 ); + + ensureSizeIsEnough(outRoi, layer0_.type(), _outImg); + GpuMat outImg = _outImg.getGpuMat(); + + if (outRoi.width == layer0_.cols && outRoi.height == layer0_.rows) + { + layer0_.copyTo(outImg, stream); + return; + } + + float lastScale = 1.0f; + float curScale; + GpuMat lastLayer = layer0_; + GpuMat curLayer; + + for (int i = 0; i < nLayers_ - 1; ++i) + { + curScale = lastScale * 0.5f; + curLayer = pyramid_[i]; + + if (outRoi.width == curLayer.cols && outRoi.height == curLayer.rows) + { + curLayer.copyTo(outImg, stream); + } + + if (outRoi.width >= curLayer.cols && outRoi.height >= curLayer.rows) + break; + + lastScale = curScale; + lastLayer = curLayer; + } + + cv::cuda::device::pyramid::interpolateFrom1(lastLayer, outImg, outImg.depth(), outImg.channels(), StreamAccessor::getStream(stream)); + } +} + +Ptr<ImagePyramid> cv::cuda::createImagePyramid(InputArray img, int nLayers, Stream& stream) +{ + return Ptr<ImagePyramid>(new ImagePyramidImpl(img, nLayers, stream)); +} + +#endif diff --git a/modules/cudawarping/CMakeLists.txt b/modules/cudawarping/CMakeLists.txt index 231e24e69..fa99e9d04 100644 --- a/modules/cudawarping/CMakeLists.txt +++ b/modules/cudawarping/CMakeLists.txt @@ -6,4 +6,4 @@ set(the_description "CUDA-accelerated Image Warping") ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow) -ocv_define_module(cudawarping opencv_imgproc OPTIONAL opencv_cudalegacy) +ocv_define_module(cudawarping opencv_core opencv_imgproc OPTIONAL opencv_cudev) diff --git a/modules/cudawarping/include/opencv2/cudawarping.hpp 
b/modules/cudawarping/include/opencv2/cudawarping.hpp index ca877d50c..fbd63873b 100644 --- a/modules/cudawarping/include/opencv2/cudawarping.hpp +++ b/modules/cudawarping/include/opencv2/cudawarping.hpp @@ -224,14 +224,6 @@ src . */ CV_EXPORTS void pyrUp(InputArray src, OutputArray dst, Stream& stream = Stream::Null()); -class CV_EXPORTS ImagePyramid : public Algorithm -{ -public: - virtual void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const = 0; -}; - -CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null()); - //! @} }} // namespace cv { namespace cuda { diff --git a/modules/cudawarping/perf/perf_warping.cpp b/modules/cudawarping/perf/perf_warping.cpp index dfb11075a..1096d3f44 100644 --- a/modules/cudawarping/perf/perf_warping.cpp +++ b/modules/cudawarping/perf/perf_warping.cpp @@ -514,40 +514,3 @@ PERF_TEST_P(Sz_Depth_Cn, PyrUp, CPU_SANITY_CHECK(dst); } } - -////////////////////////////////////////////////////////////////////// -// ImagePyramidGetLayer - -PERF_TEST_P(Sz_Depth_Cn, ImagePyramidGetLayer, - Combine(CUDA_TYPICAL_MAT_SIZES, - Values(CV_8U, CV_16U, CV_32F), - CUDA_CHANNELS_1_3_4)) -{ - const cv::Size size = GET_PARAM(0); - const int depth = GET_PARAM(1); - const int channels = GET_PARAM(2); - - const int type = CV_MAKE_TYPE(depth, channels); - - cv::Mat src(size, type); - declare.in(src, WARMUP_RNG); - - const int nLayers = 3; - const cv::Size dstSize(size.width / 2 + 10, size.height / 2 + 10); - - if (PERF_RUN_CUDA()) - { - const cv::cuda::GpuMat d_src(src); - cv::cuda::GpuMat dst; - - cv::Ptr<cv::cuda::ImagePyramid> d_pyr = cv::cuda::createImagePyramid(d_src, nLayers); - - TEST_CYCLE() d_pyr->getLayer(dst, dstSize); - - CUDA_SANITY_CHECK(dst); - } - else - { - FAIL_NO_CPU(); - } -} diff --git a/modules/cudawarping/src/precomp.hpp b/modules/cudawarping/src/precomp.hpp index 9f5b0c152..a59a4e925 100644 --- a/modules/cudawarping/src/precomp.hpp +++ b/modules/cudawarping/src/precomp.hpp @@ -47,11 
+47,4 @@ #include "opencv2/core/private.cuda.hpp" -#include "opencv2/opencv_modules.hpp" - -#ifdef HAVE_OPENCV_CUDALEGACY -# include "opencv2/cudalegacy.hpp" -# include "opencv2/cudalegacy/private.hpp" -#endif - #endif /* __OPENCV_PRECOMP_H__ */ diff --git a/modules/cudawarping/src/pyramids.cpp b/modules/cudawarping/src/pyramids.cpp index 3d942fc6a..0cb0f5de5 100644 --- a/modules/cudawarping/src/pyramids.cpp +++ b/modules/cudawarping/src/pyramids.cpp @@ -50,8 +50,6 @@ using namespace cv::cuda; void cv::cuda::pyrDown(InputArray, OutputArray, Stream&) { throw_no_cuda(); } void cv::cuda::pyrUp(InputArray, OutputArray, Stream&) { throw_no_cuda(); } -Ptr<ImagePyramid> cv::cuda::createImagePyramid(InputArray, int, Stream&) { throw_no_cuda(); return Ptr<ImagePyramid>(); } - #else // HAVE_CUDA ////////////////////////////////////////////////////////////////////////////// @@ -133,112 +131,4 @@ void cv::cuda::pyrUp(InputArray _src, OutputArray _dst, Stream& stream) func(src, dst, StreamAccessor::getStream(stream)); } -////////////////////////////////////////////////////////////////////////////// -// ImagePyramid - -#ifdef HAVE_OPENCV_CUDALEGACY - -namespace -{ - class ImagePyramidImpl : public ImagePyramid - { - public: - ImagePyramidImpl(InputArray img, int nLayers, Stream& stream); - - void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const; - - private: - GpuMat layer0_; - std::vector<GpuMat> pyramid_; - int nLayers_; - }; - - ImagePyramidImpl::ImagePyramidImpl(InputArray _img, int numLayers, Stream& stream) - { - GpuMat img = _img.getGpuMat(); - - CV_Assert( img.depth() <= CV_32F && img.channels() <= 4 ); - - img.copyTo(layer0_, stream); - - Size szLastLayer = img.size(); - nLayers_ = 1; - - if (numLayers <= 0) - numLayers = 255; // it will cut-off when any of the dimensions goes 1 - - pyramid_.resize(numLayers); - - for (int i = 0; i < numLayers - 1; ++i) - { - Size szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2); - - if (szCurLayer.width == 0 || szCurLayer.height 
== 0) - break; - - ensureSizeIsEnough(szCurLayer, img.type(), pyramid_[i]); - nLayers_++; - - const GpuMat& prevLayer = i == 0 ? layer0_ : pyramid_[i - 1]; - - cv::cuda::device::pyramid::downsampleX2(prevLayer, pyramid_[i], img.depth(), img.channels(), StreamAccessor::getStream(stream)); - - szLastLayer = szCurLayer; - } - } - - void ImagePyramidImpl::getLayer(OutputArray _outImg, Size outRoi, Stream& stream) const - { - CV_Assert( outRoi.width <= layer0_.cols && outRoi.height <= layer0_.rows && outRoi.width > 0 && outRoi.height > 0 ); - - ensureSizeIsEnough(outRoi, layer0_.type(), _outImg); - GpuMat outImg = _outImg.getGpuMat(); - - if (outRoi.width == layer0_.cols && outRoi.height == layer0_.rows) - { - layer0_.copyTo(outImg, stream); - return; - } - - float lastScale = 1.0f; - float curScale; - GpuMat lastLayer = layer0_; - GpuMat curLayer; - - for (int i = 0; i < nLayers_ - 1; ++i) - { - curScale = lastScale * 0.5f; - curLayer = pyramid_[i]; - - if (outRoi.width == curLayer.cols && outRoi.height == curLayer.rows) - { - curLayer.copyTo(outImg, stream); - } - - if (outRoi.width >= curLayer.cols && outRoi.height >= curLayer.rows) - break; - - lastScale = curScale; - lastLayer = curLayer; - } - - cv::cuda::device::pyramid::interpolateFrom1(lastLayer, outImg, outImg.depth(), outImg.channels(), StreamAccessor::getStream(stream)); - } -} - #endif - -Ptr<ImagePyramid> cv::cuda::createImagePyramid(InputArray img, int nLayers, Stream& stream) -{ -#ifndef HAVE_OPENCV_CUDALEGACY - (void) img; - (void) nLayers; - (void) stream; - throw_no_cuda(); - return Ptr<ImagePyramid>(); -#else - return Ptr<ImagePyramid>(new ImagePyramidImpl(img, nLayers, stream)); -#endif -} - -#endif // HAVE_CUDA