From b5c92a7dc0fe63faca28b9410e70c5447079ec39 Mon Sep 17 00:00:00 2001
From: Vladislav Vinogradov
Date: Wed, 15 Sep 2010 08:26:18 +0000
Subject: [PATCH] Added implementations and tests for the GPU versions of
 flip, resize, sum, minMax, copyConstBorder and setTo, based on NPP.

---
 modules/gpu/include/opencv2/gpu/gpu.hpp |  21 ++
 modules/gpu/src/arithm.cpp              | 158 ++++++++++++++-
 modules/gpu/src/matrix_operations.cpp   |  44 +++-
 tests/gpu/src/npp_image_arithm.cpp      | 259 +++++++++++++++++++++++-
 4 files changed, 469 insertions(+), 13 deletions(-)

diff --git a/modules/gpu/include/opencv2/gpu/gpu.hpp b/modules/gpu/include/opencv2/gpu/gpu.hpp
index 1ea275965..d146ee88a 100644
--- a/modules/gpu/include/opencv2/gpu/gpu.hpp
+++ b/modules/gpu/include/opencv2/gpu/gpu.hpp
@@ -45,6 +45,7 @@
 #include <vector>
 #include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/gpu/devmem2d.hpp"
 
 namespace cv
@@ -372,9 +373,29 @@ namespace cv
         //! computes mean value and standard deviation of all or selected array elements
         CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev);
+        //! computes norm of array
+        //! Supports NORM_INF, NORM_L1, NORM_L2
         CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2);
+        //! computes norm of the difference between two arrays
+        //! Supports NORM_INF, NORM_L1, NORM_L2
         CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2);
+
+        //! reverses the order of the rows, columns or both in a matrix
+        CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode);
+
+        //! resizes the image
+        //! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_LANCZOS4
+        CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation=INTER_LINEAR);
+
+        //! computes sum of array elements
+        CV_EXPORTS Scalar sum(const GpuMat& m);
+
+        //! finds global minimum and maximum array elements and returns their values
+        CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal = 0);
+
+        //! copies 2D array to a larger destination array and pads borders with user-specifiable constant
+        CV_EXPORTS void copyConstBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value = Scalar());
+
         ////////////////////////////// Image processing //////////////////////////////
 
         // DST[x,y] = SRC[xmap[x,y], ymap[x,y]] with bilinear interpolation.
         // xmap.type() == ymap.type() == CV_32FC1
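For orientation before the implementation hunks, here is a minimal host-side sketch (not part of the patch) of how the new public API above is meant to be driven. The input file name is hypothetical, and this assumes OpenCV was built with HAVE_CUDA so the NPP-backed paths below are compiled in:

    #include <iostream>
    #include "opencv2/gpu/gpu.hpp"
    #include "opencv2/highgui/highgui.hpp"

    int main()
    {
        // The new NPP wrappers accept 8UC1/8UC4 images
        // (copyConstBorder additionally accepts 32SC1).
        cv::Mat src = cv::imread("input.png", 0);
        if (src.empty()) return -1;

        cv::gpu::GpuMat d_src(src);                              // upload to the device

        cv::gpu::GpuMat d_flipped, d_half, d_bordered;
        cv::gpu::flip(d_src, d_flipped, 0);                      // flip around the x-axis
        cv::gpu::resize(d_src, d_half, cv::Size(), 0.5, 0.5);    // INTER_LINEAR by default
        cv::gpu::copyConstBorder(d_src, d_bordered, 5, 5, 5, 5); // zero padding (Scalar())

        cv::Scalar s = cv::gpu::sum(d_src);                      // per-channel sums
        double minVal, maxVal;
        cv::gpu::minMax(d_src, &minVal, &maxVal);                // 8UC1 only
        std::cout << "sum=" << s[0] << " min=" << minVal << " max=" << maxVal << std::endl;

        cv::Mat flipped;
        d_flipped.download(flipped);                             // download the result
        return 0;
    }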
diff --git a/modules/gpu/src/arithm.cpp b/modules/gpu/src/arithm.cpp
index 2ef08a1d3..295827a4a 100644
--- a/modules/gpu/src/arithm.cpp
+++ b/modules/gpu/src/arithm.cpp
@@ -66,6 +66,16 @@ void cv::gpu::meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev) { thro
 double cv::gpu::norm(const GpuMat& src1, int normType) { throw_nogpu(); return 0.0; }
 double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType) { throw_nogpu(); return 0.0; }
+void cv::gpu::flip(const GpuMat& a, GpuMat& b, int flipCode) { throw_nogpu(); }
+
+void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, double fy, int interpolation) { throw_nogpu(); }
+
+Scalar cv::gpu::sum(const GpuMat& m) { throw_nogpu(); return Scalar(); }
+
+void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal) { throw_nogpu(); }
+
+void cv::gpu::copyConstBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value) { throw_nogpu(); }
+
 #else /* !defined (HAVE_CUDA) */
 
 namespace
 {
@@ -247,13 +257,157 @@ double cv::gpu::norm(const GpuMat& src1, const GpuMat& src2, int normType)
     sz.height = src1.rows;
 
     int funcIdx = normType >> 1;
-    Npp64f retVal[3];
+    Scalar retVal;
 
     npp_norm_diff_func[funcIdx]((const Npp8u*)src1.ptr(), src1.step,
         (const Npp8u*)src2.ptr(), src2.step,
-        sz, retVal);
+        sz, retVal.val);
 
     return retVal[0];
 }
 
+void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode)
+{
+    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);
+
+    dst.create( src.size(), src.type() );
+
+    NppiSize sz;
+    sz.width = src.cols;
+    sz.height = src.rows;
+
+    if (src.channels() == 1)
+    {
+        nppiMirror_8u_C1R((const Npp8u*)src.ptr(), src.step,
+            (Npp8u*)dst.ptr(), dst.step, sz,
+            (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS)));
+    }
+    else
+    {
+        nppiMirror_8u_C4R((const Npp8u*)src.ptr(), src.step,
+            (Npp8u*)dst.ptr(), dst.step, sz,
+            (flipCode == 0 ? NPP_HORIZONTAL_AXIS : (flipCode > 0 ? NPP_VERTICAL_AXIS : NPP_BOTH_AXIS)));
+    }
+}
+
+void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, double fy, int interpolation)
+{
+    static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC, 0, NPPI_INTER_LANCZOS};
+
+    CV_Assert((src.type() == CV_8UC1 || src.type() == CV_8UC4) &&
+        (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC || interpolation == INTER_LANCZOS4));
+
+    CV_Assert( src.size().area() > 0 );
+    CV_Assert( !(dsize == Size()) || (fx > 0 && fy > 0) );
+    if( dsize == Size() )
+    {
+        dsize = Size(saturate_cast<int>(src.cols * fx), saturate_cast<int>(src.rows * fy));
+    }
+    else
+    {
+        fx = (double)dsize.width / src.cols;
+        fy = (double)dsize.height / src.rows;
+    }
+    dst.create(dsize, src.type());
+
+    NppiSize srcsz;
+    srcsz.width = src.cols;
+    srcsz.height = src.rows;
+    NppiRect srcrect;
+    srcrect.x = srcrect.y = 0;
+    srcrect.width = src.cols;
+    srcrect.height = src.rows;
+    NppiSize dstsz;
+    dstsz.width = dst.cols;
+    dstsz.height = dst.rows;
+
+    if (src.channels() == 1)
+    {
+        nppiResize_8u_C1R((const Npp8u*)src.ptr(), srcsz, src.step, srcrect,
+            (Npp8u*)dst.ptr(), dst.step, dstsz, fx, fy, npp_inter[interpolation]);
+    }
+    else
+    {
+        nppiResize_8u_C4R((const Npp8u*)src.ptr(), srcsz, src.step, srcrect,
+            (Npp8u*)dst.ptr(), dst.step, dstsz, fx, fy, npp_inter[interpolation]);
+    }
+}
+
+Scalar cv::gpu::sum(const GpuMat& src)
+{
+    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);
+
+    Scalar res;
+
+    NppiSize sz;
+    sz.width = src.cols;
+    sz.height = src.rows;
+
+    if (src.channels() == 1)
+    {
+        nppiSum_8u_C1R((const Npp8u*)src.ptr(), src.step, sz, res.val);
+    }
+    else
+    {
+        nppiSum_8u_C4R((const Npp8u*)src.ptr(), src.step, sz, res.val);
+    }
+
+    return res;
+}
+
+void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal)
+{
+    CV_Assert(src.type() == CV_8UC1);
+
+    NppiSize sz;
+    sz.width = src.cols;
+    sz.height = src.rows;
+
+    Npp8u min_res, max_res;
+
+    nppiMinMax_8u_C1R((const Npp8u*)src.ptr(), src.step, sz, &min_res, &max_res);
+
+    if (minVal)
+        *minVal = min_res;
+
+    if (maxVal)
+        *maxVal = max_res;
+}
+
+void cv::gpu::copyConstBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value)
+{
+    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1);
+
+    dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
+
+    NppiSize srcsz;
+    srcsz.width = src.cols;
+    srcsz.height = src.rows;
+    NppiSize dstsz;
+    dstsz.width = dst.cols;
+    dstsz.height = dst.rows;
+
+    if (src.depth() == CV_8U)
+    {
+        if (src.channels() == 1)
+        {
+            Npp8u nVal = (Npp8u)value[0];
+            nppiCopyConstBorder_8u_C1R((const Npp8u*)src.ptr(), src.step, srcsz,
+                (Npp8u*)dst.ptr(), dst.step, dstsz, top, left, nVal);
+        }
+        else
+        {
+            Npp8u nVal[] = {(Npp8u)value[0], (Npp8u)value[1], (Npp8u)value[2], (Npp8u)value[3]};
+            nppiCopyConstBorder_8u_C4R((const Npp8u*)src.ptr(), src.step, srcsz,
+                (Npp8u*)dst.ptr(), dst.step, dstsz, top, left, nVal);
+        }
+    }
+    else //if (src.depth() == CV_32S)
+    {
+        Npp32s nVal = (Npp32s)value[0];
+        nppiCopyConstBorder_32s_C1R((const Npp32s*)src.ptr(), src.step, srcsz,
+            (Npp32s*)dst.ptr(), dst.step, dstsz, top, left, nVal);
+    }
+}
+
 #endif /* !defined (HAVE_CUDA) */
\ No newline at end of file
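A note on the resize hunk above: npp_inter[] is indexed directly by the OpenCV interpolation constant, which relies on INTER_NEAREST, INTER_LINEAR, INTER_CUBIC and INTER_LANCZOS4 having the values 0, 1, 2 and 4 (slot 3, INTER_AREA, is deliberately left as 0 and rejected by the assertion). If those enum values ever changed, an explicit mapping would be safer; the following sketch of the equivalent logic is illustrative only (toNppInterpolation is a hypothetical helper, not in the patch):

    // Equivalent to the npp_inter[] table lookup in cv::gpu::resize above.
    static int toNppInterpolation(int interpolation)
    {
        switch (interpolation)
        {
        case cv::INTER_NEAREST:  return NPPI_INTER_NN;
        case cv::INTER_LINEAR:   return NPPI_INTER_LINEAR;
        case cv::INTER_CUBIC:    return NPPI_INTER_CUBIC;
        case cv::INTER_LANCZOS4: return NPPI_INTER_LANCZOS;
        }
        CV_Assert(!"unsupported interpolation mode");
        return 0;
    }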
diff --git a/modules/gpu/src/matrix_operations.cpp b/modules/gpu/src/matrix_operations.cpp
index 97c1f10a7..4a2964f8c 100644
--- a/modules/gpu/src/matrix_operations.cpp
+++ b/modules/gpu/src/matrix_operations.cpp
@@ -162,7 +162,49 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
     CV_DbgAssert(!this->empty());
 
     if (mask.empty())
-        matrix_operations::set_to_without_mask( *this, depth(), s.val, channels());
+    {
+        switch (type())
+        {
+        case CV_8UC1:
+            {
+                NppiSize sz;
+                sz.width = cols;
+                sz.height = rows;
+                Npp8u nVal = (Npp8u)s[0];
+                nppiSet_8u_C1R(nVal, (Npp8u*)ptr(), step, sz);
+                break;
+            }
+        case CV_8UC4:
+            {
+                NppiSize sz;
+                sz.width = cols;
+                sz.height = rows;
+                Npp8u nVal[] = {(Npp8u)s[0], (Npp8u)s[1], (Npp8u)s[2], (Npp8u)s[3]};
+                nppiSet_8u_C4R(nVal, (Npp8u*)ptr(), step, sz);
+                break;
+            }
+        case CV_32SC1:
+            {
+                NppiSize sz;
+                sz.width = cols;
+                sz.height = rows;
+                Npp32s nVal = (Npp32s)s[0];
+                nppiSet_32s_C1R(nVal, (Npp32s*)ptr(), step, sz);
+                break;
+            }
+        case CV_32FC1:
+            {
+                NppiSize sz;
+                sz.width = cols;
+                sz.height = rows;
+                Npp32f nVal = (Npp32f)s[0];
+                nppiSet_32f_C1R(nVal, (Npp32f*)ptr(), step, sz);
+                break;
+            }
+        default:
+            matrix_operations::set_to_without_mask( *this, depth(), s.val, channels());
+        }
+    }
     else
         matrix_operations::set_to_with_mask( *this, depth(), s.val, mask, channels());
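The setTo() change above dispatches on type() to pick a specialized NPP fill. As a quick illustration (not part of the patch) of which path a given call takes under a CUDA-enabled build; any type outside the four specialized cases drops back to the pre-existing generic kernel:

    #include "opencv2/gpu/gpu.hpp"

    void setTo_examples()
    {
        cv::gpu::GpuMat a(480, 640, CV_8UC1);
        a.setTo(cv::Scalar(0));              // nppiSet_8u_C1R

        cv::gpu::GpuMat b(480, 640, CV_8UC4);
        b.setTo(cv::Scalar(0, 0, 0, 255));   // nppiSet_8u_C4R

        cv::gpu::GpuMat c(480, 640, CV_32SC1);
        c.setTo(cv::Scalar(-1));             // nppiSet_32s_C1R

        cv::gpu::GpuMat d(480, 640, CV_32FC1);
        d.setTo(cv::Scalar(1.5));            // nppiSet_32f_C1R

        cv::gpu::GpuMat e(480, 640, CV_16UC1);
        e.setTo(cv::Scalar(42));             // default: generic set_to_without_mask
    }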
diff --git a/tests/gpu/src/npp_image_arithm.cpp b/tests/gpu/src/npp_image_arithm.cpp
index ca65df22e..2ce5d72d0 100644
--- a/tests/gpu/src/npp_image_arithm.cpp
+++ b/tests/gpu/src/npp_image_arithm.cpp
@@ -40,6 +40,8 @@
 //M*/
 #include <iostream>
+#include <cmath>
+#include <limits>
 #include "gputest.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/highgui/highgui.hpp"
@@ -59,10 +61,13 @@ protected:
     int test8UC1(const Mat& cpu1, const Mat& cpu2);
     int test8UC4(const Mat& cpu1, const Mat& cpu2);
+    int test32SC1(const Mat& cpu1, const Mat& cpu2);
     int test32FC1(const Mat& cpu1, const Mat& cpu2);
 
     virtual int test(const Mat& cpu1, const Mat& cpu2) = 0;
 
     int CheckNorm(const Mat& m1, const Mat& m2);
+    int CheckNorm(const Scalar& s1, const Scalar& s2);
+    int CheckNorm(double d1, double d2);
 };
 
 CV_GpuNppImageArithmTest::CV_GpuNppImageArithmTest(const char* test_name, const char* test_funcs): CvTest(test_name, test_funcs)
@@ -91,6 +96,19 @@ int CV_GpuNppImageArithmTest::test8UC4(const Mat& cpu1, const Mat& cpu2)
     return test(imgL_C4, imgR_C4);
 }
 
+int CV_GpuNppImageArithmTest::test32SC1( const Mat& cpu1, const Mat& cpu2 )
+{
+    cv::Mat imgL_C1;
+    cv::Mat imgR_C1;
+    cvtColor(cpu1, imgL_C1, CV_BGR2GRAY);
+    cvtColor(cpu2, imgR_C1, CV_BGR2GRAY);
+
+    imgL_C1.convertTo(imgL_C1, CV_32S);
+    imgR_C1.convertTo(imgR_C1, CV_32S);
+
+    return test(imgL_C1, imgR_C1);
+}
+
 int CV_GpuNppImageArithmTest::test32FC1( const Mat& cpu1, const Mat& cpu2 )
 {
     cv::Mat imgL_C1;
@@ -106,9 +124,31 @@ int CV_GpuNppImageArithmTest::test32FC1( const Mat& cpu1, const Mat& cpu2 )
 
 int CV_GpuNppImageArithmTest::CheckNorm(const Mat& m1, const Mat& m2)
 {
-    double ret = norm(m1, m2);
+    double ret = norm(m1, m2, NORM_INF);
 
-    if (ret < 1.0)
+    if (ret < std::numeric_limits<double>::epsilon())
+    {
+        return CvTS::OK;
+    }
+    else
+    {
+        ts->printf(CvTS::LOG, "\nNorm: %f\n", ret);
+        return CvTS::FAIL_GENERIC;
+    }
+}
+
+int CV_GpuNppImageArithmTest::CheckNorm(const Scalar& s1, const Scalar& s2)
+{
+    int ret0 = CheckNorm(s1[0], s2[0]), ret1 = CheckNorm(s1[1], s2[1]), ret2 = CheckNorm(s1[2], s2[2]), ret3 = CheckNorm(s1[3], s2[3]);
+
+    return (ret0 == CvTS::OK && ret1 == CvTS::OK && ret2 == CvTS::OK && ret3 == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+int CV_GpuNppImageArithmTest::CheckNorm(double d1, double d2)
+{
+    double ret = ::fabs(d1 - d2);
+
+    if (ret < std::numeric_limits<double>::epsilon())
     {
         return CvTS::OK;
     }
@@ -122,8 +162,11 @@ int CV_GpuNppImageArithmTest::CheckNorm(const Mat& m1, const Mat& m2)
 void CV_GpuNppImageArithmTest::run( int )
 {
     //load images
-    cv::Mat img_l = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-L.png");
-    cv::Mat img_r = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-R.png");
+    //cv::Mat img_l = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-L.png");
+    //cv::Mat img_r = cv::imread(std::string(ts->get_data_path()) + "stereobm/aloe-R.png");
+
+    cv::Mat img_l = cv::imread(std::string(ts->get_data_path()) + "stereobp/aloe-L.png");
+    cv::Mat img_r = cv::imread(std::string(ts->get_data_path()) + "stereobp/aloe-R.png");
 
     if (img_l.empty() || img_r.empty())
     {
@@ -146,6 +189,13 @@ void CV_GpuNppImageArithmTest::run( int )
         return;
     }
 
+    testResult = test32SC1(img_l, img_r);
+    if (testResult != CvTS::OK)
+    {
+        ts->set_failed_test_info(testResult);
+        return;
+    }
+
     testResult = test32FC1(img_l, img_r);
     if (testResult != CvTS::OK)
     {
@@ -173,6 +223,9 @@ CV_GpuNppImageAddTest::CV_GpuNppImageAddTest(): CV_GpuNppImageArithmTest( "GPU-N
 
 int CV_GpuNppImageAddTest::test( const Mat& cpu1, const Mat& cpu2 )
 {
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+        return CvTS::OK;
+
     cv::Mat cpuRes;
     cv::add(cpu1, cpu2, cpuRes);
 
@@ -203,6 +256,9 @@ CV_GpuNppImageSubtractTest::CV_GpuNppImageSubtractTest(): CV_GpuNppImageArithmTe
 
 int CV_GpuNppImageSubtractTest::test( const Mat& cpu1, const Mat& cpu2 )
 {
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+        return CvTS::OK;
+
     cv::Mat cpuRes;
     cv::subtract(cpu1, cpu2, cpuRes);
 
@@ -233,6 +289,9 @@ CV_GpuNppImageMultiplyTest::CV_GpuNppImageMultiplyTest(): CV_GpuNppImageArithmTe
 
 int CV_GpuNppImageMultiplyTest::test( const Mat& cpu1, const Mat& cpu2 )
 {
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+        return CvTS::OK;
+
     cv::Mat cpuRes;
     cv::multiply(cpu1, cpu2, cpuRes);
 
@@ -263,6 +322,9 @@ CV_GpuNppImageDivideTest::CV_GpuNppImageDivideTest(): CV_GpuNppImageArithmTest( 
 
 int CV_GpuNppImageDivideTest::test( const Mat& cpu1, const Mat& cpu2 )
 {
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32FC1)
+        return CvTS::OK;
+
     cv::Mat cpuRes;
     cv::divide(cpu1, cpu2, cpuRes);
 
@@ -293,7 +355,7 @@ CV_GpuNppImageTransposeTest::CV_GpuNppImageTransposeTest(): CV_GpuNppImageArithm
 
 int CV_GpuNppImageTransposeTest::test( const Mat& cpu1, const Mat& )
 {
-    if (!((cpu1.depth() == CV_8U) && cpu1.channels() == 1))
+    if (cpu1.type() != CV_8UC1)
         return CvTS::OK;
 
     cv::Mat cpuRes;
@@ -325,7 +387,7 @@ CV_GpuNppImageAbsdiffTest::CV_GpuNppImageAbsdiffTest(): CV_GpuNppImageArithmTest
 
 int CV_GpuNppImageAbsdiffTest::test( const Mat& cpu1, const Mat& cpu2 )
 {
-    if (!((cpu1.depth() == CV_8U || cpu1.depth() == CV_32F) && cpu1.channels() == 1))
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_32FC1)
         return CvTS::OK;
 
     cv::Mat cpuRes;
@@ -358,7 +420,7 @@ CV_GpuNppImageThresholdTest::CV_GpuNppImageThresholdTest(): CV_GpuNppImageArithm
 
 int CV_GpuNppImageThresholdTest::test( const Mat& cpu1, const Mat& )
 {
-    if (!((cpu1.depth() == CV_32F) && cpu1.channels() == 1))
+    if (cpu1.type() != CV_32FC1)
         return CvTS::OK;
 
     const double thresh = 0.5;
@@ -438,7 +500,7 @@ int CV_GpuNppImageMeanStdDevTest::test( const Mat& cpu1, const Mat& )
     Scalar gpustddev;
     cv::gpu::meanStdDev(gpu1, gpumean, gpustddev);
 
-    return (cpumean == gpumean && cpustddev == gpustddev) ? CvTS::OK : CvTS::FAIL_GENERIC;
+    return (CheckNorm(cpumean, gpumean) == CvTS::OK && CheckNorm(cpustddev, gpustddev) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
 }
 
 CV_GpuNppImageMeanStdDevTest CV_GpuNppImageMeanStdDev_test;
@@ -473,7 +535,184 @@ int CV_GpuNppImageNormTest::test( const Mat& cpu1, const Mat& cpu2 )
     double gpu_norm_L1 = cv::gpu::norm(gpu1, gpu2, NORM_L1);
     double gpu_norm_L2 = cv::gpu::norm(gpu1, gpu2, NORM_L2);
 
-    return (cpu_norm_inf == gpu_norm_inf && cpu_norm_L1 == gpu_norm_L1 && cpu_norm_L2 == gpu_norm_L2) ? CvTS::OK : CvTS::FAIL_GENERIC;
+    return (CheckNorm(cpu_norm_inf, gpu_norm_inf) == CvTS::OK
+        && CheckNorm(cpu_norm_L1, gpu_norm_L1) == CvTS::OK
+        && CheckNorm(cpu_norm_L2, gpu_norm_L2) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
 }
 
-CV_GpuNppImageNormTest CV_GpuNppImageNorm_test;
\ No newline at end of file
+CV_GpuNppImageNormTest CV_GpuNppImageNorm_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// flip
+class CV_GpuNppImageFlipTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageFlipTest();
+
+protected:
+    virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageFlipTest::CV_GpuNppImageFlipTest(): CV_GpuNppImageArithmTest( "GPU-NppImageFlip", "flip" )
+{
+}
+
+int CV_GpuNppImageFlipTest::test( const Mat& cpu1, const Mat& )
+{
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+        return CvTS::OK;
+
+    Mat cpux, cpuy, cpub;
+    cv::flip(cpu1, cpux, 0);
+    cv::flip(cpu1, cpuy, 1);
+    cv::flip(cpu1, cpub, -1);
+
+    GpuMat gpu1(cpu1);
+    GpuMat gpux, gpuy, gpub;
+    cv::gpu::flip(gpu1, gpux, 0);
+    cv::gpu::flip(gpu1, gpuy, 1);
+    cv::gpu::flip(gpu1, gpub, -1);
+
+    return (CheckNorm(cpux, gpux) == CvTS::OK &&
+        CheckNorm(cpuy, gpuy) == CvTS::OK &&
+        CheckNorm(cpub, gpub) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageFlipTest CV_GpuNppImageFlip_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// resize
+class CV_GpuNppImageResizeTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageResizeTest();
+
+protected:
+    virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageResizeTest::CV_GpuNppImageResizeTest(): CV_GpuNppImageArithmTest( "GPU-NppImageResize", "resize" )
+{
+}
+
+int CV_GpuNppImageResizeTest::test( const Mat& cpu1, const Mat& )
+{
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+        return CvTS::OK;
+
+    Mat cpunn, cpulin, cpucub, cpulanc;
+    cv::resize(cpu1, cpunn, Size(), 0.5, 0.5, INTER_NEAREST);
+    cv::resize(cpu1, cpulin, Size(), 0.5, 0.5, INTER_LINEAR);
+    cv::resize(cpu1, cpucub, Size(), 0.5, 0.5, INTER_CUBIC);
+    cv::resize(cpu1, cpulanc, Size(), 0.5, 0.5, INTER_LANCZOS4);
+
+    GpuMat gpu1(cpu1);
+    GpuMat gpunn, gpulin, gpucub, gpulanc;
+    cv::gpu::resize(gpu1, gpunn, Size(), 0.5, 0.5, INTER_NEAREST);
+    cv::gpu::resize(gpu1, gpulin, Size(), 0.5, 0.5, INTER_LINEAR);
+    cv::gpu::resize(gpu1, gpucub, Size(), 0.5, 0.5, INTER_CUBIC);
+    cv::gpu::resize(gpu1, gpulanc, Size(), 0.5, 0.5, INTER_LANCZOS4);
+
+    int nnres = CheckNorm(cpunn, gpunn);
+    int linres = CheckNorm(cpulin, gpulin);
+    int cubres = CheckNorm(cpucub, gpucub);
+    int lancres = CheckNorm(cpulanc, gpulanc);
+
+    return (nnres == CvTS::OK && linres == CvTS::OK && cubres == CvTS::OK && lancres == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageResizeTest CV_GpuNppImageResize_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// sum
+class CV_GpuNppImageSumTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageSumTest();
+
+protected:
+    virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageSumTest::CV_GpuNppImageSumTest(): CV_GpuNppImageArithmTest( "GPU-NppImageSum", "sum" )
+{
+}
+
+int CV_GpuNppImageSumTest::test( const Mat& cpu1, const Mat& )
+{
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4)
+        return CvTS::OK;
+
+    Scalar cpures = cv::sum(cpu1);
+
+    GpuMat gpu1(cpu1);
+    Scalar gpures = cv::gpu::sum(gpu1);
+
+    return CheckNorm(cpures, gpures);
+}
+
+CV_GpuNppImageSumTest CV_GpuNppImageSum_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// minMax
+class CV_GpuNppImageMinMaxTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageMinMaxTest();
+
+protected:
+    virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageMinMaxTest::CV_GpuNppImageMinMaxTest(): CV_GpuNppImageArithmTest( "GPU-NppImageMinMax", "minMax" )
+{
+}
+
+int CV_GpuNppImageMinMaxTest::test( const Mat& cpu1, const Mat& )
+{
+    if (cpu1.type() != CV_8UC1)
+        return CvTS::OK;
+
+    double cpumin, cpumax;
+    cv::minMaxLoc(cpu1, &cpumin, &cpumax);
+
+    GpuMat gpu1(cpu1);
+    double gpumin, gpumax;
+    cv::gpu::minMax(gpu1, &gpumin, &gpumax);
+
+    return (CheckNorm(cpumin, gpumin) == CvTS::OK && CheckNorm(cpumax, gpumax) == CvTS::OK) ? CvTS::OK : CvTS::FAIL_GENERIC;
+}
+
+CV_GpuNppImageMinMaxTest CV_GpuNppImageMinMax_test;
+
+////////////////////////////////////////////////////////////////////////////////
+// copyConstBorder
+class CV_GpuNppImageCopyConstBorderTest : public CV_GpuNppImageArithmTest
+{
+public:
+    CV_GpuNppImageCopyConstBorderTest();
+
+protected:
+    virtual int test(const Mat& cpu1, const Mat& cpu2);
+};
+
+CV_GpuNppImageCopyConstBorderTest::CV_GpuNppImageCopyConstBorderTest(): CV_GpuNppImageArithmTest( "GPU-NppImageCopyConstBorder", "copyConstBorder" )
+{
+}
+
+int CV_GpuNppImageCopyConstBorderTest::test( const Mat& cpu1, const Mat& )
+{
+    if (cpu1.type() != CV_8UC1 && cpu1.type() != CV_8UC4 && cpu1.type() != CV_32SC1)
+        return CvTS::OK;
+
+    Mat cpudst;
+    cv::copyMakeBorder(cpu1, cpudst, 5, 5, 5, 5, BORDER_CONSTANT);
+
+    GpuMat gpu1(cpu1);
+    GpuMat gpudst;
+    cv::gpu::copyConstBorder(gpu1, gpudst, 5, 5, 5, 5);
+
+    return CheckNorm(cpudst, gpudst);
+}
+
+CV_GpuNppImageCopyConstBorderTest CV_GpuNppImageCopyConstBorder_test;
\ No newline at end of file
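All of the new tests follow the same pattern: compute a CPU reference with the existing cv:: function, run the cv::gpu:: counterpart on the same input, and compare through CheckNorm, which now requires the NORM_INF difference to stay below machine epsilon. A skeleton for adding the next test in this style might look like the following (CV_GpuNppImageExampleTest is a hypothetical name; transpose is reused here only because both its CPU and GPU versions already exist):

    class CV_GpuNppImageExampleTest : public CV_GpuNppImageArithmTest
    {
    public:
        CV_GpuNppImageExampleTest() : CV_GpuNppImageArithmTest("GPU-NppImageExample", "example") {}

    protected:
        virtual int test(const Mat& cpu1, const Mat&)
        {
            if (cpu1.type() != CV_8UC1)       // skip formats the GPU path does not support
                return CvTS::OK;

            cv::Mat cpuRes;
            cv::transpose(cpu1, cpuRes);      // CPU reference

            GpuMat gpu1(cpu1);                // upload the input
            GpuMat gpuRes;
            cv::gpu::transpose(gpu1, gpuRes); // GPU result under test

            return CheckNorm(cpuRes, gpuRes); // exact comparison (NORM_INF < epsilon)
        }
    };

    CV_GpuNppImageExampleTest CV_GpuNppImageExample_test; // registers itself via the static instance

One caveat: the exact epsilon threshold is appropriate for bit-exact operations such as flip, sum and copyConstBorder, but interpolating paths such as resize may legitimately differ between the CPU and NPP implementations, so those comparisons may need a looser tolerance in practice.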