move CUDA core tests to core module

Author: Vladislav Vinogradov
Date:   2014-12-19 17:07:44 +03:00
parent 53862687d5
commit 1be1a28920
6 changed files with 97 additions and 207 deletions


@@ -0,0 +1,196 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../perf_precomp.hpp"
#ifdef HAVE_CUDA
#include "opencv2/core/cuda.hpp"
#include "opencv2/ts/cuda_perf.hpp"
using namespace std;
using namespace testing;
using namespace perf;
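// A brief note on the opencv_ts perf conventions used below (a sketch, not a
// spec): PERF_RUN_CUDA() reports whether the perf binary was asked to
// benchmark the CUDA path, so each test times either the GpuMat or the plain
// Mat implementation of the same operation; TEST_CYCLE() runs the following
// statement repeatedly while the sampler records timings; and
// SANITY_CHECK_NOTHING() marks the test as timing-only, with no regression
// data stored for later comparison.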
//////////////////////////////////////////////////////////////////////
// SetTo
PERF_TEST_P(Sz_Depth_Cn, CUDA_GpuMat_SetTo,
Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int channels = GET_PARAM(2);
const int type = CV_MAKE_TYPE(depth, channels);
const cv::Scalar val(1, 2, 3, 4);
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat dst(size, type);
TEST_CYCLE() dst.setTo(val);
}
else
{
cv::Mat dst(size, type);
TEST_CYCLE() dst.setTo(val);
}
SANITY_CHECK_NOTHING();
}
//////////////////////////////////////////////////////////////////////
// SetToMasked
PERF_TEST_P(Sz_Depth_Cn, CUDA_GpuMat_SetToMasked,
Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int channels = GET_PARAM(2);
const int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src(size, type);
cv::Mat mask(size, CV_8UC1);
declare.in(src, mask, WARMUP_RNG);
const cv::Scalar val(1, 2, 3, 4);
if (PERF_RUN_CUDA())
{
cv::cuda::GpuMat dst(src);
const cv::cuda::GpuMat d_mask(mask);
TEST_CYCLE() dst.setTo(val, d_mask);
}
else
{
cv::Mat dst = src;
TEST_CYCLE() dst.setTo(val, mask);
}
SANITY_CHECK_NOTHING();
}
//////////////////////////////////////////////////////////////////////
// CopyToMasked
PERF_TEST_P(Sz_Depth_Cn, CUDA_GpuMat_CopyToMasked,
Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
CUDA_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int channels = GET_PARAM(2);
const int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src(size, type);
cv::Mat mask(size, CV_8UC1);
declare.in(src, mask, WARMUP_RNG);
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
const cv::cuda::GpuMat d_mask(mask);
cv::cuda::GpuMat dst(d_src.size(), d_src.type(), cv::Scalar::all(0));
TEST_CYCLE() d_src.copyTo(dst, d_mask);
}
else
{
cv::Mat dst(src.size(), src.type(), cv::Scalar::all(0));
TEST_CYCLE() src.copyTo(dst, mask);
}
SANITY_CHECK_NOTHING();
}
//////////////////////////////////////////////////////////////////////
// ConvertTo
DEF_PARAM_TEST(Sz_2Depth, cv::Size, MatDepth, MatDepth);
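// Sz_2Depth is declared here because the ConvertTo benchmark is parameterized
// by a source depth and a destination depth rather than by a channel count,
// so the Sz_Depth_Cn tuple used by the tests above does not fit.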
PERF_TEST_P(Sz_2Depth, CUDA_GpuMat_ConvertTo,
Combine(CUDA_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F, CV_64F),
Values(CV_8U, CV_16U, CV_32F, CV_64F)))
{
const cv::Size size = GET_PARAM(0);
const int depth1 = GET_PARAM(1);
const int depth2 = GET_PARAM(2);
cv::Mat src(size, depth1);
declare.in(src, WARMUP_RNG);
const double a = 0.5;
const double b = 1.0;
if (PERF_RUN_CUDA())
{
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() d_src.convertTo(dst, depth2, a, b);
}
else
{
cv::Mat dst;
TEST_CYCLE() src.convertTo(dst, depth2, a, b);
}
SANITY_CHECK_NOTHING();
}
#endif


@@ -0,0 +1,119 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#ifdef HAVE_CUDA
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/ts/cuda_test.hpp"
using namespace testing;
using namespace cv;
using namespace cv::cuda;
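// RunSimpleTest exercises BufferPool, which hands out GpuMat workspace tied
// to the given Stream. The expected results follow directly from the calls
// below: buf0 is zeroed and converted with alpha = 1.0, beta = 1.0, so dst_1
// should come back as all ones, while buf2 is set to 2, so dst_2 should be
// all twos; CheckSimpleTest verifies exactly that. The inner scopes let the
// pooled buffers go out of scope before the next getBuffer() call.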
struct BufferPoolTest : TestWithParam<DeviceInfo>
{
void RunSimpleTest(Stream& stream, HostMem& dst_1, HostMem& dst_2)
{
BufferPool pool(stream);
{
GpuMat buf0 = pool.getBuffer(Size(640, 480), CV_8UC1);
EXPECT_FALSE( buf0.empty() );
buf0.setTo(Scalar::all(0), stream);
GpuMat buf1 = pool.getBuffer(Size(640, 480), CV_8UC1);
EXPECT_FALSE( buf1.empty() );
buf0.convertTo(buf1, buf1.type(), 1.0, 1.0, stream);
buf1.download(dst_1, stream);
}
{
GpuMat buf2 = pool.getBuffer(Size(1280, 1024), CV_32SC1);
EXPECT_FALSE( buf2.empty() );
buf2.setTo(Scalar::all(2), stream);
buf2.download(dst_2, stream);
}
}
void CheckSimpleTest(HostMem& dst_1, HostMem& dst_2)
{
EXPECT_MAT_NEAR(Mat(Size(640, 480), CV_8UC1, Scalar::all(1)), dst_1, 0.0);
EXPECT_MAT_NEAR(Mat(Size(1280, 1024), CV_32SC1, Scalar::all(2)), dst_2, 0.0);
}
};
CUDA_TEST_P(BufferPoolTest, FromNullStream)
{
HostMem dst_1, dst_2;
RunSimpleTest(Stream::Null(), dst_1, dst_2);
CheckSimpleTest(dst_1, dst_2);
}
CUDA_TEST_P(BufferPoolTest, From2Streams)
{
HostMem dst1_1, dst1_2;
HostMem dst2_1, dst2_2;
Stream stream1, stream2;
RunSimpleTest(stream1, dst1_1, dst1_2);
RunSimpleTest(stream2, dst2_1, dst2_2);
stream1.waitForCompletion();
stream2.waitForCompletion();
CheckSimpleTest(dst1_1, dst1_2);
CheckSimpleTest(dst2_1, dst2_2);
}
INSTANTIATE_TEST_CASE_P(CUDA_Stream, BufferPoolTest, ALL_DEVICES);
#endif // HAVE_CUDA


@@ -0,0 +1,364 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#ifdef HAVE_CUDA
#include "opencv2/core/cuda.hpp"
#include "opencv2/ts/cuda_test.hpp"
using namespace cvtest;
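// Common pattern in the tests below: createMat()/loadMat() come from the
// cvtest CUDA helpers and, when useRoi is true, allocate a larger matrix and
// return a sub-view so that non-continuous GpuMat layouts are covered as
// well. For CV_64F data on devices without NATIVE_DOUBLE support the
// operations are expected to fail with cv::Error::StsUnsupportedFormat,
// which is what the try/catch branches assert.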
////////////////////////////////////////////////////////////////////////////////
// SetTo
PARAM_TEST_CASE(GpuMat_SetTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::cuda::setDevice(devInfo.deviceID());
}
};
CUDA_TEST_P(GpuMat_SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(zero);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
CUDA_TEST_P(GpuMat_SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
CUDA_TEST_P(GpuMat_SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
CUDA_TEST_P(GpuMat_SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val, loadMat(mask));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat mat = loadMat(mat_gold, useRoi);
mat.setTo(val, loadMat(mask, useRoi));
mat_gold.setTo(val, mask);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_SetTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// CopyTo
PARAM_TEST_CASE(GpuMat_CopyTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::cuda::setDevice(devInfo.deviceID());
}
};
CUDA_TEST_P(GpuMat_CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, type, useRoi);
d_src.copyTo(dst);
EXPECT_MAT_NEAR(src, dst, 0.0);
}
CUDA_TEST_P(GpuMat_CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.copyTo(dst, loadMat(mask, useRoi));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
d_src.copyTo(dst, loadMat(mask, useRoi));
cv::Mat dst_gold = cv::Mat::zeros(size, type);
src.copyTo(dst_gold, mask);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_CopyTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// ConvertTo
PARAM_TEST_CASE(GpuMat_ConvertTo, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int depth1;
int depth2;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth1 = GET_PARAM(2);
depth2 = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::cuda::setDevice(devInfo.deviceID());
}
};
CUDA_TEST_P(GpuMat_ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.convertTo(dst, depth2);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
}
}
CUDA_TEST_P(GpuMat_ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
double b = randomDouble(-10.0, 10.0);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.convertTo(dst, depth2, a, b);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
}
}
else
{
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2, a, b);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
}
}
INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_ConvertTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
ALL_DEPTH,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// ensureSizeIsEnough
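// ensureSizeIsEnough() only reallocates when the requested matrix does not
// fit into the memory the buffer already owns. Both requests below (10x20
// and 20x30, CV_8U) fit inside the original 100x100 allocation, so
// buffer.data is expected to keep pointing at the same memory as `old`.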
struct EnsureSizeIsEnough : testing::TestWithParam<cv::cuda::DeviceInfo>
{
virtual void SetUp()
{
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
}
};
CUDA_TEST_P(EnsureSizeIsEnough, BufferReuse)
{
cv::cuda::GpuMat buffer(100, 100, CV_8U);
cv::cuda::GpuMat old = buffer;
// don't reallocate memory
cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
EXPECT_EQ(10, buffer.rows);
EXPECT_EQ(20, buffer.cols);
EXPECT_EQ(CV_8UC1, buffer.type());
EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
// don't reallocate memory
cv::cuda::ensureSizeIsEnough(20, 30, CV_8U, buffer);
EXPECT_EQ(20, buffer.rows);
EXPECT_EQ(30, buffer.cols);
EXPECT_EQ(CV_8UC1, buffer.type());
EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
}
INSTANTIATE_TEST_CASE_P(CUDA, EnsureSizeIsEnough, ALL_DEVICES);
#endif // HAVE_CUDA


@@ -0,0 +1,456 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#if defined(HAVE_CUDA) && defined(HAVE_OPENGL)
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/opengl.hpp"
#include "opencv2/ts/cuda_test.hpp"
using namespace cvtest;
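// These tests need a live OpenGL context, which SetUpTestCase() creates by
// opening a WINDOW_OPENGL window; TearDownTestCase() destroys it again. The
// trailing `true` passed to the ogl::Buffer constructors and copyFrom()
// calls below is the autoRelease flag, which asks OpenCV to release the
// underlying GL buffer object automatically when the wrapper is destroyed.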
/////////////////////////////////////////////
// Buffer
PARAM_TEST_CASE(Buffer, cv::Size, MatType)
{
static void SetUpTestCase()
{
cv::namedWindow("test", cv::WINDOW_OPENGL);
}
static void TearDownTestCase()
{
cv::destroyAllWindows();
}
cv::Size size;
int type;
virtual void SetUp()
{
size = GET_PARAM(0);
type = GET_PARAM(1);
}
};
CUDA_TEST_P(Buffer, Constructor1)
{
cv::ogl::Buffer buf(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type());
}
CUDA_TEST_P(Buffer, Constructor2)
{
cv::ogl::Buffer buf(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type());
}
CUDA_TEST_P(Buffer, ConstructorFromMat)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Buffer buf(d_gold, cv::ogl::Buffer::ARRAY_BUFFER);
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, ConstructorFromBuffer)
{
cv::ogl::Buffer buf_gold(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::ogl::Buffer buf(buf_gold);
EXPECT_EQ(buf_gold.bufId(), buf.bufId());
EXPECT_EQ(buf_gold.rows(), buf.rows());
EXPECT_EQ(buf_gold.cols(), buf.cols());
EXPECT_EQ(buf_gold.type(), buf.type());
}
CUDA_TEST_P(Buffer, Create)
{
cv::ogl::Buffer buf;
buf.create(size.height, size.width, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_EQ(size.height, buf.rows());
EXPECT_EQ(size.width, buf.cols());
EXPECT_EQ(type, buf.type());
}
CUDA_TEST_P(Buffer, CopyFromMat)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf;
buf.copyFrom(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Buffer buf;
buf.copyFrom(d_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, CopyFromBuffer)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::ogl::Buffer buf;
buf.copyFrom(buf_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_NE(buf_gold.bufId(), buf.bufId());
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, CopyToGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::cuda::GpuMat dst;
buf.copyTo(dst);
EXPECT_MAT_NEAR(gold, dst, 0);
}
CUDA_TEST_P(Buffer, CopyToBuffer)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::ogl::Buffer dst;
buf.copyTo(dst);
dst.setAutoRelease(true);
EXPECT_NE(buf.bufId(), dst.bufId());
cv::Mat bufData;
dst.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, Clone)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::ogl::Buffer dst = buf.clone(cv::ogl::Buffer::ARRAY_BUFFER, true);
EXPECT_NE(buf.bufId(), dst.bufId());
cv::Mat bufData;
dst.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, MapHostRead)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat dst = buf.mapHost(cv::ogl::Buffer::READ_ONLY);
EXPECT_MAT_NEAR(gold, dst, 0);
buf.unmapHost();
}
CUDA_TEST_P(Buffer, MapHostWrite)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(size, type, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::Mat dst = buf.mapHost(cv::ogl::Buffer::WRITE_ONLY);
gold.copyTo(dst);
buf.unmapHost();
dst.release();
cv::Mat bufData;
buf.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 0);
}
CUDA_TEST_P(Buffer, MapDevice)
{
cv::Mat gold = randomMat(size, type);
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::cuda::GpuMat dst = buf.mapDevice();
EXPECT_MAT_NEAR(gold, dst, 0);
buf.unmapDevice();
}
INSTANTIATE_TEST_CASE_P(OpenGL, Buffer, testing::Combine(DIFFERENT_SIZES, ALL_TYPES));
/////////////////////////////////////////////
// Texture2D
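// The Texture2D fixture maps the channel count to a texture format
// (1 -> DEPTH_COMPONENT, 3 -> RGB, 4 -> RGBA) and keeps the random input in
// [0, 255] for CV_8U and [0, 1] for float depths, presumably because the
// data makes a round trip through the texture's internal (normalized)
// representation; copyTo() converts back to the requested depth, which is
// why comparisons use a small 1e-2 tolerance rather than exact equality.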
PARAM_TEST_CASE(Texture2D, cv::Size, MatType)
{
static void SetUpTestCase()
{
cv::namedWindow("test", cv::WINDOW_OPENGL);
}
static void TearDownTestCase()
{
cv::destroyAllWindows();
}
cv::Size size;
int type;
int depth;
int cn;
cv::ogl::Texture2D::Format format;
virtual void SetUp()
{
size = GET_PARAM(0);
type = GET_PARAM(1);
depth = CV_MAT_DEPTH(type);
cn = CV_MAT_CN(type);
format = cn == 1 ? cv::ogl::Texture2D::DEPTH_COMPONENT : cn == 3 ? cv::ogl::Texture2D::RGB : cn == 4 ? cv::ogl::Texture2D::RGBA : cv::ogl::Texture2D::NONE;
}
};
CUDA_TEST_P(Texture2D, Constructor1)
{
cv::ogl::Texture2D tex(size.height, size.width, format, true);
EXPECT_EQ(size.height, tex.rows());
EXPECT_EQ(size.width, tex.cols());
EXPECT_EQ(format, tex.format());
}
CUDA_TEST_P(Texture2D, Constructor2)
{
cv::ogl::Texture2D tex(size, format, true);
EXPECT_EQ(size.height, tex.rows());
EXPECT_EQ(size.width, tex.cols());
EXPECT_EQ(format, tex.format());
}
CUDA_TEST_P(Texture2D, ConstructorFromMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Texture2D tex(gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Texture2D tex(d_gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, ConstructorFromBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
cv::ogl::Texture2D tex(buf_gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, ConstructorFromTexture2D)
{
cv::ogl::Texture2D tex_gold(size, format, true);
cv::ogl::Texture2D tex(tex_gold);
EXPECT_EQ(tex_gold.texId(), tex.texId());
EXPECT_EQ(tex_gold.rows(), tex.rows());
EXPECT_EQ(tex_gold.cols(), tex.cols());
EXPECT_EQ(tex_gold.format(), tex.format());
}
CUDA_TEST_P(Texture2D, Create)
{
cv::ogl::Texture2D tex;
tex.create(size.height, size.width, format, true);
EXPECT_EQ(size.height, tex.rows());
EXPECT_EQ(size.width, tex.cols());
EXPECT_EQ(format, tex.format());
}
CUDA_TEST_P(Texture2D, CopyFromMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Texture2D tex;
tex.copyFrom(gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Texture2D tex;
tex.copyFrom(d_gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, CopyFromBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Buffer buf_gold(gold, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
cv::ogl::Texture2D tex;
tex.copyFrom(buf_gold, true);
cv::Mat texData;
tex.copyTo(texData, depth);
EXPECT_MAT_NEAR(gold, texData, 1e-2);
}
CUDA_TEST_P(Texture2D, CopyToGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Texture2D tex(gold, true);
cv::cuda::GpuMat dst;
tex.copyTo(dst, depth);
EXPECT_MAT_NEAR(gold, dst, 1e-2);
}
CUDA_TEST_P(Texture2D, CopyToBuffer)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::ogl::Texture2D tex(gold, true);
cv::ogl::Buffer dst;
tex.copyTo(dst, depth, true);
cv::Mat bufData;
dst.copyTo(bufData);
EXPECT_MAT_NEAR(gold, bufData, 1e-2);
}
INSTANTIATE_TEST_CASE_P(OpenGL, Texture2D, testing::Combine(DIFFERENT_SIZES, testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4)));
#endif


@@ -0,0 +1,134 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../test_precomp.hpp"
#ifdef HAVE_CUDA
#include <cuda_runtime.h>
#include "opencv2/core/cuda.hpp"
#include "opencv2/ts/cuda_test.hpp"
using namespace cvtest;
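// The Async fixture allocates src as page-locked HostMem so that uploads and
// downloads queued on a Stream can run asynchronously with respect to the
// host. Each test enqueues its checks as a host callback on the same stream,
// so they execute only after the preceding GPU work and the download have
// finished; waitForCompletion() then blocks until the callback has run.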
struct Async : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::cuda::HostMem src;
cv::cuda::GpuMat d_src;
cv::cuda::HostMem dst;
cv::cuda::GpuMat d_dst;
virtual void SetUp()
{
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
src = cv::cuda::HostMem(cv::cuda::HostMem::PAGE_LOCKED);
cv::Mat m = randomMat(cv::Size(128, 128), CV_8UC1);
m.copyTo(src);
}
};
void checkMemSet(int status, void* userData)
{
ASSERT_EQ(cudaSuccess, status);
Async* test = reinterpret_cast<Async*>(userData);
cv::cuda::HostMem src = test->src;
cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold = cv::Mat::zeros(src.size(), src.type());
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
CUDA_TEST_P(Async, MemSet)
{
cv::cuda::Stream stream;
d_dst.upload(src);
d_dst.setTo(cv::Scalar::all(0), stream);
d_dst.download(dst, stream);
Async* test = this;
stream.enqueueHostCallback(checkMemSet, test);
stream.waitForCompletion();
}
void checkConvert(int status, void* userData)
{
ASSERT_EQ(cudaSuccess, status);
Async* test = reinterpret_cast<Async*>(userData);
cv::cuda::HostMem src = test->src;
cv::cuda::HostMem dst = test->dst;
cv::Mat dst_gold;
src.createMatHeader().convertTo(dst_gold, CV_32S);
ASSERT_MAT_NEAR(dst_gold, dst, 0);
}
CUDA_TEST_P(Async, Convert)
{
cv::cuda::Stream stream;
d_src.upload(src, stream);
d_src.convertTo(d_dst, CV_32S, stream);
d_dst.download(dst, stream);
Async* test = this;
stream.enqueueHostCallback(checkConvert, test);
stream.waitForCompletion();
}
INSTANTIATE_TEST_CASE_P(CUDA_Stream, Async, ALL_DEVICES);
#endif // HAVE_CUDA