the code was refactored and old test system code was removed
This commit is contained in:
parent
4c28a6f0f6
commit
d02ccc9590
@ -42,153 +42,54 @@
|
||||
|
||||
#include "perf_precomp.hpp"
|
||||
|
||||
static int old_main(int argc, const char *argv[])
|
||||
const char * impls[] =
|
||||
{
|
||||
const char *keys =
|
||||
"{ h | help | false | print help message }"
|
||||
"{ f | filter | | filter for test }"
|
||||
"{ w | workdir | | set working directory }"
|
||||
"{ l | list | false | show all tests }"
|
||||
"{ d | device | 0 | device id }"
|
||||
"{ c | cpu_ocl | false | use cpu as ocl device}"
|
||||
"{ i | iters | 10 | iteration count }"
|
||||
"{ m | warmup | 1 | gpu warm up iteration count}"
|
||||
"{ t | xtop | 1.1 | xfactor top boundary}"
|
||||
"{ b | xbottom | 0.9 | xfactor bottom boundary}"
|
||||
"{ v | verify | false | only run gpu once to verify if problems occur}";
|
||||
IMPL_OCL,
|
||||
IMPL_PLAIN,
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
IMPL_GPU
|
||||
#endif
|
||||
};
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
const char * keys =
|
||||
"{ h | help | false | print help message }"
|
||||
"{ t | type | gpu | set device type:cpu or gpu}"
|
||||
"{ p | platform | 0 | set platform id }"
|
||||
"{ d | device | 0 | set device id }";
|
||||
|
||||
redirectError(cvErrorCallback);
|
||||
CommandLineParser cmd(argc, argv, keys);
|
||||
if (cmd.get<bool>("help"))
|
||||
{
|
||||
cout << "Avaible options:" << endl;
|
||||
cout << "Available options besides google test option:" << endl;
|
||||
cmd.printParams();
|
||||
return 0;
|
||||
}
|
||||
|
||||
// get ocl devices
|
||||
bool use_cpu = cmd.get<bool>("c");
|
||||
vector<ocl::Info> oclinfo;
|
||||
int num_devices = 0;
|
||||
if(use_cpu)
|
||||
num_devices = getDevice(oclinfo, ocl::CVCL_DEVICE_TYPE_CPU);
|
||||
else
|
||||
num_devices = getDevice(oclinfo);
|
||||
if (num_devices < 1)
|
||||
{
|
||||
cerr << "no device found\n";
|
||||
return -1;
|
||||
}
|
||||
|
||||
// show device info
|
||||
int devidx = 0;
|
||||
for (size_t i = 0; i < oclinfo.size(); i++)
|
||||
{
|
||||
for (size_t j = 0; j < oclinfo[i].DeviceName.size(); j++)
|
||||
{
|
||||
cout << "device " << devidx++ << ": " << oclinfo[i].DeviceName[j] << endl;
|
||||
}
|
||||
}
|
||||
|
||||
string type = cmd.get<string>("type");
|
||||
unsigned int pid = cmd.get<unsigned int>("platform");
|
||||
int device = cmd.get<int>("device");
|
||||
if (device < 0 || device >= num_devices)
|
||||
|
||||
int flag = type == "cpu" ? cv::ocl::CVCL_DEVICE_TYPE_CPU :
|
||||
cv::ocl::CVCL_DEVICE_TYPE_GPU;
|
||||
|
||||
std::vector<cv::ocl::Info> oclinfo;
|
||||
int devnums = cv::ocl::getDevice(oclinfo, flag);
|
||||
if (devnums <= device || device < 0)
|
||||
{
|
||||
cerr << "Invalid device ID" << endl;
|
||||
std::cout << "device invalid\n";
|
||||
return -1;
|
||||
}
|
||||
|
||||
// set this to overwrite binary cache every time the test starts
|
||||
ocl::setBinaryDiskCache(ocl::CACHE_UPDATE);
|
||||
|
||||
if (cmd.get<bool>("verify"))
|
||||
if (pid >= oclinfo.size())
|
||||
{
|
||||
TestSystem::instance().setNumIters(1);
|
||||
TestSystem::instance().setGPUWarmupIters(0);
|
||||
TestSystem::instance().setCPUIters(0);
|
||||
std::cout << "platform invalid\n";
|
||||
return -1;
|
||||
}
|
||||
|
||||
devidx = 0;
|
||||
for (size_t i = 0; i < oclinfo.size(); i++)
|
||||
{
|
||||
for (size_t j = 0; j < oclinfo[i].DeviceName.size(); j++, devidx++)
|
||||
{
|
||||
if (device == devidx)
|
||||
{
|
||||
ocl::setDevice(oclinfo[i], (int)j);
|
||||
TestSystem::instance().setRecordName(oclinfo[i].DeviceName[j]);
|
||||
cout << "use " << devidx << ": " <<oclinfo[i].DeviceName[j] << endl;
|
||||
goto END_DEV;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
END_DEV:
|
||||
|
||||
string filter = cmd.get<string>("filter");
|
||||
string workdir = cmd.get<string>("workdir");
|
||||
bool list = cmd.get<bool>("list");
|
||||
int iters = cmd.get<int>("iters");
|
||||
int wu_iters = cmd.get<int>("warmup");
|
||||
double x_top = cmd.get<double>("xtop");
|
||||
double x_bottom = cmd.get<double>("xbottom");
|
||||
|
||||
TestSystem::instance().setTopThreshold(x_top);
|
||||
TestSystem::instance().setBottomThreshold(x_bottom);
|
||||
|
||||
if (!filter.empty())
|
||||
{
|
||||
TestSystem::instance().setTestFilter(filter);
|
||||
}
|
||||
|
||||
if (!workdir.empty())
|
||||
{
|
||||
if (workdir[workdir.size() - 1] != '/' && workdir[workdir.size() - 1] != '\\')
|
||||
{
|
||||
workdir += '/';
|
||||
}
|
||||
|
||||
TestSystem::instance().setWorkingDir(workdir);
|
||||
}
|
||||
|
||||
if (list)
|
||||
{
|
||||
TestSystem::instance().setListMode(true);
|
||||
}
|
||||
|
||||
TestSystem::instance().setNumIters(iters);
|
||||
TestSystem::instance().setGPUWarmupIters(wu_iters);
|
||||
|
||||
TestSystem::instance().run();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const char * impls[] =
|
||||
{
|
||||
"ocl",
|
||||
"plain",
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
"gpu"
|
||||
#endif
|
||||
};
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// temp solution: if no '--gtest_' and '--perf_' args switch to old behavior
|
||||
bool useGTest = false;
|
||||
|
||||
for(int i=1; i<argc; i++)
|
||||
{
|
||||
std::string arg( argv[i] );
|
||||
if( arg.find("--gtest_")==0 || arg.find("--perf_")==0 )
|
||||
useGTest = true;
|
||||
|
||||
// if (arg == "--perf_verify_sanity")
|
||||
// argv[i] = (char*)"--perf_no_verify_sanity";
|
||||
}
|
||||
|
||||
if( !useGTest )
|
||||
return old_main(argc, (const char**)argv);
|
||||
cv::ocl::setDevice(oclinfo[pid], device);
|
||||
cv::ocl::setBinaryDiskCache(cv::ocl::CACHE_UPDATE);
|
||||
|
||||
CV_PERF_TEST_MAIN_INTERNALS(ocl, impls)
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -82,12 +82,9 @@ typedef TestBaseWithParam<Size> blendLinearFixture;
|
||||
|
||||
PERF_TEST_P(blendLinearFixture, blendLinear, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const int type = CV_8UC1;
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src1(srcSize, type), src2(srcSize, CV_8UC1), dst;
|
||||
Mat weights1(srcSize, CV_32FC1), weights2(srcSize, CV_32FC1);
|
||||
|
||||
@ -95,8 +92,7 @@ PERF_TEST_P(blendLinearFixture, blendLinear, OCL_TYPICAL_MAT_SIZES)
|
||||
randu(weights1, 0.0f, 1.0f);
|
||||
randu(weights2, 0.0f, 1.0f);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc1(src1), oclSrc2(src2), oclDst;
|
||||
ocl::oclMat oclWeights1(weights1), oclWeights2(weights2);
|
||||
@ -107,16 +103,12 @@ PERF_TEST_P(blendLinearFixture, blendLinear, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() blendLinearGold<uchar>(src1, src2, weights1, weights2, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -53,11 +53,10 @@ using namespace perf;
|
||||
|
||||
typedef TestBaseWithParam<Size> BruteForceMatcherFixture;
|
||||
|
||||
PERF_TEST_P(BruteForceMatcherFixture, match,
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(BruteForceMatcherFixture, DISABLED_match,
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES) // TODO too big difference between implementations
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
vector<DMatch> matches;
|
||||
Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
|
||||
@ -65,16 +64,15 @@ PERF_TEST_P(BruteForceMatcherFixture, match,
|
||||
randu(query, 0.0f, 1.0f);
|
||||
randu(train, 0.0f, 1.0f);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
BFMatcher matcher(NORM_L2);
|
||||
TEST_CYCLE() matcher.match(query, train, matches);
|
||||
|
||||
SANITY_CHECK_MATCHES(matches);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
// Init GPU matcher
|
||||
ocl::BruteForceMatcher_OCL_base oclMatcher(ocl::BruteForceMatcher_OCL_base::L2Dist);
|
||||
ocl::oclMat oclQuery(query), oclTrain(train);
|
||||
|
||||
@ -82,53 +80,14 @@ PERF_TEST_P(BruteForceMatcherFixture, match,
|
||||
|
||||
SANITY_CHECK_MATCHES(matches);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
//PERF_TEST_P(BruteForceMatcherFixture, matchSingle,
|
||||
// OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
//{
|
||||
// const Size srcSize = GetParam();
|
||||
// const string impl = getSelectedImpl();
|
||||
|
||||
// Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
|
||||
// Mat trainIdx, distance;
|
||||
|
||||
// randu(query, 0.0f, 1.0f);
|
||||
// randu(train, 0.0f, 1.0f);
|
||||
|
||||
// if (impl == "plain")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
// else if (impl == "ocl")
|
||||
// {
|
||||
// ocl::oclMat oclQuery(query), oclTrain(train), oclTrainIdx, oclDistance;
|
||||
|
||||
// TEST_CYCLE() oclMatcher->matchSingle(oclQuery, oclTrain, oclTrainIdx, oclDistance);
|
||||
|
||||
// oclTrainIdx.download(trainIdx);
|
||||
// oclDistance.download(distance);
|
||||
|
||||
// SANITY_CHECK(trainIdx);
|
||||
// SANITY_CHECK(distance);
|
||||
// }
|
||||
//#ifdef HAVE_OPENCV_GPU
|
||||
// else if (impl == "gpu")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//#endif
|
||||
// else
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//}
|
||||
|
||||
PERF_TEST_P(BruteForceMatcherFixture, knnMatch,
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(BruteForceMatcherFixture, DISABLED_knnMatch,
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES) // TODO too many outliers
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
vector<vector<DMatch> > matches(2);
|
||||
Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
|
||||
@ -139,7 +98,7 @@ PERF_TEST_P(BruteForceMatcherFixture, knnMatch,
|
||||
if (srcSize.height == 2000)
|
||||
declare.time(8);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
BFMatcher matcher (NORM_L2);
|
||||
TEST_CYCLE() matcher.knnMatch(query, train, matches, 2);
|
||||
@ -148,7 +107,7 @@ PERF_TEST_P(BruteForceMatcherFixture, knnMatch,
|
||||
SANITY_CHECK_MATCHES(matches0);
|
||||
SANITY_CHECK_MATCHES(matches1);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::BruteForceMatcher_OCL_base oclMatcher(ocl::BruteForceMatcher_OCL_base::L2Dist);
|
||||
ocl::oclMat oclQuery(query), oclTrain(train);
|
||||
@ -159,55 +118,14 @@ PERF_TEST_P(BruteForceMatcherFixture, knnMatch,
|
||||
SANITY_CHECK_MATCHES(matches0);
|
||||
SANITY_CHECK_MATCHES(matches1);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
//PERF_TEST_P(BruteForceMatcherFixture, knnMatchSingle,
|
||||
// OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
//{
|
||||
// const Size srcSize = GetParam();
|
||||
// const string impl = getSelectedImpl();
|
||||
|
||||
// Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
|
||||
// Mat trainIdx, distance, allDist;
|
||||
|
||||
// randu(query, 0.0f, 1.0f);
|
||||
// randu(train, 0.0f, 1.0f);
|
||||
|
||||
// if (impl == "plain")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
// else if (impl == "ocl")
|
||||
// {
|
||||
// ocl::oclMat oclQuery(query), oclTrain(train), oclTrainIdx, oclDistance, oclAllDist;
|
||||
|
||||
// TEST_CYCLE() oclMatcher->knnMatchSingle(oclQuery, oclTrain, oclTrainIdx, oclDistance, oclAllDist, 2);
|
||||
|
||||
// oclTrainIdx.download(trainIdx);
|
||||
// oclDistance.download(distance);
|
||||
// oclAllDist.download(allDist);
|
||||
|
||||
// SANITY_CHECK(trainIdx);
|
||||
// SANITY_CHECK(distance);
|
||||
// SANITY_CHECK(allDist);
|
||||
// }
|
||||
//#ifdef HAVE_OPENCV_GPU
|
||||
// else if (impl == "gpu")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//#endif
|
||||
// else
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//}
|
||||
|
||||
PERF_TEST_P(BruteForceMatcherFixture, DISABLED_radiusMatch,
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
OCL_BFMATCHER_TYPICAL_MAT_SIZES) // TODO too many outliers
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
const float max_distance = 2.0f;
|
||||
vector<vector<DMatch> > matches(2);
|
||||
@ -218,7 +136,7 @@ PERF_TEST_P(BruteForceMatcherFixture, DISABLED_radiusMatch,
|
||||
randu(query, 0.0f, 1.0f);
|
||||
randu(train, 0.0f, 1.0f);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
BFMatcher matcher (NORM_L2);
|
||||
TEST_CYCLE() matcher.radiusMatch(query, matches, max_distance);
|
||||
@ -227,7 +145,7 @@ PERF_TEST_P(BruteForceMatcherFixture, DISABLED_radiusMatch,
|
||||
SANITY_CHECK_MATCHES(matches0);
|
||||
SANITY_CHECK_MATCHES(matches1);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclQuery(query), oclTrain(train);
|
||||
ocl::BruteForceMatcher_OCL_base oclMatcher(ocl::BruteForceMatcher_OCL_base::L2Dist);
|
||||
@ -238,49 +156,8 @@ PERF_TEST_P(BruteForceMatcherFixture, DISABLED_radiusMatch,
|
||||
SANITY_CHECK_MATCHES(matches0);
|
||||
SANITY_CHECK_MATCHES(matches1);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
//PERF_TEST_P(BruteForceMatcherFixture, radiusMatchSingle,
|
||||
// OCL_BFMATCHER_TYPICAL_MAT_SIZES)
|
||||
//{
|
||||
// const Size srcSize = GetParam();
|
||||
// const string impl = getSelectedImpl();
|
||||
|
||||
// const float max_distance = 2.0f;
|
||||
// Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
|
||||
// Mat trainIdx, distance, nMatches;
|
||||
|
||||
// randu(query, 0.0f, 1.0f);
|
||||
// randu(train, 0.0f, 1.0f);
|
||||
|
||||
// if (impl == "plain")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
// else if (impl == "ocl")
|
||||
// {
|
||||
// ocl::oclMat oclQuery(query), oclTrain(train), oclTrainIdx, oclDistance, oclNMatches;
|
||||
|
||||
// TEST_CYCLE() oclMatcher->radiusMatchSingle(oclQuery, oclTrain, oclTrainIdx, oclDistance, oclNMatches, max_distance);
|
||||
|
||||
// oclTrainIdx.download(trainIdx);
|
||||
// oclDistance.download(distance);
|
||||
// oclNMatches.download(nMatches);
|
||||
|
||||
// SANITY_CHECK(trainIdx);
|
||||
// SANITY_CHECK(distance);
|
||||
// SANITY_CHECK(nMatches);
|
||||
// }
|
||||
//#ifdef HAVE_OPENCV_GPU
|
||||
// else if (impl == "gpu")
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//#endif
|
||||
// else
|
||||
// CV_TEST_FAIL_NO_IMPL();
|
||||
//}
|
||||
|
||||
#undef OCL_BFMATCHER_TYPICAL_MAT_SIZES
|
||||
|
@ -48,7 +48,7 @@
|
||||
|
||||
///////////// StereoMatchBM ////////////////////////
|
||||
|
||||
PERF_TEST(StereoMatchBMFixture, DISABLED_StereoMatchBM)
|
||||
PERF_TEST(StereoMatchBMFixture, DISABLED_StereoMatchBM) // TODO doesn't work properly
|
||||
{
|
||||
Mat left_image = imread(getDataPath("gpu/stereobm/aloe-L.png"), cv::IMREAD_GRAYSCALE);
|
||||
Mat right_image = imread(getDataPath("gpu/stereobm/aloe-R.png"), cv::IMREAD_GRAYSCALE);
|
||||
@ -58,13 +58,12 @@ PERF_TEST(StereoMatchBMFixture, DISABLED_StereoMatchBM)
|
||||
ASSERT_TRUE(right_image.size() == left_image.size());
|
||||
ASSERT_TRUE(right_image.size() == left_image.size());
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
const int n_disp = 128, winSize = 19;
|
||||
Mat disp(left_image.size(), CV_16SC1);
|
||||
|
||||
declare.in(left_image, right_image).out(disp);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclLeft(left_image), oclRight(right_image),
|
||||
oclDisp(left_image.size(), CV_16SC1);
|
||||
@ -76,7 +75,7 @@ PERF_TEST(StereoMatchBMFixture, DISABLED_StereoMatchBM)
|
||||
|
||||
SANITY_CHECK(disp);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
StereoBM bm(0, n_disp, winSize);
|
||||
|
||||
@ -84,10 +83,6 @@ PERF_TEST(StereoMatchBMFixture, DISABLED_StereoMatchBM)
|
||||
|
||||
SANITY_CHECK(disp);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
else
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -49,34 +49,29 @@ using namespace perf;
|
||||
|
||||
///////////// Canny ////////////////////////
|
||||
|
||||
PERF_TEST(CannyFixture, Canny)
|
||||
PERF_TEST(CannyFixture, DISABLED_Canny) // TODO difference between implmentations
|
||||
{
|
||||
Mat img = imread(getDataPath("gpu/stereobm/aloe-L.png"), cv::IMREAD_GRAYSCALE),
|
||||
edges(img.size(), CV_8UC1);
|
||||
ASSERT_TRUE(!img.empty()) << "can't open aloeL.jpg";
|
||||
ASSERT_TRUE(!img.empty()) << "can't open aloe-L.png";
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
declare.in(img).out(edges);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclImg(img), oclEdges(img.size(), CV_8UC1);
|
||||
|
||||
TEST_CYCLE() Canny(oclImg, oclEdges, 50.0, 100.0);
|
||||
TEST_CYCLE() ocl::Canny(oclImg, oclEdges, 50.0, 100.0);
|
||||
oclEdges.download(edges);
|
||||
|
||||
SANITY_CHECK(edges);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() Canny(img, edges, 50.0, 100.0);
|
||||
|
||||
SANITY_CHECK(edges);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -54,12 +54,11 @@ typedef TestBaseWithParam<Size> cvtColorFixture;
|
||||
PERF_TEST_P(cvtColorFixture, cvtColor, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, CV_8UC4), dst(srcSize, CV_8UC4);
|
||||
declare.in(src).out(dst);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(src.size(), CV_8UC4);
|
||||
|
||||
@ -68,16 +67,12 @@ PERF_TEST_P(cvtColorFixture, cvtColor, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::cvtColor(src, dst, CV_RGBA2GRAY, 4);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,37 +51,33 @@ using namespace perf;
|
||||
|
||||
typedef TestBaseWithParam<Size> dftFixture;
|
||||
|
||||
PERF_TEST_P(dftFixture, DISABLED_dft, OCL_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(dftFixture, DISABLED_dft, OCL_TYPICAL_MAT_SIZES) // TODO not implemented
|
||||
{
|
||||
const std::string impl = getSelectedImpl();
|
||||
Size srcSize = GetParam();
|
||||
const Size srcSize = GetParam();
|
||||
|
||||
Mat src(srcSize, CV_32FC2), dst;
|
||||
randu(src, 0.0f, 1.0f);
|
||||
declare.in(src);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (srcSize == OCL_SIZE_4000)
|
||||
declare.time(7.4);
|
||||
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst;
|
||||
|
||||
EXPECT_NO_THROW({
|
||||
TEST_CYCLE() cv::ocl::dft(oclSrc, oclDst);
|
||||
});
|
||||
TEST_CYCLE() cv::ocl::dft(oclSrc, oclDst);
|
||||
|
||||
oclDst.download(dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::dft(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,29 +51,23 @@ using std::tr1::tuple;
|
||||
|
||||
///////////// Blur////////////////////////
|
||||
|
||||
CV_ENUM(BlurMatType, CV_8UC1, CV_8UC4)
|
||||
|
||||
typedef tuple<Size, BlurMatType> BlurParams;
|
||||
typedef TestBaseWithParam<BlurParams> BlurFixture;
|
||||
typedef Size_MatType BlurFixture;
|
||||
|
||||
PERF_TEST_P(BlurFixture, Blur,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
BlurMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
BlurParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params), ksize(3, 3);
|
||||
const int type = get<1>(params), bordertype = BORDER_CONSTANT;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(5);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -83,44 +77,35 @@ PERF_TEST_P(BlurFixture, Blur,
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::blur(src, dst, ksize, Point(-1, -1), bordertype);
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// Laplacian////////////////////////
|
||||
|
||||
typedef BlurMatType LaplacianMatType;
|
||||
typedef tuple<Size, LaplacianMatType> LaplacianParams;
|
||||
typedef TestBaseWithParam<LaplacianParams> LaplacianFixture;
|
||||
typedef Size_MatType LaplacianFixture;
|
||||
|
||||
PERF_TEST_P(LaplacianFixture, Laplacian,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
LaplacianMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
LaplacianParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), ksize = 3;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(6);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -130,46 +115,36 @@ PERF_TEST_P(LaplacianFixture, Laplacian,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::Laplacian(src, dst, -1, ksize, 1);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// Erode ////////////////////
|
||||
|
||||
CV_ENUM(ErodeMatType, CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4)
|
||||
|
||||
typedef tuple<Size, ErodeMatType> ErodeParams;
|
||||
typedef TestBaseWithParam<ErodeParams> ErodeFixture;
|
||||
typedef Size_MatType ErodeFixture;
|
||||
|
||||
PERF_TEST_P(ErodeFixture, Erode,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
ErodeMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4)))
|
||||
{
|
||||
// getting params
|
||||
ErodeParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), ksize = 3;
|
||||
const Mat ker = getStructuringElement(MORPH_RECT, Size(ksize, ksize));
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst).in(ker);
|
||||
|
||||
if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(5);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type), oclKer(ker);
|
||||
|
||||
@ -179,37 +154,28 @@ PERF_TEST_P(ErodeFixture, Erode,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::erode(src, dst, ker);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// Sobel ////////////////////////
|
||||
|
||||
typedef BlurMatType SobelMatType;
|
||||
typedef tuple<Size, SobelMatType> SobelMatParams;
|
||||
typedef TestBaseWithParam<SobelMatParams> SobelFixture;
|
||||
typedef Size_MatType SobelFixture;
|
||||
|
||||
PERF_TEST_P(SobelFixture, Sobel,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
SobelMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
SobelMatParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), dx = 1, dy = 1;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
@ -219,7 +185,7 @@ PERF_TEST_P(SobelFixture, Sobel,
|
||||
else if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(20);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -229,37 +195,28 @@ PERF_TEST_P(SobelFixture, Sobel,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::Sobel(src, dst, -1, dx, dy);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// Scharr ////////////////////////
|
||||
|
||||
typedef BlurMatType ScharrMatType;
|
||||
typedef tuple<Size, ScharrMatType> ScharrParams;
|
||||
typedef TestBaseWithParam<ScharrParams> ScharrFixture;
|
||||
typedef Size_MatType ScharrFixture;
|
||||
|
||||
PERF_TEST_P(ScharrFixture, Scharr,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
ScharrMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
ScharrParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), dx = 1, dy = 0;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
@ -269,7 +226,7 @@ PERF_TEST_P(ScharrFixture, Scharr,
|
||||
else if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(21);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -279,43 +236,34 @@ PERF_TEST_P(ScharrFixture, Scharr,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::Scharr(src, dst, -1, dx, dy);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// GaussianBlur ////////////////////////
|
||||
|
||||
typedef ErodeMatType GaussianBlurMatType;
|
||||
typedef tuple<Size, GaussianBlurMatType> GaussianBlurParams;
|
||||
typedef TestBaseWithParam<GaussianBlurParams> GaussianBlurFixture;
|
||||
typedef Size_MatType GaussianBlurFixture;
|
||||
|
||||
PERF_TEST_P(GaussianBlurFixture, GaussianBlur,
|
||||
::testing::Combine(::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000),
|
||||
GaussianBlurMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4, CV_32FC1, CV_32FC4)))
|
||||
{
|
||||
// getting params
|
||||
GaussianBlurParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), ksize = 7;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
const double eps = src.depth() == CV_8U ? 1 + DBL_EPSILON : 3e-4;
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -325,37 +273,28 @@ PERF_TEST_P(GaussianBlurFixture, GaussianBlur,
|
||||
|
||||
SANITY_CHECK(dst, eps);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::GaussianBlur(src, dst, Size(ksize, ksize), 0);
|
||||
|
||||
SANITY_CHECK(dst, eps);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// filter2D////////////////////////
|
||||
|
||||
typedef BlurMatType filter2DMatType;
|
||||
typedef tuple<Size, filter2DMatType> filter2DParams;
|
||||
typedef TestBaseWithParam<filter2DParams> filter2DFixture;
|
||||
typedef Size_MatType filter2DFixture;
|
||||
|
||||
PERF_TEST_P(filter2DFixture, filter2D,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
filter2DMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
filter2DParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), ksize = 3;
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src(srcSize, type), dst(srcSize, type), kernel(ksize, ksize, CV_32SC1);
|
||||
declare.in(src, WARMUP_RNG).in(kernel).out(dst);
|
||||
randu(kernel, -3.0, 3.0);
|
||||
@ -363,7 +302,7 @@ PERF_TEST_P(filter2DFixture, filter2D,
|
||||
if (srcSize == OCL_SIZE_4000 && type == CV_8UC4)
|
||||
declare.time(8);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type), oclKernel(kernel);
|
||||
|
||||
@ -373,16 +312,12 @@ PERF_TEST_P(filter2DFixture, filter2D,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::filter2D(src, dst, -1, kernel);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,11 +51,9 @@ using namespace perf;
|
||||
|
||||
typedef TestBaseWithParam<Size> gemmFixture;
|
||||
|
||||
PERF_TEST_P(gemmFixture, DISABLED_gemm, OCL_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(gemmFixture, DISABLED_gemm, OCL_TYPICAL_MAT_SIZES) // TODO not implemented
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
Mat src1(srcSize, CV_32FC1), src2(srcSize, CV_32FC1),
|
||||
src3(srcSize, CV_32FC1), dst(srcSize, CV_32FC1);
|
||||
@ -64,7 +62,7 @@ PERF_TEST_P(gemmFixture, DISABLED_gemm, OCL_TYPICAL_MAT_SIZES)
|
||||
randu(src2, -10.0f, 10.0f);
|
||||
randu(src3, -10.0f, 10.0f);
|
||||
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc1(src1), oclSrc2(src2),
|
||||
oclSrc3(src3), oclDst(srcSize, CV_32FC1);
|
||||
@ -75,16 +73,12 @@ PERF_TEST_P(gemmFixture, DISABLED_gemm, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::gemm(src1, src2, 1.0, src3, 1.0, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -60,22 +60,22 @@ PERF_TEST_P(GoodFeaturesToTrackFixture, GoodFeaturesToTrack,
|
||||
string("gpu/stereobm/aloe-L.png")),
|
||||
::testing::Range(0.0, 4.0, 3.0)))
|
||||
{
|
||||
std::vector<cv::Point2f> pts_gold;
|
||||
|
||||
// getting params
|
||||
GoodFeaturesToTrackParams param = GetParam();
|
||||
const string fileName = getDataPath(get<0>(param)), impl = getSelectedImpl();
|
||||
const GoodFeaturesToTrackParams param = GetParam();
|
||||
const string fileName = getDataPath(get<0>(param));
|
||||
const int maxCorners = 2000;
|
||||
const double qualityLevel = 0.01, minDistance = get<1>(param);
|
||||
|
||||
Mat frame = imread(fileName, IMREAD_GRAYSCALE);
|
||||
declare.in(frame);
|
||||
ASSERT_TRUE(!frame.empty()) << "no input image";
|
||||
|
||||
if (impl == "ocl")
|
||||
vector<Point2f> pts_gold;
|
||||
declare.in(frame);
|
||||
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclFrame(frame), pts_oclmat;
|
||||
cv::ocl::GoodFeaturesToTrackDetector_OCL detector(maxCorners, qualityLevel, minDistance);
|
||||
ocl::GoodFeaturesToTrackDetector_OCL detector(maxCorners, qualityLevel, minDistance);
|
||||
|
||||
TEST_CYCLE() detector(oclFrame, pts_oclmat);
|
||||
|
||||
@ -83,17 +83,13 @@ PERF_TEST_P(GoodFeaturesToTrackFixture, GoodFeaturesToTrack,
|
||||
|
||||
SANITY_CHECK(pts_gold);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::goodFeaturesToTrack(frame, pts_gold,
|
||||
maxCorners, qualityLevel, minDistance);
|
||||
|
||||
SANITY_CHECK(pts_gold);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -88,14 +88,13 @@ public:
|
||||
|
||||
PERF_TEST(HaarFixture, Haar)
|
||||
{
|
||||
const std::string impl = getSelectedImpl();
|
||||
vector<Rect> faces;
|
||||
|
||||
Mat img = imread(getDataPath("gpu/haarcascade/basketball1.png"), CV_LOAD_IMAGE_GRAYSCALE);
|
||||
ASSERT_TRUE(!img.empty()) << "can't open basketball1.png";
|
||||
declare.in(img);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
CascadeClassifier faceCascade;
|
||||
ASSERT_TRUE(faceCascade.load(getDataPath("gpu/haarcascade/haarcascade_frontalface_alt.xml")))
|
||||
@ -106,7 +105,7 @@ PERF_TEST(HaarFixture, Haar)
|
||||
|
||||
SANITY_CHECK(faces, 4 + 1e-4);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::CascadeClassifier_GPU faceCascade;
|
||||
ocl::oclMat oclImg(img);
|
||||
@ -119,11 +118,6 @@ PERF_TEST(HaarFixture, Haar)
|
||||
|
||||
SANITY_CHECK(faces, 4 + 1e-4);
|
||||
}
|
||||
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -54,22 +54,21 @@ PERF_TEST(HOGFixture, HOG)
|
||||
Mat src = imread(getDataPath("gpu/hog/road.png"), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_TRUE(!src.empty()) << "can't open input image road.png";
|
||||
|
||||
const std::string impl = getSelectedImpl();
|
||||
std::vector<cv::Rect> found_locations;
|
||||
vector<cv::Rect> found_locations;
|
||||
declare.in(src).time(5);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
cv::HOGDescriptor hog;
|
||||
HOGDescriptor hog;
|
||||
hog.setSVMDetector(hog.getDefaultPeopleDetector());
|
||||
|
||||
TEST_CYCLE() hog.detectMultiScale(src, found_locations);
|
||||
|
||||
SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
cv::ocl::HOGDescriptor ocl_hog;
|
||||
ocl::HOGDescriptor ocl_hog;
|
||||
ocl_hog.setSVMDetector(ocl_hog.getDefaultPeopleDetector());
|
||||
ocl::oclMat oclSrc(src);
|
||||
|
||||
@ -77,10 +76,6 @@ PERF_TEST(HOGFixture, HOG)
|
||||
|
||||
SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -55,16 +55,12 @@ typedef TestBaseWithParam<Size> equalizeHistFixture;
|
||||
|
||||
PERF_TEST_P(equalizeHistFixture, equalizeHist, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_8UC1), dst(srcSize, CV_8UC1);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, src.type());
|
||||
|
||||
@ -74,45 +70,34 @@ PERF_TEST_P(equalizeHistFixture, equalizeHist, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::equalizeHist(src, dst);
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
/////////// CopyMakeBorder //////////////////////
|
||||
|
||||
CV_ENUM(CopyMakeBorderMatType, CV_8UC1, CV_8UC4)
|
||||
|
||||
typedef tuple<Size, CopyMakeBorderMatType> CopyMakeBorderParams;
|
||||
typedef TestBaseWithParam<CopyMakeBorderParams> CopyMakeBorderFixture;
|
||||
typedef Size_MatType CopyMakeBorderFixture;
|
||||
|
||||
PERF_TEST_P(CopyMakeBorderFixture, CopyMakeBorder,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
CopyMakeBorderMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
CopyMakeBorderParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), borderType = BORDER_CONSTANT;
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst;
|
||||
const Size dstSize = srcSize + Size(12, 12);
|
||||
dst.create(dstSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(dstSize, type);
|
||||
|
||||
@ -122,40 +107,29 @@ PERF_TEST_P(CopyMakeBorderFixture, CopyMakeBorder,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::copyMakeBorder(src, dst, 7, 5, 5, 7, borderType, cv::Scalar(1.0));
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// cornerMinEigenVal ////////////////////////
|
||||
|
||||
CV_ENUM(cornerMinEigenValMatType, CV_8UC1, CV_32FC1)
|
||||
|
||||
typedef tuple<Size, cornerMinEigenValMatType> cornerMinEigenValParams;
|
||||
typedef TestBaseWithParam<cornerMinEigenValParams> cornerMinEigenValFixture;
|
||||
typedef Size_MatType cornerMinEigenValFixture;
|
||||
|
||||
PERF_TEST_P(cornerMinEigenValFixture, cornerMinEigenVal,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
cornerMinEigenValMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_32FC1)))
|
||||
{
|
||||
// getting params
|
||||
cornerMinEigenValParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), borderType = BORDER_REFLECT;
|
||||
const int blockSize = 7, apertureSize = 1 + 2 * 3;
|
||||
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, CV_32FC1);
|
||||
declare.in(src, WARMUP_RNG).out(dst)
|
||||
.time(srcSize == OCL_SIZE_4000 ? 20 : srcSize == OCL_SIZE_2000 ? 5 : 3);
|
||||
@ -163,8 +137,7 @@ PERF_TEST_P(cornerMinEigenValFixture, cornerMinEigenVal,
|
||||
const int depth = CV_MAT_DEPTH(type);
|
||||
const ERROR_TYPE errorType = depth == CV_8U ? ERROR_ABSOLUTE : ERROR_RELATIVE;
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, CV_32FC1);
|
||||
|
||||
@ -174,45 +147,34 @@ PERF_TEST_P(cornerMinEigenValFixture, cornerMinEigenVal,
|
||||
|
||||
SANITY_CHECK(dst, 1e-6, errorType);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::cornerMinEigenVal(src, dst, blockSize, apertureSize, borderType);
|
||||
|
||||
SANITY_CHECK(dst, 1e-6, errorType);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// cornerHarris ////////////////////////
|
||||
|
||||
typedef cornerMinEigenValMatType cornerHarrisMatType;
|
||||
typedef tuple<Size, cornerHarrisMatType> cornerHarrisParams;
|
||||
typedef TestBaseWithParam<cornerHarrisParams> cornerHarrisFixture;
|
||||
typedef Size_MatType cornerHarrisFixture;
|
||||
|
||||
PERF_TEST_P(cornerHarrisFixture, cornerHarris,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
cornerHarrisMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_32FC1)))
|
||||
{
|
||||
// getting params
|
||||
cornerHarrisParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), borderType = BORDER_REFLECT;
|
||||
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, CV_32FC1);
|
||||
randu(src, 0, 1);
|
||||
declare.in(src).out(dst)
|
||||
.time(srcSize == OCL_SIZE_4000 ? 20 : srcSize == OCL_SIZE_2000 ? 5 : 3);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, CV_32FC1);
|
||||
|
||||
@ -222,36 +184,28 @@ PERF_TEST_P(cornerHarrisFixture, cornerHarris,
|
||||
|
||||
SANITY_CHECK(dst, 3e-5);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::cornerHarris(src, dst, 5, 7, 0.1, borderType);
|
||||
|
||||
SANITY_CHECK(dst, 3e-5);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// integral ////////////////////////
|
||||
|
||||
typedef TestBaseWithParam<Size> integralFixture;
|
||||
|
||||
PERF_TEST_P(integralFixture, DISABLED_integral, OCL_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(integralFixture, DISABLED_integral, OCL_TYPICAL_MAT_SIZES) // TODO does not work properly
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_8UC1), dst;
|
||||
declare.in(src, WARMUP_RNG);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst;
|
||||
|
||||
@ -261,29 +215,23 @@ PERF_TEST_P(integralFixture, DISABLED_integral, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::integral(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// WarpAffine ////////////////////////
|
||||
|
||||
typedef CopyMakeBorderMatType WarpAffineMatType;
|
||||
typedef tuple<Size, WarpAffineMatType> WarpAffineParams;
|
||||
typedef TestBaseWithParam<WarpAffineParams> WarpAffineFixture;
|
||||
typedef Size_MatType WarpAffineFixture;
|
||||
|
||||
PERF_TEST_P(WarpAffineFixture, WarpAffine,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
WarpAffineMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
static const double coeffs[2][3] =
|
||||
{
|
||||
@ -293,18 +241,14 @@ PERF_TEST_P(WarpAffineFixture, WarpAffine,
|
||||
Mat M(2, 3, CV_64F, (void *)coeffs);
|
||||
const int interpolation = INTER_NEAREST;
|
||||
|
||||
// getting params
|
||||
WarpAffineParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -314,29 +258,23 @@ PERF_TEST_P(WarpAffineFixture, WarpAffine,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::warpAffine(src, dst, M, srcSize, interpolation);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// WarpPerspective ////////////////////////
|
||||
|
||||
typedef CopyMakeBorderMatType WarpPerspectiveMatType;
|
||||
typedef tuple<Size, WarpPerspectiveMatType> WarpPerspectiveParams;
|
||||
typedef TestBaseWithParam<WarpPerspectiveParams> WarpPerspectiveFixture;
|
||||
typedef Size_MatType WarpPerspectiveFixture;
|
||||
|
||||
PERF_TEST_P(WarpPerspectiveFixture, WarpPerspective,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
WarpPerspectiveMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
static const double coeffs[3][3] =
|
||||
{
|
||||
@ -347,19 +285,15 @@ PERF_TEST_P(WarpPerspectiveFixture, WarpPerspective,
|
||||
Mat M(3, 3, CV_64F, (void *)coeffs);
|
||||
const int interpolation = INTER_LINEAR;
|
||||
|
||||
// getting params
|
||||
WarpPerspectiveParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst)
|
||||
.time(srcSize == OCL_SIZE_4000 ? 18 : srcSize == OCL_SIZE_2000 ? 5 : 2);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -369,42 +303,34 @@ PERF_TEST_P(WarpPerspectiveFixture, WarpPerspective,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::warpPerspective(src, dst, M, srcSize, interpolation);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// resize ////////////////////////
|
||||
|
||||
CV_ENUM(resizeInterType, INTER_NEAREST, INTER_LINEAR)
|
||||
|
||||
typedef CopyMakeBorderMatType resizeMatType;
|
||||
typedef tuple<Size, resizeMatType, resizeInterType, double> resizeParams;
|
||||
typedef tuple<Size, MatType, resizeInterType, double> resizeParams;
|
||||
typedef TestBaseWithParam<resizeParams> resizeFixture;
|
||||
|
||||
PERF_TEST_P(resizeFixture, resize,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
resizeMatType::all(),
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4),
|
||||
resizeInterType::all(),
|
||||
::testing::Values(0.5, 2.0)))
|
||||
{
|
||||
// getting params
|
||||
resizeParams params = GetParam();
|
||||
const resizeParams params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), interType = get<2>(params);
|
||||
double scale = get<3>(params);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst;
|
||||
const Size dstSize(cvRound(srcSize.width * scale), cvRound(srcSize.height * scale));
|
||||
dst.create(dstSize, type);
|
||||
@ -412,8 +338,7 @@ PERF_TEST_P(resizeFixture, resize,
|
||||
if (interType == INTER_LINEAR && type == CV_8UC4 && OCL_SIZE_4000 == srcSize)
|
||||
declare.time(11);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(dstSize, type);
|
||||
|
||||
@ -423,18 +348,14 @@ PERF_TEST_P(resizeFixture, resize,
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::resize(src, dst, Size(), scale, scale, interType);
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// threshold////////////////////////
|
||||
@ -448,19 +369,15 @@ PERF_TEST_P(ThreshFixture, threshold,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
ThreshType::all()))
|
||||
{
|
||||
// getting params
|
||||
ThreshParams params = GetParam();
|
||||
const ThreshParams params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int threshType = get<1>(params);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_8U), dst(srcSize, CV_8U);
|
||||
randu(src, 0, 100);
|
||||
declare.in(src).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, CV_8U);
|
||||
|
||||
@ -470,23 +387,19 @@ PERF_TEST_P(ThreshFixture, threshold,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::threshold(src, dst, 50.0, 0.0, threshType);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// meanShiftFiltering////////////////////////
|
||||
|
||||
typedef struct
|
||||
typedef struct _COOR
|
||||
{
|
||||
short x;
|
||||
short y;
|
||||
@ -681,7 +594,6 @@ PERF_TEST_P(meanShiftFilteringFixture, meanShiftFiltering,
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const int sp = 5, sr = 6;
|
||||
const string impl = getSelectedImpl();
|
||||
cv::TermCriteria crit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 5, 1);
|
||||
|
||||
Mat src(srcSize, CV_8UC4), dst(srcSize, CV_8UC4);
|
||||
@ -689,13 +601,13 @@ PERF_TEST_P(meanShiftFilteringFixture, meanShiftFiltering,
|
||||
.time(srcSize == OCL_SIZE_4000 ?
|
||||
56 : srcSize == OCL_SIZE_2000 ? 15 : 3.8);
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() meanShiftFiltering_(src, dst, sp, sr, crit);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, CV_8UC4);
|
||||
|
||||
@ -705,12 +617,8 @@ PERF_TEST_P(meanShiftFilteringFixture, meanShiftFiltering,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
static void meanShiftProc_(const Mat &src_roi, Mat &dst_roi, Mat &dstCoor_roi, int sp, int sr, cv::TermCriteria crit)
|
||||
@ -778,7 +686,6 @@ PERF_TEST_P(meanShiftProcFixture, meanShiftProc,
|
||||
OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
TermCriteria crit(TermCriteria::COUNT + TermCriteria::EPS, 5, 1);
|
||||
|
||||
Mat src(srcSize, CV_8UC4), dst1(srcSize, CV_8UC4),
|
||||
@ -787,14 +694,14 @@ PERF_TEST_P(meanShiftProcFixture, meanShiftProc,
|
||||
.time(srcSize == OCL_SIZE_4000 ?
|
||||
56 : srcSize == OCL_SIZE_2000 ? 15 : 3.8);;
|
||||
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() meanShiftProc_(src, dst1, dst2, 5, 6, crit);
|
||||
|
||||
SANITY_CHECK(dst1);
|
||||
SANITY_CHECK(dst2);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst1(srcSize, CV_8UC4),
|
||||
oclDst2(srcSize, CV_16SC2);
|
||||
@ -807,34 +714,26 @@ PERF_TEST_P(meanShiftProcFixture, meanShiftProc,
|
||||
SANITY_CHECK(dst1);
|
||||
SANITY_CHECK(dst2);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// remap////////////////////////
|
||||
|
||||
CV_ENUM(RemapInterType, INTER_NEAREST, INTER_LINEAR)
|
||||
|
||||
typedef CopyMakeBorderMatType remapMatType;
|
||||
typedef tuple<Size, remapMatType, RemapInterType> remapParams;
|
||||
typedef tuple<Size, MatType, RemapInterType> remapParams;
|
||||
typedef TestBaseWithParam<remapParams> remapFixture;
|
||||
|
||||
PERF_TEST_P(remapFixture, remap,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
remapMatType::all(),
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4),
|
||||
RemapInterType::all()))
|
||||
{
|
||||
// getting params
|
||||
remapParams params = GetParam();
|
||||
const remapParams params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params), interpolation = get<2>(params);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
@ -859,8 +758,7 @@ PERF_TEST_P(remapFixture, remap,
|
||||
|
||||
const int borderMode = BORDER_CONSTANT;
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
ocl::oclMat oclXMap(xmap), oclYMap(ymap);
|
||||
@ -871,18 +769,14 @@ PERF_TEST_P(remapFixture, remap,
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::remap(src, dst, xmap, ymap, interpolation, borderMode);
|
||||
|
||||
SANITY_CHECK(dst, 1 + DBL_EPSILON);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// CLAHE ////////////////////////
|
||||
@ -891,11 +785,9 @@ typedef TestBaseWithParam<Size> CLAHEFixture;
|
||||
|
||||
PERF_TEST_P(CLAHEFixture, CLAHE, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_8UC1), dst;
|
||||
const double clipLimit = 40.0;
|
||||
declare.in(src, WARMUP_RNG);
|
||||
@ -903,8 +795,7 @@ PERF_TEST_P(CLAHEFixture, CLAHE, OCL_TYPICAL_MAT_SIZES)
|
||||
if (srcSize == OCL_SIZE_4000)
|
||||
declare.time(11);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst;
|
||||
cv::Ptr<cv::CLAHE> oclClahe = cv::ocl::createCLAHE(clipLimit);
|
||||
@ -915,19 +806,15 @@ PERF_TEST_P(CLAHEFixture, CLAHE, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(clipLimit);
|
||||
TEST_CYCLE() clahe->apply(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// columnSum////////////////////////
|
||||
@ -946,19 +833,15 @@ static void columnSumPerfTest(const Mat & src, Mat & dst)
|
||||
|
||||
PERF_TEST_P(columnSumFixture, columnSum, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_32FC1), dst(srcSize, CV_32FC1);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
if (srcSize == OCL_SIZE_4000)
|
||||
declare.time(5);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, CV_32FC1);
|
||||
|
||||
@ -968,16 +851,12 @@ PERF_TEST_P(columnSumFixture, columnSum, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() columnSumPerfTest(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,23 +51,16 @@ using std::tr1::get;
|
||||
|
||||
/////////// matchTemplate ////////////////////////
|
||||
|
||||
CV_ENUM(CV_TM_CCORRMatType, CV_32FC1, CV_32FC4)
|
||||
|
||||
typedef tuple<Size, CV_TM_CCORRMatType> CV_TM_CCORRParams;
|
||||
typedef TestBaseWithParam<CV_TM_CCORRParams> CV_TM_CCORRFixture;
|
||||
typedef Size_MatType CV_TM_CCORRFixture;
|
||||
|
||||
PERF_TEST_P(CV_TM_CCORRFixture, matchTemplate,
|
||||
::testing::Combine(::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000),
|
||||
CV_TM_CCORRMatType::all()))
|
||||
OCL_PERF_ENUM(CV_32FC1, CV_32FC4)))
|
||||
{
|
||||
// getting params
|
||||
CV_TM_CCORRParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params), templSize(5, 5);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), templ(templSize, type);
|
||||
const Size dstSize(src.cols - templ.cols + 1, src.rows - templ.rows + 1);
|
||||
Mat dst(dstSize, CV_32F);
|
||||
@ -75,8 +68,7 @@ PERF_TEST_P(CV_TM_CCORRFixture, matchTemplate,
|
||||
randu(templ, 0.0f, 1.0f);
|
||||
declare.time(srcSize == OCL_SIZE_2000 ? 20 : 6).in(src, templ).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclTempl(templ), oclDst(dstSize, CV_32F);
|
||||
|
||||
@ -86,37 +78,29 @@ PERF_TEST_P(CV_TM_CCORRFixture, matchTemplate,
|
||||
|
||||
SANITY_CHECK(dst, 1e-4);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR);
|
||||
|
||||
SANITY_CHECK(dst, 1e-4);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
typedef TestBaseWithParam<Size> CV_TM_CCORR_NORMEDFixture;
|
||||
|
||||
PERF_TEST_P(CV_TM_CCORR_NORMEDFixture, matchTemplate, OCL_TYPICAL_MAT_SIZES)
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam(), templSize(5, 5);
|
||||
const std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_8UC1), templ(templSize, CV_8UC1), dst;
|
||||
const Size dstSize(src.cols - templ.cols + 1, src.rows - templ.rows + 1);
|
||||
dst.create(dstSize, CV_8UC1);
|
||||
declare.in(src, templ, WARMUP_RNG).out(dst)
|
||||
.time(srcSize == OCL_SIZE_2000 ? 10 : srcSize == OCL_SIZE_4000 ? 23 : 2);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclTempl(templ), oclDst(dstSize, CV_8UC1);
|
||||
|
||||
@ -126,16 +110,12 @@ PERF_TEST_P(CV_TM_CCORR_NORMEDFixture, matchTemplate, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(dst, 2e-2);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED);
|
||||
|
||||
SANITY_CHECK(dst, 2e-2);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,30 +51,22 @@ using std::tr1::get;
|
||||
|
||||
///////////// ConvertTo////////////////////////
|
||||
|
||||
CV_ENUM(ConvertToMatType, CV_8UC1, CV_8UC4)
|
||||
|
||||
typedef tuple<Size, ConvertToMatType> ConvertToParams;
|
||||
typedef TestBaseWithParam<ConvertToParams> ConvertToFixture;
|
||||
typedef Size_MatType ConvertToFixture;
|
||||
|
||||
PERF_TEST_P(ConvertToFixture, ConvertTo,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
ConvertToMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
ConvertToParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst;
|
||||
const int dstType = CV_MAKE_TYPE(CV_32F, src.channels());
|
||||
dst.create(srcSize, dstType);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, dstType);
|
||||
|
||||
@ -84,43 +76,32 @@ PERF_TEST_P(ConvertToFixture, ConvertTo,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() src.convertTo(dst, dstType);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// copyTo////////////////////////
|
||||
|
||||
typedef ConvertToMatType copyToMatType;
|
||||
typedef tuple<Size, copyToMatType> copyToParams;
|
||||
typedef TestBaseWithParam<copyToParams> copyToFixture;
|
||||
typedef Size_MatType copyToFixture;
|
||||
|
||||
PERF_TEST_P(copyToFixture, copyTo,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
copyToMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
copyToParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(srcSize, type);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(srcSize, type);
|
||||
|
||||
@ -130,44 +111,33 @@ PERF_TEST_P(copyToFixture, copyTo,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() src.copyTo(dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// setTo////////////////////////
|
||||
|
||||
typedef ConvertToMatType setToMatType;
|
||||
typedef tuple<Size, setToMatType> setToParams;
|
||||
typedef TestBaseWithParam<setToParams> setToFixture;
|
||||
typedef Size_MatType setToFixture;
|
||||
|
||||
PERF_TEST_P(setToFixture, setTo,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
setToMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
setToParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
const Scalar val(1, 2, 3, 4);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type);
|
||||
declare.in(src);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(srcSize, type);
|
||||
|
||||
@ -176,16 +146,12 @@ PERF_TEST_P(setToFixture, setTo,
|
||||
|
||||
SANITY_CHECK(src);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() src.setTo(val);
|
||||
|
||||
SANITY_CHECK(src);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -52,50 +52,38 @@ using std::tr1::get;
|
||||
|
||||
///////////// Moments ////////////////////////
|
||||
|
||||
CV_ENUM(MomentsMatType, CV_8UC1, CV_16SC1, CV_32FC1, CV_64FC1)
|
||||
|
||||
typedef tuple<Size, MomentsMatType> MomentsParams;
|
||||
typedef TestBaseWithParam<MomentsParams> MomentsFixture;
|
||||
typedef Size_MatType MomentsFixture;
|
||||
|
||||
PERF_TEST_P(MomentsFixture, DISABLED_Moments,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
MomentsMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_16SC1, CV_32FC1, CV_64FC1))) // TODO does not work properly (see below)
|
||||
{
|
||||
// getting params
|
||||
MomentsParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst(7, 1, CV_64F);
|
||||
const bool binaryImage = false;
|
||||
cv::Moments mom;
|
||||
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src);
|
||||
|
||||
TEST_CYCLE() mom = cv::ocl::ocl_moments(oclSrc, binaryImage);
|
||||
TEST_CYCLE() mom = cv::ocl::ocl_moments(oclSrc, binaryImage); // TODO Use oclSrc
|
||||
cv::HuMoments(mom, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() mom = cv::moments(src, binaryImage);
|
||||
cv::HuMoments(mom, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -53,21 +53,18 @@ using std::tr1::get;
|
||||
|
||||
typedef TestBaseWithParam<Size> normFixture;
|
||||
|
||||
PERF_TEST_P(normFixture, DISABLED_norm, OCL_TYPICAL_MAT_SIZES)
|
||||
PERF_TEST_P(normFixture, DISABLED_norm, OCL_TYPICAL_MAT_SIZES) // TODO doesn't work properly
|
||||
{
|
||||
// getting params
|
||||
const Size srcSize = GetParam();
|
||||
const std::string impl = getSelectedImpl();
|
||||
double value = 0.0;
|
||||
|
||||
// creating src data
|
||||
Mat src1(srcSize, CV_8UC1), src2(srcSize, CV_8UC1);
|
||||
declare.in(src1, src2);
|
||||
randu(src1, 0, 1);
|
||||
randu(src2, 0, 1);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc1(src1), oclSrc2(src2);
|
||||
|
||||
@ -75,16 +72,12 @@ PERF_TEST_P(normFixture, DISABLED_norm, OCL_TYPICAL_MAT_SIZES)
|
||||
|
||||
SANITY_CHECK(value);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() value = cv::norm(src1, src2, NORM_INF);
|
||||
|
||||
SANITY_CHECK(value);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -70,25 +70,25 @@ typedef tuple<int, tuple<string, string, LoadMode> > PyrLKOpticalFlowParamType;
|
||||
typedef TestBaseWithParam<PyrLKOpticalFlowParamType> PyrLKOpticalFlowFixture;
|
||||
|
||||
PERF_TEST_P(PyrLKOpticalFlowFixture,
|
||||
PyrLKOpticalFlow,
|
||||
DISABLED_PyrLKOpticalFlow,
|
||||
::testing::Combine(
|
||||
::testing::Values(1000, 2000, 4000),
|
||||
::testing::Values(
|
||||
make_tuple<string, string, LoadMode>
|
||||
(
|
||||
string("gpu/opticalflow/rubberwhale1.png"),
|
||||
string("gpu/opticalflow/rubberwhale1.png"),
|
||||
string("gpu/opticalflow/rubberwhale2.png"),
|
||||
LoadMode(IMREAD_COLOR)
|
||||
)
|
||||
// , make_tuple<string, string, LoadMode>
|
||||
// (
|
||||
// string("gpu/stereobm/aloe-L.png"),
|
||||
// string("gpu/stereobm/aloe-R.png"),
|
||||
// LoadMode(IMREAD_GRAYSCALE)
|
||||
// )
|
||||
, make_tuple<string, string, LoadMode>
|
||||
(
|
||||
string("gpu/stereobm/aloe-L.png"),
|
||||
string("gpu/stereobm/aloe-R.png"),
|
||||
LoadMode(IMREAD_GRAYSCALE)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
) // TODO to big difference between implementations
|
||||
{
|
||||
PyrLKOpticalFlowParamType params = GetParam();
|
||||
tuple<string, string, LoadMode> fileParam = get<1>(params);
|
||||
@ -97,7 +97,6 @@ PERF_TEST_P(PyrLKOpticalFlowFixture,
|
||||
const string fileName0 = get<0>(fileParam), fileName1 = get<1>(fileParam);
|
||||
Mat frame0 = imread(getDataPath(fileName0), openMode);
|
||||
Mat frame1 = imread(getDataPath(fileName1), openMode);
|
||||
const string impl = getSelectedImpl();
|
||||
|
||||
ASSERT_FALSE(frame0.empty()) << "can't load " << fileName0;
|
||||
ASSERT_FALSE(frame1.empty()) << "can't load " << fileName1;
|
||||
@ -108,14 +107,12 @@ PERF_TEST_P(PyrLKOpticalFlowFixture,
|
||||
else
|
||||
grayFrame = frame0;
|
||||
|
||||
// initialization
|
||||
vector<Point2f> pts, nextPts;
|
||||
vector<unsigned char> status;
|
||||
vector<float> err;
|
||||
goodFeaturesToTrack(grayFrame, pts, pointsCount, 0.01, 0.0);
|
||||
|
||||
// selecting implementation
|
||||
if (impl == "plain")
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE()
|
||||
cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);
|
||||
@ -124,7 +121,7 @@ PERF_TEST_P(PyrLKOpticalFlowFixture,
|
||||
SANITY_CHECK(status);
|
||||
SANITY_CHECK(err);
|
||||
}
|
||||
else if (impl == "ocl")
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::PyrLKOpticalFlow oclPyrLK;
|
||||
ocl::oclMat oclFrame0(frame0), oclFrame1(frame1);
|
||||
@ -142,228 +139,135 @@ PERF_TEST_P(PyrLKOpticalFlowFixture,
|
||||
SANITY_CHECK(status);
|
||||
SANITY_CHECK(err);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
|
||||
// size_t mismatch = 0;
|
||||
// for (int i = 0; i < (int)nextPts.size(); ++i)
|
||||
// {
|
||||
// if(status[i] != ocl_status.at<unsigned char>(0, i))
|
||||
// {
|
||||
// mismatch++;
|
||||
// continue;
|
||||
// }
|
||||
// if(status[i])
|
||||
// {
|
||||
// Point2f gpu_rst = ocl_nextPts.at<Point2f>(0, i);
|
||||
// Point2f cpu_rst = nextPts[i];
|
||||
// if(fabs(gpu_rst.x - cpu_rst.x) >= 1. || fabs(gpu_rst.y - cpu_rst.y) >= 1.)
|
||||
// mismatch++;
|
||||
// }
|
||||
// }
|
||||
// double ratio = (double)mismatch / (double)nextPts.size();
|
||||
// if(ratio < .02)
|
||||
// TestSystem::instance().setAccurate(1, ratio);
|
||||
// else
|
||||
// TestSystem::instance().setAccurate(0, ratio);
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
|
||||
PERFTEST(tvl1flow)
|
||||
PERF_TEST(tvl1flowFixture, tvl1flow)
|
||||
{
|
||||
cv::Mat frame0 = imread("rubberwhale1.png", cv::IMREAD_GRAYSCALE);
|
||||
assert(!frame0.empty());
|
||||
Mat frame0 = imread(getDataPath("gpu/opticalflow/rubberwhale1.png"), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame0.empty()) << "can't load rubberwhale1.png";
|
||||
|
||||
cv::Mat frame1 = imread("rubberwhale2.png", cv::IMREAD_GRAYSCALE);
|
||||
assert(!frame1.empty());
|
||||
Mat frame1 = imread(getDataPath("gpu/opticalflow/rubberwhale2.png"), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame1.empty()) << "can't load rubberwhale2.png";
|
||||
|
||||
cv::ocl::OpticalFlowDual_TVL1_OCL d_alg;
|
||||
cv::ocl::oclMat d_flowx(frame0.size(), CV_32FC1);
|
||||
cv::ocl::oclMat d_flowy(frame1.size(), CV_32FC1);
|
||||
const Size srcSize = frame0.size();
|
||||
const double eps = 1.2;
|
||||
Mat flow(srcSize, CV_32FC2), flow1(srcSize, CV_32FC1), flow2(srcSize, CV_32FC1);
|
||||
declare.in(frame0, frame1).out(flow1, flow2).time(159);
|
||||
|
||||
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
|
||||
cv::Mat flow;
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
Ptr<DenseOpticalFlow> alg = createOptFlow_DualTVL1();
|
||||
|
||||
TEST_CYCLE() alg->calc(frame0, frame1, flow);
|
||||
|
||||
SUBTEST << frame0.cols << 'x' << frame0.rows << "; rubberwhale1.png; "<<frame1.cols<<'x'<<frame1.rows<<"; rubberwhale2.png";
|
||||
alg->collectGarbage();
|
||||
Mat flows[2] = { flow1, flow2 };
|
||||
split(flow, flows);
|
||||
|
||||
alg->calc(frame0, frame1, flow);
|
||||
SANITY_CHECK(flow1, eps);
|
||||
SANITY_CHECK(flow2, eps);
|
||||
}
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::OpticalFlowDual_TVL1_OCL oclAlg;
|
||||
ocl::oclMat oclFrame0(frame0), oclFrame1(frame1), oclFlow1(srcSize, CV_32FC1),
|
||||
oclFlow2(srcSize, CV_32FC1);
|
||||
|
||||
CPU_ON;
|
||||
alg->calc(frame0, frame1, flow);
|
||||
CPU_OFF;
|
||||
TEST_CYCLE() oclAlg(oclFrame0, oclFrame1, oclFlow1, oclFlow2);
|
||||
|
||||
cv::Mat gold[2];
|
||||
cv::split(flow, gold);
|
||||
oclAlg.collectGarbage();
|
||||
|
||||
cv::ocl::oclMat d0(frame0.size(), CV_32FC1);
|
||||
d0.upload(frame0);
|
||||
cv::ocl::oclMat d1(frame1.size(), CV_32FC1);
|
||||
d1.upload(frame1);
|
||||
oclFlow1.download(flow1);
|
||||
oclFlow2.download(flow2);
|
||||
|
||||
WARMUP_ON;
|
||||
d_alg(d0, d1, d_flowx, d_flowy);
|
||||
WARMUP_OFF;
|
||||
/*
|
||||
double diff1 = 0.0, diff2 = 0.0;
|
||||
if(ExceptedMatSimilar(gold[0], cv::Mat(d_flowx), 3e-3, diff1) == 1
|
||||
&&ExceptedMatSimilar(gold[1], cv::Mat(d_flowy), 3e-3, diff2) == 1)
|
||||
TestSystem::instance().setAccurate(1);
|
||||
else
|
||||
TestSystem::instance().setAccurate(0);
|
||||
|
||||
TestSystem::instance().setDiff(diff1);
|
||||
TestSystem::instance().setDiff(diff2);
|
||||
*/
|
||||
|
||||
|
||||
GPU_ON;
|
||||
d_alg(d0, d1, d_flowx, d_flowy);
|
||||
d_alg.collectGarbage();
|
||||
GPU_OFF;
|
||||
|
||||
|
||||
cv::Mat flowx, flowy;
|
||||
|
||||
GPU_FULL_ON;
|
||||
d0.upload(frame0);
|
||||
d1.upload(frame1);
|
||||
d_alg(d0, d1, d_flowx, d_flowy);
|
||||
d_alg.collectGarbage();
|
||||
d_flowx.download(flowx);
|
||||
d_flowy.download(flowy);
|
||||
GPU_FULL_OFF;
|
||||
|
||||
TestSystem::instance().ExceptedMatSimilar(gold[0], flowx, 3e-3);
|
||||
TestSystem::instance().ExceptedMatSimilar(gold[1], flowy, 3e-3);
|
||||
SANITY_CHECK(flow1, eps);
|
||||
SANITY_CHECK(flow2, eps);
|
||||
}
|
||||
else
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// FarnebackOpticalFlow ////////////////////////
|
||||
PERFTEST(FarnebackOpticalFlow)
|
||||
|
||||
CV_ENUM(farneFlagType, 0, OPTFLOW_FARNEBACK_GAUSSIAN)
|
||||
|
||||
typedef tuple<tuple<int, double>, farneFlagType, bool> FarnebackOpticalFlowParams;
|
||||
typedef TestBaseWithParam<FarnebackOpticalFlowParams> FarnebackOpticalFlowFixture;
|
||||
|
||||
PERF_TEST_P(FarnebackOpticalFlowFixture, FarnebackOpticalFlow,
|
||||
::testing::Combine(
|
||||
::testing::Values(make_tuple<int, double>(5, 1.1),
|
||||
make_tuple<int, double>(7, 1.5)),
|
||||
farneFlagType::all(),
|
||||
::testing::Bool()))
|
||||
{
|
||||
cv::Mat frame0 = imread("rubberwhale1.png", cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame0.empty());
|
||||
Mat frame0 = imread(getDataPath("gpu/opticalflow/rubberwhale1.png"), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame0.empty()) << "can't load rubberwhale1.png";
|
||||
|
||||
cv::Mat frame1 = imread("rubberwhale2.png", cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame1.empty());
|
||||
Mat frame1 = imread(getDataPath("gpu/opticalflow/rubberwhale2.png"), cv::IMREAD_GRAYSCALE);
|
||||
ASSERT_FALSE(frame1.empty()) << "can't load rubberwhale2.png";
|
||||
|
||||
cv::ocl::oclMat d_frame0(frame0), d_frame1(frame1);
|
||||
const Size srcSize = frame0.size();
|
||||
|
||||
int polyNs[2] = { 5, 7 };
|
||||
double polySigmas[2] = { 1.1, 1.5 };
|
||||
int farneFlags[2] = { 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN };
|
||||
bool UseInitFlows[2] = { false, true };
|
||||
double pyrScale = 0.5;
|
||||
const FarnebackOpticalFlowParams params = GetParam();
|
||||
const tuple<int, double> polyParams = get<0>(params);
|
||||
const int polyN = get<0>(polyParams), flags = get<1>(params);
|
||||
const double polySigma = get<1>(polyParams), pyrScale = 0.5;
|
||||
const bool useInitFlow = get<2>(params);
|
||||
const double eps = 1.5;
|
||||
|
||||
string farneFlagStrs[2] = { "BoxFilter", "GaussianBlur" };
|
||||
string useInitFlowStrs[2] = { "", "UseInitFlow" };
|
||||
Mat flowx(srcSize, CV_32FC1), flowy(srcSize, CV_32FC1), flow(srcSize, CV_32FC2);
|
||||
declare.in(frame0, frame1).out(flowx, flowy);
|
||||
|
||||
for ( int i = 0; i < 2; ++i)
|
||||
ocl::FarnebackOpticalFlow farn;
|
||||
farn.pyrScale = pyrScale;
|
||||
farn.polyN = polyN;
|
||||
farn.polySigma = polySigma;
|
||||
farn.flags = flags;
|
||||
|
||||
if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
int polyN = polyNs[i];
|
||||
double polySigma = polySigmas[i];
|
||||
|
||||
for ( int j = 0; j < 2; ++j)
|
||||
if (useInitFlow)
|
||||
{
|
||||
int flags = farneFlags[j];
|
||||
calcOpticalFlowFarneback(
|
||||
frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize,
|
||||
farn.numIters, farn.polyN, farn.polySigma, farn.flags);
|
||||
farn.flags |= OPTFLOW_USE_INITIAL_FLOW;
|
||||
}
|
||||
|
||||
for ( int k = 0; k < 2; ++k)
|
||||
{
|
||||
bool useInitFlow = UseInitFlows[k];
|
||||
SUBTEST << "polyN(" << polyN << "); " << farneFlagStrs[j] << "; " << useInitFlowStrs[k];
|
||||
|
||||
cv::ocl::FarnebackOpticalFlow farn;
|
||||
farn.pyrScale = pyrScale;
|
||||
farn.polyN = polyN;
|
||||
farn.polySigma = polySigma;
|
||||
farn.flags = flags;
|
||||
|
||||
cv::ocl::oclMat d_flowx, d_flowy;
|
||||
cv::Mat flow, flowBuf, flowxBuf, flowyBuf;
|
||||
|
||||
WARMUP_ON;
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
|
||||
if (useInitFlow)
|
||||
{
|
||||
cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)};
|
||||
cv::merge(flowxy, 2, flow);
|
||||
flow.copyTo(flowBuf);
|
||||
flowxy[0].copyTo(flowxBuf);
|
||||
flowxy[1].copyTo(flowyBuf);
|
||||
|
||||
farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW;
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
}
|
||||
WARMUP_OFF;
|
||||
|
||||
cv::calcOpticalFlowFarneback(
|
||||
TEST_CYCLE()
|
||||
calcOpticalFlowFarneback(
|
||||
frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize,
|
||||
farn.numIters, farn.polyN, farn.polySigma, farn.flags);
|
||||
|
||||
std::vector<cv::Mat> flowxy;
|
||||
cv::split(flow, flowxy);
|
||||
Mat flowxy[2] = { flowx, flowy };
|
||||
split(flow, flowxy);
|
||||
|
||||
Mat md_flowx = cv::Mat(d_flowx);
|
||||
Mat md_flowy = cv::Mat(d_flowy);
|
||||
TestSystem::instance().ExceptedMatSimilar(flowxy[0], md_flowx, 0.1);
|
||||
TestSystem::instance().ExceptedMatSimilar(flowxy[1], md_flowy, 0.1);
|
||||
|
||||
if (useInitFlow)
|
||||
{
|
||||
cv::Mat flowx, flowy;
|
||||
farn.flags = (flags | cv::OPTFLOW_USE_INITIAL_FLOW);
|
||||
|
||||
CPU_ON;
|
||||
cv::calcOpticalFlowFarneback(
|
||||
frame0, frame1, flowBuf, farn.pyrScale, farn.numLevels, farn.winSize,
|
||||
farn.numIters, farn.polyN, farn.polySigma, farn.flags);
|
||||
CPU_OFF;
|
||||
|
||||
GPU_ON;
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
GPU_OFF;
|
||||
|
||||
GPU_FULL_ON;
|
||||
d_frame0.upload(frame0);
|
||||
d_frame1.upload(frame1);
|
||||
d_flowx.upload(flowxBuf);
|
||||
d_flowy.upload(flowyBuf);
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
d_flowx.download(flowx);
|
||||
d_flowy.download(flowy);
|
||||
GPU_FULL_OFF;
|
||||
}
|
||||
else
|
||||
{
|
||||
cv::Mat flow, flowx, flowy;
|
||||
cv::ocl::oclMat d_flowx, d_flowy;
|
||||
|
||||
farn.flags = flags;
|
||||
|
||||
CPU_ON;
|
||||
cv::calcOpticalFlowFarneback(
|
||||
frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize,
|
||||
farn.numIters, farn.polyN, farn.polySigma, farn.flags);
|
||||
CPU_OFF;
|
||||
|
||||
GPU_ON;
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
GPU_OFF;
|
||||
|
||||
GPU_FULL_ON;
|
||||
d_frame0.upload(frame0);
|
||||
d_frame1.upload(frame1);
|
||||
farn(d_frame0, d_frame1, d_flowx, d_flowy);
|
||||
d_flowx.download(flowx);
|
||||
d_flowy.download(flowy);
|
||||
GPU_FULL_OFF;
|
||||
}
|
||||
}
|
||||
}
|
||||
SANITY_CHECK(flowx, eps);
|
||||
SANITY_CHECK(flowy, eps);
|
||||
}
|
||||
else if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclFrame0(frame0), oclFrame1(frame1),
|
||||
oclFlowx(srcSize, CV_32FC1), oclFlowy(srcSize, CV_32FC1);
|
||||
|
||||
if (useInitFlow)
|
||||
{
|
||||
farn(oclFrame0, oclFrame1, oclFlowx, oclFlowy);
|
||||
farn.flags |= OPTFLOW_USE_INITIAL_FLOW;
|
||||
}
|
||||
|
||||
TEST_CYCLE()
|
||||
farn(oclFrame0, oclFrame1, oclFlowx, oclFlowy);
|
||||
|
||||
oclFlowx.download(flowx);
|
||||
oclFlowy.download(flowy);
|
||||
|
||||
SANITY_CHECK(flowx, eps);
|
||||
SANITY_CHECK(flowy, eps);
|
||||
}
|
||||
else
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -41,452 +41,3 @@
|
||||
//M*/
|
||||
|
||||
#include "perf_precomp.hpp"
|
||||
#if GTEST_OS_WINDOWS
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
#endif
|
||||
# include <windows.h>
|
||||
#endif
|
||||
|
||||
// This program test most of the functions in ocl module and generate data metrix of x-factor in .csv files
|
||||
// All images needed in this test are in samples/gpu folder.
|
||||
// For haar template, haarcascade_frontalface_alt.xml shouold be in working directory
|
||||
void TestSystem::run()
|
||||
{
|
||||
if (is_list_mode_)
|
||||
{
|
||||
for (vector<Runnable *>::iterator it = tests_.begin(); it != tests_.end(); ++it)
|
||||
{
|
||||
cout << (*it)->name() << endl;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Run test initializers
|
||||
for (vector<Runnable *>::iterator it = inits_.begin(); it != inits_.end(); ++it)
|
||||
{
|
||||
if ((*it)->name().find(test_filter_, 0) != string::npos)
|
||||
{
|
||||
(*it)->run();
|
||||
}
|
||||
}
|
||||
|
||||
printHeading();
|
||||
writeHeading();
|
||||
|
||||
// Run tests
|
||||
for (vector<Runnable *>::iterator it = tests_.begin(); it != tests_.end(); ++it)
|
||||
{
|
||||
try
|
||||
{
|
||||
if ((*it)->name().find(test_filter_, 0) != string::npos)
|
||||
{
|
||||
cout << endl << (*it)->name() << ":\n";
|
||||
|
||||
setCurrentTest((*it)->name());
|
||||
//fprintf(record_,"%s\n",(*it)->name().c_str());
|
||||
|
||||
(*it)->run();
|
||||
finishCurrentSubtest();
|
||||
}
|
||||
}
|
||||
catch (const Exception &)
|
||||
{
|
||||
// Message is printed via callback
|
||||
resetCurrentSubtest();
|
||||
}
|
||||
catch (const runtime_error &e)
|
||||
{
|
||||
printError(e.what());
|
||||
resetCurrentSubtest();
|
||||
}
|
||||
}
|
||||
|
||||
printSummary();
|
||||
writeSummary();
|
||||
}
|
||||
|
||||
|
||||
void TestSystem::finishCurrentSubtest()
|
||||
{
|
||||
if (cur_subtest_is_empty_)
|
||||
// There is no need to print subtest statistics
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
double cpu_time = cpu_elapsed_ / getTickFrequency() * 1000.0;
|
||||
double gpu_time = gpu_elapsed_ / getTickFrequency() * 1000.0;
|
||||
double gpu_full_time = gpu_full_elapsed_ / getTickFrequency() * 1000.0;
|
||||
|
||||
double speedup = static_cast<double>(cpu_elapsed_) / std::max(1.0, gpu_elapsed_);
|
||||
speedup_total_ += speedup;
|
||||
|
||||
double fullspeedup = static_cast<double>(cpu_elapsed_) / std::max(1.0, gpu_full_elapsed_);
|
||||
speedup_full_total_ += fullspeedup;
|
||||
|
||||
if (speedup > top_)
|
||||
{
|
||||
speedup_faster_count_++;
|
||||
}
|
||||
else if (speedup < bottom_)
|
||||
{
|
||||
speedup_slower_count_++;
|
||||
}
|
||||
else
|
||||
{
|
||||
speedup_equal_count_++;
|
||||
}
|
||||
|
||||
if (fullspeedup > top_)
|
||||
{
|
||||
speedup_full_faster_count_++;
|
||||
}
|
||||
else if (fullspeedup < bottom_)
|
||||
{
|
||||
speedup_full_slower_count_++;
|
||||
}
|
||||
else
|
||||
{
|
||||
speedup_full_equal_count_++;
|
||||
}
|
||||
|
||||
// compute min, max and
|
||||
std::sort(gpu_times_.begin(), gpu_times_.end());
|
||||
double gpu_min = gpu_times_.front() / getTickFrequency() * 1000.0;
|
||||
double gpu_max = gpu_times_.back() / getTickFrequency() * 1000.0;
|
||||
double deviation = 0;
|
||||
|
||||
if (gpu_times_.size() > 1)
|
||||
{
|
||||
double sum = 0;
|
||||
|
||||
for (size_t i = 0; i < gpu_times_.size(); i++)
|
||||
{
|
||||
int64 diff = gpu_times_[i] - static_cast<int64>(gpu_elapsed_);
|
||||
double diff_time = diff * 1000 / getTickFrequency();
|
||||
sum += diff_time * diff_time;
|
||||
}
|
||||
|
||||
deviation = std::sqrt(sum / gpu_times_.size());
|
||||
}
|
||||
|
||||
printMetrics(is_accurate_, cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup);
|
||||
writeMetrics(cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup, gpu_min, gpu_max, deviation);
|
||||
|
||||
num_subtests_called_++;
|
||||
resetCurrentSubtest();
|
||||
}
|
||||
|
||||
|
||||
double TestSystem::meanTime(const vector<int64> &samples)
|
||||
{
|
||||
double sum = accumulate(samples.begin(), samples.end(), 0.);
|
||||
return sum / samples.size();
|
||||
}
|
||||
|
||||
|
||||
void TestSystem::printHeading()
|
||||
{
|
||||
cout << endl;
|
||||
cout<< setiosflags(ios_base::left);
|
||||
|
||||
#if 0
|
||||
cout<<TAB<<setw(7)<< "Accu." << setw(10) << "CPU (ms)" << setw(10) << "GPU, ms"
|
||||
<< setw(8) << "Speedup"<< setw(10)<<"GPUTotal" << setw(10) << "Total"
|
||||
<< "Description\n";
|
||||
cout<<TAB<<setw(7)<<""<<setw(10)<<""<<setw(10)<<""<<setw(8)<<""<<setw(10)<<"(ms)"<<setw(10)<<"Speedup\n";
|
||||
#endif
|
||||
|
||||
cout<<TAB<< setw(10) << "CPU (ms)" << setw(10) << "GPU, ms"
|
||||
<< setw(8) << "Speedup"<< setw(10)<<"GPUTotal" << setw(10) << "Total"
|
||||
<< "Description\n";
|
||||
cout<<TAB<<setw(10)<<""<<setw(10)<<""<<setw(8)<<""<<setw(10)<<"(ms)"<<setw(10)<<"Speedup\n";
|
||||
|
||||
cout << resetiosflags(ios_base::left);
|
||||
}
|
||||
|
||||
void TestSystem::writeHeading()
|
||||
{
|
||||
if (!record_)
|
||||
{
|
||||
recordname_ += "_OCL.csv";
|
||||
record_ = fopen(recordname_.c_str(), "w");
|
||||
if(record_ == NULL)
|
||||
{
|
||||
cout<<".csv file open failed.\n";
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(record_, "NAME,DESCRIPTION,ACCURACY,DIFFERENCE,CPU (ms),GPU (ms),SPEEDUP,GPUTOTAL (ms),TOTALSPEEDUP,GPU Min (ms),GPU Max (ms), Standard deviation (ms)\n");
|
||||
|
||||
fflush(record_);
|
||||
}
|
||||
|
||||
void TestSystem::printSummary()
|
||||
{
|
||||
cout << setiosflags(ios_base::fixed);
|
||||
cout << "\naverage GPU speedup: x"
|
||||
<< setprecision(3) << speedup_total_ / std::max(1, num_subtests_called_)
|
||||
<< endl;
|
||||
cout << "\nGPU exceeded: "
|
||||
<< setprecision(3) << speedup_faster_count_
|
||||
<< "\nGPU passed: "
|
||||
<< setprecision(3) << speedup_equal_count_
|
||||
<< "\nGPU failed: "
|
||||
<< setprecision(3) << speedup_slower_count_
|
||||
<< endl;
|
||||
cout << "\nGPU exceeded rate: "
|
||||
<< setprecision(3) << (float)speedup_faster_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< "\nGPU passed rate: "
|
||||
<< setprecision(3) << (float)speedup_equal_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< "\nGPU failed rate: "
|
||||
<< setprecision(3) << (float)speedup_slower_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< endl;
|
||||
cout << "\naverage GPUTOTAL speedup: x"
|
||||
<< setprecision(3) << speedup_full_total_ / std::max(1, num_subtests_called_)
|
||||
<< endl;
|
||||
cout << "\nGPUTOTAL exceeded: "
|
||||
<< setprecision(3) << speedup_full_faster_count_
|
||||
<< "\nGPUTOTAL passed: "
|
||||
<< setprecision(3) << speedup_full_equal_count_
|
||||
<< "\nGPUTOTAL failed: "
|
||||
<< setprecision(3) << speedup_full_slower_count_
|
||||
<< endl;
|
||||
cout << "\nGPUTOTAL exceeded rate: "
|
||||
<< setprecision(3) << (float)speedup_full_faster_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< "\nGPUTOTAL passed rate: "
|
||||
<< setprecision(3) << (float)speedup_full_equal_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< "\nGPUTOTAL failed rate: "
|
||||
<< setprecision(3) << (float)speedup_full_slower_count_ / std::max(1, num_subtests_called_) * 100
|
||||
<< "%"
|
||||
<< endl;
|
||||
cout << resetiosflags(ios_base::fixed);
|
||||
}
|
||||
|
||||
|
||||
enum GTestColor {
|
||||
COLOR_DEFAULT,
|
||||
COLOR_RED,
|
||||
COLOR_GREEN,
|
||||
COLOR_YELLOW
|
||||
};
|
||||
#if GTEST_OS_WINDOWS&&!GTEST_OS_WINDOWS_MOBILE
|
||||
// Returns the character attribute for the given color.
|
||||
static WORD GetColorAttribute(GTestColor color) {
|
||||
switch (color) {
|
||||
case COLOR_RED: return FOREGROUND_RED;
|
||||
case COLOR_GREEN: return FOREGROUND_GREEN;
|
||||
case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
#else
|
||||
static const char* GetAnsiColorCode(GTestColor color) {
|
||||
switch (color) {
|
||||
case COLOR_RED: return "1";
|
||||
case COLOR_GREEN: return "2";
|
||||
case COLOR_YELLOW: return "3";
|
||||
default: return NULL;
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
static void printMetricsUti(double cpu_time, double gpu_time, double gpu_full_time, double speedup, double fullspeedup, std::stringstream& stream, std::stringstream& cur_subtest_description)
|
||||
{
|
||||
//cout <<TAB<< setw(7) << stream.str();
|
||||
cout <<TAB;
|
||||
|
||||
stream.str("");
|
||||
stream << cpu_time;
|
||||
cout << setw(10) << stream.str();
|
||||
|
||||
stream.str("");
|
||||
stream << gpu_time;
|
||||
cout << setw(10) << stream.str();
|
||||
|
||||
stream.str("");
|
||||
stream << "x" << setprecision(3) << speedup;
|
||||
cout << setw(8) << stream.str();
|
||||
|
||||
stream.str("");
|
||||
stream << gpu_full_time;
|
||||
cout << setw(10) << stream.str();
|
||||
|
||||
stream.str("");
|
||||
stream << "x" << setprecision(3) << fullspeedup;
|
||||
cout << setw(10) << stream.str();
|
||||
|
||||
cout << cur_subtest_description.str();
|
||||
cout << resetiosflags(ios_base::left) << endl;
|
||||
}
|
||||
|
||||
void TestSystem::printMetrics(int is_accurate, double cpu_time, double gpu_time, double gpu_full_time, double speedup, double fullspeedup)
|
||||
{
|
||||
cout << setiosflags(ios_base::left);
|
||||
stringstream stream;
|
||||
|
||||
std::stringstream &cur_subtest_description = getCurSubtestDescription();
|
||||
|
||||
#if GTEST_OS_WINDOWS&&!GTEST_OS_WINDOWS_MOBILE
|
||||
|
||||
WORD color;
|
||||
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
|
||||
// Gets the current text color.
|
||||
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
|
||||
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
|
||||
const WORD old_color_attrs = buffer_info.wAttributes;
|
||||
// We need to flush the stream buffers into the console before each
|
||||
// SetConsoleTextAttribute call lest it affect the text that is already
|
||||
// printed but has not yet reached the console.
|
||||
fflush(stdout);
|
||||
|
||||
if(is_accurate == 1||is_accurate == -1)
|
||||
{
|
||||
color = old_color_attrs;
|
||||
printMetricsUti(cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup, stream, cur_subtest_description);
|
||||
|
||||
}else
|
||||
{
|
||||
color = GetColorAttribute(COLOR_RED);
|
||||
SetConsoleTextAttribute(stdout_handle,
|
||||
color| FOREGROUND_INTENSITY);
|
||||
|
||||
printMetricsUti(cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup, stream, cur_subtest_description);
|
||||
fflush(stdout);
|
||||
// Restores the text color.
|
||||
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
|
||||
}
|
||||
#else
|
||||
GTestColor color = COLOR_RED;
|
||||
if(is_accurate == 1|| is_accurate == -1)
|
||||
{
|
||||
printMetricsUti(cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup, stream, cur_subtest_description);
|
||||
|
||||
}else
|
||||
{
|
||||
printf("\033[0;3%sm", GetAnsiColorCode(color));
|
||||
printMetricsUti(cpu_time, gpu_time, gpu_full_time, speedup, fullspeedup, stream, cur_subtest_description);
|
||||
printf("\033[m"); // Resets the terminal to default.
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void TestSystem::writeMetrics(double cpu_time, double gpu_time, double gpu_full_time, double speedup, double fullspeedup, double gpu_min, double gpu_max, double std_dev)
|
||||
{
|
||||
if (!record_)
|
||||
{
|
||||
recordname_ += ".csv";
|
||||
record_ = fopen(recordname_.c_str(), "w");
|
||||
}
|
||||
|
||||
string _is_accurate_;
|
||||
|
||||
if(is_accurate_ == 1)
|
||||
_is_accurate_ = "Pass";
|
||||
else if(is_accurate_ == 0)
|
||||
_is_accurate_ = "Fail";
|
||||
else if(is_accurate_ == -1)
|
||||
_is_accurate_ = " ";
|
||||
else
|
||||
{
|
||||
std::cout<<"is_accurate errer: "<<is_accurate_<<"\n";
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
fprintf(record_, "%s,%s,%s,%.2f,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f,%.3f\n",
|
||||
itname_changed_ ? itname_.c_str() : "",
|
||||
cur_subtest_description_.str().c_str(),
|
||||
_is_accurate_.c_str(),
|
||||
accurate_diff_,
|
||||
cpu_time, gpu_time, speedup, gpu_full_time, fullspeedup,
|
||||
gpu_min, gpu_max, std_dev);
|
||||
|
||||
if (itname_changed_)
|
||||
{
|
||||
itname_changed_ = false;
|
||||
}
|
||||
|
||||
fflush(record_);
|
||||
}
|
||||
|
||||
void TestSystem::writeSummary()
|
||||
{
|
||||
if (!record_)
|
||||
{
|
||||
recordname_ += ".csv";
|
||||
record_ = fopen(recordname_.c_str(), "w");
|
||||
}
|
||||
|
||||
fprintf(record_, "\nAverage GPU speedup: %.3f\n"
|
||||
"exceeded: %d (%.3f%%)\n"
|
||||
"passed: %d (%.3f%%)\n"
|
||||
"failed: %d (%.3f%%)\n"
|
||||
"\nAverage GPUTOTAL speedup: %.3f\n"
|
||||
"exceeded: %d (%.3f%%)\n"
|
||||
"passed: %d (%.3f%%)\n"
|
||||
"failed: %d (%.3f%%)\n",
|
||||
speedup_total_ / std::max(1, num_subtests_called_),
|
||||
speedup_faster_count_, (float)speedup_faster_count_ / std::max(1, num_subtests_called_) * 100,
|
||||
speedup_equal_count_, (float)speedup_equal_count_ / std::max(1, num_subtests_called_) * 100,
|
||||
speedup_slower_count_, (float)speedup_slower_count_ / std::max(1, num_subtests_called_) * 100,
|
||||
speedup_full_total_ / std::max(1, num_subtests_called_),
|
||||
speedup_full_faster_count_, (float)speedup_full_faster_count_ / std::max(1, num_subtests_called_) * 100,
|
||||
speedup_full_equal_count_, (float)speedup_full_equal_count_ / std::max(1, num_subtests_called_) * 100,
|
||||
speedup_full_slower_count_, (float)speedup_full_slower_count_ / std::max(1, num_subtests_called_) * 100
|
||||
);
|
||||
fflush(record_);
|
||||
}
|
||||
|
||||
void TestSystem::printError(const std::string &msg)
|
||||
{
|
||||
if(msg != "CL_INVALID_BUFFER_SIZE")
|
||||
{
|
||||
cout << TAB << "[error: " << msg << "] " << cur_subtest_description_.str() << endl;
|
||||
}
|
||||
}
|
||||
|
||||
// (Re)allocates `mat` to rows x cols of `type` and fills it with uniform
// random values in [low, high). The RNG seed is fixed so every run of a
// benchmark sees identical input data.
void gen(Mat &mat, int rows, int cols, int type, Scalar low, Scalar high)
{
    RNG seeded_rng(0); // deterministic across runs
    mat.create(rows, cols, type);
    seeded_rng.fill(mat, RNG::UNIFORM, low, high);
}
|
||||
|
||||
// Resolves `relpath` against the harness working directory
// (set via TestSystem::setWorkingDir).
string abspath(const string &relpath)
{
    string path = TestSystem::instance().workingDir();
    path += relpath;
    return path;
}
|
||||
|
||||
|
||||
// OpenCV error handler installed via redirectError(): routes error text
// through the test system's filtered printer instead of aborting.
int CV_CDECL cvErrorCallback(int /*status*/, const char * /*func_name*/,
                             const char *err_msg, const char * /*file_name*/,
                             int /*line*/, void * /*userdata*/)
{
    TestSystem &ts = TestSystem::instance();
    ts.printError(err_msg);
    return 0; // report the error as handled
}
|
||||
|
||||
double checkNorm(const Mat &m)
|
||||
{
|
||||
return norm(m, NORM_INF);
|
||||
}
|
||||
|
||||
double checkNorm(const Mat &m1, const Mat &m2)
|
||||
{
|
||||
return norm(m1, m2, NORM_INF);
|
||||
}
|
||||
|
||||
double checkSimilarity(const Mat &m1, const Mat &m2)
|
||||
{
|
||||
Mat diff;
|
||||
matchTemplate(m1, m2, diff, CV_TM_CCORR_NORMED);
|
||||
return std::abs(diff.at<float>(0, 0) - 1.f);
|
||||
}
|
||||
|
@ -42,7 +42,6 @@
|
||||
|
||||
#ifdef __GNUC__
|
||||
# pragma GCC diagnostic ignored "-Wmissing-declarations"
|
||||
# pragma GCC diagnostic ignored "-Wunused-function"
|
||||
# if defined __clang__ || defined __APPLE__
|
||||
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
|
||||
# pragma GCC diagnostic ignored "-Wextra"
|
||||
@ -70,457 +69,37 @@
|
||||
#include "opencv2/ocl/ocl.hpp"
|
||||
#include "opencv2/ts/ts.hpp"
|
||||
|
||||
#define OCL_SIZE_1000 cv::Size(1000, 1000)
|
||||
#define OCL_SIZE_2000 cv::Size(2000, 2000)
|
||||
#define OCL_SIZE_4000 cv::Size(4000, 4000)
|
||||
|
||||
#define OCL_TYPICAL_MAT_SIZES ::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000, OCL_SIZE_4000)
|
||||
|
||||
#define Min_Size 1000
|
||||
#define Max_Size 4000
|
||||
#define Multiple 2
|
||||
#define TAB " "
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
void gen(Mat &mat, int rows, int cols, int type, Scalar low, Scalar high);
|
||||
void gen(Mat &mat, int rows, int cols, int type, int low, int high, int n);
|
||||
|
||||
string abspath(const string &relpath);
|
||||
int CV_CDECL cvErrorCallback(int, const char *, const char *, const char *, int, void *);
|
||||
//typedef struct
|
||||
//{
|
||||
// short x;
|
||||
// short y;
|
||||
//} COOR;
|
||||
//COOR do_meanShift(int x0, int y0, uchar *sptr, uchar *dptr, int sstep,
|
||||
// cv::Size size, int sp, int sr, int maxIter, float eps, int *tab);
|
||||
//void meanShiftProc_(const Mat &src_roi, Mat &dst_roi, Mat &dstCoor_roi,
|
||||
// int sp, int sr, cv::TermCriteria crit);
|
||||
|
||||
|
||||
// Returns 1 when the two (possibly differently typed) values compare
// equal, 0 otherwise. Integer return keeps the legacy harness contract.
template<class T1, class T2>
int ExpectedEQ(T1 expected, T2 actual)
{
    return (expected == actual) ? 1 : 0;
}
|
||||
|
||||
template<class T1>
|
||||
int EeceptDoubleEQ(T1 expected, T1 actual)
|
||||
{
|
||||
testing::internal::Double lhs(expected);
|
||||
testing::internal::Double rhs(actual);
|
||||
|
||||
if (lhs.AlmostEquals(rhs))
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Same-type equality check; returns 1 on equality, 0 otherwise.
template<class T>
int AssertEQ(T expected, T actual)
{
    return (expected == actual) ? 1 : 0;
}
|
||||
|
||||
int ExceptDoubleNear(double val1, double val2, double abs_error);
|
||||
bool match_rect(cv::Rect r1, cv::Rect r2, int threshold);
|
||||
|
||||
double checkNorm(const cv::Mat &m);
|
||||
double checkNorm(const cv::Mat &m1, const cv::Mat &m2);
|
||||
double checkSimilarity(const cv::Mat &m1, const cv::Mat &m2);
|
||||
|
||||
int ExpectedMatNear(cv::Mat dst, cv::Mat cpu_dst, double eps);
|
||||
int ExceptedMatSimilar(cv::Mat dst, cv::Mat cpu_dst, double eps);
|
||||
|
||||
// Base class for routines registered with TestSystem; each instance
// carries a display name and a run() body supplied by the
// GLOBAL_INIT/PERFTEST macros.
class Runnable
{
public:
    explicit Runnable(const std::string &runname): name_(runname) {}
    virtual ~Runnable() {}

    // Executes the registered routine.
    virtual void run() = 0;

    const std::string &name() const
    {
        return name_;
    }

private:
    std::string name_; // display name used in reports
};
|
||||
|
||||
class TestSystem
|
||||
{
|
||||
public:
|
||||
static TestSystem &instance()
|
||||
{
|
||||
static TestSystem me;
|
||||
return me;
|
||||
}
|
||||
|
||||
void setWorkingDir(const std::string &val)
|
||||
{
|
||||
working_dir_ = val;
|
||||
}
|
||||
const std::string &workingDir() const
|
||||
{
|
||||
return working_dir_;
|
||||
}
|
||||
|
||||
void setTestFilter(const std::string &val)
|
||||
{
|
||||
test_filter_ = val;
|
||||
}
|
||||
const std::string &testFilter() const
|
||||
{
|
||||
return test_filter_;
|
||||
}
|
||||
|
||||
void setNumIters(int num_iters)
|
||||
{
|
||||
num_iters_ = num_iters;
|
||||
}
|
||||
void setGPUWarmupIters(int num_iters)
|
||||
{
|
||||
gpu_warmup_iters_ = num_iters;
|
||||
}
|
||||
void setCPUIters(int num_iters)
|
||||
{
|
||||
cpu_num_iters_ = num_iters;
|
||||
}
|
||||
|
||||
void setTopThreshold(double top)
|
||||
{
|
||||
top_ = top;
|
||||
}
|
||||
void setBottomThreshold(double bottom)
|
||||
{
|
||||
bottom_ = bottom;
|
||||
}
|
||||
|
||||
void addInit(Runnable *init)
|
||||
{
|
||||
inits_.push_back(init);
|
||||
}
|
||||
void addTest(Runnable *test)
|
||||
{
|
||||
tests_.push_back(test);
|
||||
}
|
||||
void run();
|
||||
|
||||
// It's public because OpenCV callback uses it
|
||||
void printError(const std::string &msg);
|
||||
|
||||
std::stringstream &startNewSubtest()
|
||||
{
|
||||
finishCurrentSubtest();
|
||||
return cur_subtest_description_;
|
||||
}
|
||||
|
||||
bool stop() const
|
||||
{
|
||||
return cur_iter_idx_ >= num_iters_;
|
||||
}
|
||||
|
||||
bool cpu_stop() const
|
||||
{
|
||||
return cur_iter_idx_ >= cpu_num_iters_;
|
||||
}
|
||||
|
||||
int get_cur_iter_idx()
|
||||
{
|
||||
return cur_iter_idx_;
|
||||
}
|
||||
|
||||
int get_cpu_num_iters()
|
||||
{
|
||||
return cpu_num_iters_;
|
||||
}
|
||||
|
||||
bool warmupStop()
|
||||
{
|
||||
return cur_warmup_idx_++ >= gpu_warmup_iters_;
|
||||
}
|
||||
|
||||
void warmupComplete()
|
||||
{
|
||||
cur_warmup_idx_ = 0;
|
||||
}
|
||||
|
||||
void cpuOn()
|
||||
{
|
||||
cpu_started_ = cv::getTickCount();
|
||||
}
|
||||
void cpuOff()
|
||||
{
|
||||
int64 delta = cv::getTickCount() - cpu_started_;
|
||||
cpu_times_.push_back(delta);
|
||||
++cur_iter_idx_;
|
||||
}
|
||||
void cpuComplete()
|
||||
{
|
||||
cpu_elapsed_ += meanTime(cpu_times_);
|
||||
cur_subtest_is_empty_ = false;
|
||||
cur_iter_idx_ = 0;
|
||||
}
|
||||
|
||||
void gpuOn()
|
||||
{
|
||||
gpu_started_ = cv::getTickCount();
|
||||
}
|
||||
void gpuOff()
|
||||
{
|
||||
int64 delta = cv::getTickCount() - gpu_started_;
|
||||
gpu_times_.push_back(delta);
|
||||
++cur_iter_idx_;
|
||||
}
|
||||
void gpuComplete()
|
||||
{
|
||||
gpu_elapsed_ += meanTime(gpu_times_);
|
||||
cur_subtest_is_empty_ = false;
|
||||
cur_iter_idx_ = 0;
|
||||
}
|
||||
|
||||
void gpufullOn()
|
||||
{
|
||||
gpu_full_started_ = cv::getTickCount();
|
||||
}
|
||||
void gpufullOff()
|
||||
{
|
||||
int64 delta = cv::getTickCount() - gpu_full_started_;
|
||||
gpu_full_times_.push_back(delta);
|
||||
++cur_iter_idx_;
|
||||
}
|
||||
void gpufullComplete()
|
||||
{
|
||||
gpu_full_elapsed_ += meanTime(gpu_full_times_);
|
||||
cur_subtest_is_empty_ = false;
|
||||
cur_iter_idx_ = 0;
|
||||
}
|
||||
|
||||
bool isListMode() const
|
||||
{
|
||||
return is_list_mode_;
|
||||
}
|
||||
void setListMode(bool value)
|
||||
{
|
||||
is_list_mode_ = value;
|
||||
}
|
||||
|
||||
void setRecordName(const std::string &name)
|
||||
{
|
||||
recordname_ = name;
|
||||
}
|
||||
|
||||
void setCurrentTest(const std::string &name)
|
||||
{
|
||||
itname_ = name;
|
||||
itname_changed_ = true;
|
||||
}
|
||||
|
||||
void setAccurate(int accurate, double diff)
|
||||
{
|
||||
is_accurate_ = accurate;
|
||||
accurate_diff_ = diff;
|
||||
}
|
||||
|
||||
void ExpectMatsNear(vector<Mat>& dst, vector<Mat>& cpu_dst, vector<double>& eps)
|
||||
{
|
||||
assert(dst.size() == cpu_dst.size());
|
||||
assert(cpu_dst.size() == eps.size());
|
||||
is_accurate_ = 1;
|
||||
for(size_t i=0; i<dst.size(); i++)
|
||||
{
|
||||
double cur_diff = checkNorm(dst[i], cpu_dst[i]);
|
||||
accurate_diff_ = max(accurate_diff_, cur_diff);
|
||||
if(cur_diff > eps[i])
|
||||
is_accurate_ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ExpectedMatNear(cv::Mat& dst, cv::Mat& cpu_dst, double eps)
|
||||
{
|
||||
assert(dst.type() == cpu_dst.type());
|
||||
assert(dst.size() == cpu_dst.size());
|
||||
accurate_diff_ = checkNorm(dst, cpu_dst);
|
||||
if(accurate_diff_ <= eps)
|
||||
is_accurate_ = 1;
|
||||
else
|
||||
is_accurate_ = 0;
|
||||
}
|
||||
|
||||
void ExceptedMatSimilar(cv::Mat& dst, cv::Mat& cpu_dst, double eps)
|
||||
{
|
||||
assert(dst.type() == cpu_dst.type());
|
||||
assert(dst.size() == cpu_dst.size());
|
||||
accurate_diff_ = checkSimilarity(cpu_dst, dst);
|
||||
if(accurate_diff_ <= eps)
|
||||
is_accurate_ = 1;
|
||||
else
|
||||
is_accurate_ = 0;
|
||||
}
|
||||
|
||||
std::stringstream &getCurSubtestDescription()
|
||||
{
|
||||
return cur_subtest_description_;
|
||||
}
|
||||
|
||||
private:
|
||||
TestSystem():
|
||||
cur_subtest_is_empty_(true), cpu_elapsed_(0),
|
||||
gpu_elapsed_(0), gpu_full_elapsed_(0), speedup_total_(0.0),
|
||||
num_subtests_called_(0),
|
||||
speedup_faster_count_(0), speedup_slower_count_(0), speedup_equal_count_(0),
|
||||
speedup_full_faster_count_(0), speedup_full_slower_count_(0), speedup_full_equal_count_(0), is_list_mode_(false),
|
||||
num_iters_(10), cpu_num_iters_(2),
|
||||
gpu_warmup_iters_(1), cur_iter_idx_(0), cur_warmup_idx_(0),
|
||||
record_(0), recordname_("performance"), itname_changed_(true),
|
||||
is_accurate_(-1), accurate_diff_(0.)
|
||||
{
|
||||
cpu_times_.reserve(num_iters_);
|
||||
gpu_times_.reserve(num_iters_);
|
||||
gpu_full_times_.reserve(num_iters_);
|
||||
}
|
||||
|
||||
void finishCurrentSubtest();
|
||||
void resetCurrentSubtest()
|
||||
{
|
||||
cpu_elapsed_ = 0;
|
||||
gpu_elapsed_ = 0;
|
||||
gpu_full_elapsed_ = 0;
|
||||
cur_subtest_description_.str("");
|
||||
cur_subtest_is_empty_ = true;
|
||||
cur_iter_idx_ = 0;
|
||||
cur_warmup_idx_ = 0;
|
||||
cpu_times_.clear();
|
||||
gpu_times_.clear();
|
||||
gpu_full_times_.clear();
|
||||
is_accurate_ = -1;
|
||||
accurate_diff_ = 0.;
|
||||
}
|
||||
|
||||
double meanTime(const std::vector<int64> &samples);
|
||||
|
||||
void printHeading();
|
||||
void printSummary();
|
||||
void printMetrics(int is_accurate, double cpu_time, double gpu_time = 0.0f, double gpu_full_time = 0.0f, double speedup = 0.0f, double fullspeedup = 0.0f);
|
||||
|
||||
void writeHeading();
|
||||
void writeSummary();
|
||||
void writeMetrics(double cpu_time, double gpu_time = 0.0f, double gpu_full_time = 0.0f,
|
||||
double speedup = 0.0f, double fullspeedup = 0.0f,
|
||||
double gpu_min = 0.0f, double gpu_max = 0.0f, double std_dev = 0.0f);
|
||||
|
||||
std::string working_dir_;
|
||||
std::string test_filter_;
|
||||
|
||||
std::vector<Runnable *> inits_;
|
||||
std::vector<Runnable *> tests_;
|
||||
|
||||
std::stringstream cur_subtest_description_;
|
||||
bool cur_subtest_is_empty_;
|
||||
|
||||
int64 cpu_started_;
|
||||
int64 gpu_started_;
|
||||
int64 gpu_full_started_;
|
||||
double cpu_elapsed_;
|
||||
double gpu_elapsed_;
|
||||
double gpu_full_elapsed_;
|
||||
|
||||
double speedup_total_;
|
||||
double speedup_full_total_;
|
||||
int num_subtests_called_;
|
||||
|
||||
int speedup_faster_count_;
|
||||
int speedup_slower_count_;
|
||||
int speedup_equal_count_;
|
||||
|
||||
int speedup_full_faster_count_;
|
||||
int speedup_full_slower_count_;
|
||||
int speedup_full_equal_count_;
|
||||
|
||||
bool is_list_mode_;
|
||||
|
||||
double top_;
|
||||
double bottom_;
|
||||
|
||||
int num_iters_;
|
||||
int cpu_num_iters_; //there's no need to set cpu running same times with gpu
|
||||
int gpu_warmup_iters_; //gpu warm up times, default is 1
|
||||
int cur_iter_idx_;
|
||||
int cur_warmup_idx_; //current gpu warm up times
|
||||
std::vector<int64> cpu_times_;
|
||||
std::vector<int64> gpu_times_;
|
||||
std::vector<int64> gpu_full_times_;
|
||||
|
||||
FILE *record_;
|
||||
std::string recordname_;
|
||||
std::string itname_;
|
||||
bool itname_changed_;
|
||||
|
||||
int is_accurate_;
|
||||
double accurate_diff_;
|
||||
};
|
||||
|
||||
|
||||
// Registers a global initialization routine with the legacy TestSystem.
// Usage: GLOBAL_INIT(name) { ...init body... }
#define GLOBAL_INIT(name) \
struct name##_init: Runnable { \
    name##_init(): Runnable(#name) { \
        TestSystem::instance().addInit(this); \
    } \
    void run(); \
} name##_init_instance; \
void name##_init::run()


// Registers a performance test. Usage: PERFTEST(name) { ...test body... }
#define PERFTEST(name) \
struct name##_test: Runnable { \
    name##_test(): Runnable(#name) { \
        TestSystem::instance().addTest(this); \
    } \
    void run(); \
} name##_test_instance; \
void name##_test::run()

// Starts a new subtest; the returned stream builds its description.
#define SUBTEST TestSystem::instance().startNewSubtest()

// CPU timing loop: CPU_ON ...code... CPU_OFF runs the bracketed code
// cpu_num_iters_ times and records each iteration's duration.
#define CPU_ON \
    while (!TestSystem::instance().cpu_stop()) { \
        TestSystem::instance().cpuOn()
#define CPU_OFF \
        TestSystem::instance().cpuOff(); \
    } TestSystem::instance().cpuComplete()

// GPU (kernel-only) timing loop; GPU_OFF calls ocl::finish() first so the
// measured interval covers completed device work.
#define GPU_ON \
    while (!TestSystem::instance().stop()) { \
        TestSystem::instance().gpuOn()
#define GPU_OFF \
        ocl::finish();\
        TestSystem::instance().gpuOff(); \
    } TestSystem::instance().gpuComplete()

// GPU "full" timing loop — caller includes upload/download in the body,
// so host<->device transfer time is part of the measurement.
#define GPU_FULL_ON \
    while (!TestSystem::instance().stop()) { \
        TestSystem::instance().gpufullOn()
#define GPU_FULL_OFF \
        TestSystem::instance().gpufullOff(); \
    } TestSystem::instance().gpufullComplete()

// Untimed warm-up loop run before GPU measurements.
#define WARMUP_ON \
    while (!TestSystem::instance().warmupStop()) {
#define WARMUP_OFF \
        ocl::finish();\
    } TestSystem::instance().warmupComplete()

// Typical square matrix sizes exercised by the OCL perf tests.
// NOTE(review): these appear to duplicate the OCL_SIZE_* definitions seen
// earlier; presumably they live in different headers — confirm no
// redefinition in a single TU.
#define OCL_SIZE_1000 Size(1000, 1000)
#define OCL_SIZE_2000 Size(2000, 2000)
#define OCL_SIZE_4000 Size(4000, 4000)

#define OCL_TYPICAL_MAT_SIZES ::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000, OCL_SIZE_4000)

// Shorthand for a googletest value list of mat types/depths.
#define OCL_PERF_ENUM(type, ...) ::testing::Values(type, ## __VA_ARGS__ )

// Implementation names selectable on the command line.
#define IMPL_OCL "ocl"
#define IMPL_GPU "gpu"
#define IMPL_PLAIN "plain"

// True when the given implementation was selected for this run.
#define RUN_OCL_IMPL (IMPL_OCL == getSelectedImpl())
#define RUN_PLAIN_IMPL (IMPL_PLAIN == getSelectedImpl())

#ifdef HAVE_OPENCV_GPU
# define RUN_GPU_IMPL (IMPL_GPU == getSelectedImpl())
#endif

// Fallback branch for PERF_TEST_P bodies: any implementation that a test
// does not provide fails with "no implementation" (note: both branches of
// the GPU variant intentionally fail — no OCL perf test implements "gpu").
#ifdef HAVE_OPENCV_GPU
#define OCL_PERF_ELSE \
        if (RUN_GPU_IMPL) \
            CV_TEST_FAIL_NO_IMPL(); \
        else \
            CV_TEST_FAIL_NO_IMPL();
#else
#define OCL_PERF_ELSE \
        CV_TEST_FAIL_NO_IMPL();
#endif
|
||||
|
||||
#endif
|
||||
|
@ -51,97 +51,74 @@ using std::tr1::get;
|
||||
|
||||
///////////// pyrDown //////////////////////
|
||||
|
||||
CV_ENUM(pyrDownMatType, CV_8UC1, CV_8UC4)
|
||||
|
||||
typedef tuple<Size, pyrDownMatType> pyrDownParams;
|
||||
typedef TestBaseWithParam<pyrDownParams> pyrDownFixture;
|
||||
typedef Size_MatType pyrDownFixture;
|
||||
|
||||
PERF_TEST_P(pyrDownFixture, pyrDown,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
pyrDownMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
pyrDownParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst;
|
||||
Size dstSize((srcSize.height + 1) >> 1, (srcSize.width + 1) >> 1);
|
||||
dst.create(dstSize, type);
|
||||
declare.in(src).out(dst);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(dstSize, type);
|
||||
|
||||
TEST_CYCLE() cv::ocl::pyrDown(oclSrc, oclDst);
|
||||
TEST_CYCLE() ocl::pyrDown(oclSrc, oclDst);
|
||||
|
||||
oclDst.download(dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::pyrDown(src, dst);
|
||||
TEST_CYCLE() pyrDown(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// pyrUp ////////////////////////
|
||||
|
||||
typedef pyrDownMatType pyrUpMatType;
|
||||
typedef tuple<Size, pyrUpMatType> pyrUpParams;
|
||||
typedef TestBaseWithParam<pyrUpParams> pyrUpFixture;
|
||||
typedef Size_MatType pyrUpFixture;
|
||||
|
||||
PERF_TEST_P(pyrUpFixture, pyrUp,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
pyrUpMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8UC1, CV_8UC4)))
|
||||
{
|
||||
// getting params
|
||||
pyrUpParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int type = get<1>(params);
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, type), dst;
|
||||
Size dstSize(srcSize.height << 1, srcSize.width << 1);
|
||||
dst.create(dstSize, type);
|
||||
declare.in(src).out(dst);
|
||||
declare.in(src, WARMUP_RNG).out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src), oclDst(dstSize, type);
|
||||
|
||||
TEST_CYCLE() cv::ocl::pyrDown(oclSrc, oclDst);
|
||||
TEST_CYCLE() ocl::pyrDown(oclSrc, oclDst);
|
||||
|
||||
oclDst.download(dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::pyrDown(src, dst);
|
||||
TEST_CYCLE() pyrDown(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
@ -51,23 +51,16 @@ using std::tr1::get;
|
||||
|
||||
///////////// Merge////////////////////////
|
||||
|
||||
CV_ENUM(MergeMatType, CV_8U, CV_32F)
|
||||
|
||||
typedef tuple<Size, MergeMatType> MergeParams;
|
||||
typedef TestBaseWithParam<MergeParams> MergeFixture;
|
||||
typedef Size_MatType MergeFixture;
|
||||
|
||||
PERF_TEST_P(MergeFixture, Merge,
|
||||
::testing::Combine(::testing::Values(OCL_SIZE_1000, OCL_SIZE_2000),
|
||||
MergeMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8U, CV_32F)))
|
||||
{
|
||||
// getting params
|
||||
MergeParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int depth = get<1>(params), channels = 3;
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
const int dstType = CV_MAKE_TYPE(depth, channels);
|
||||
Mat dst(srcSize, dstType);
|
||||
vector<Mat> src(channels);
|
||||
@ -78,8 +71,7 @@ PERF_TEST_P(MergeFixture, Merge,
|
||||
}
|
||||
declare.out(dst);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclDst(srcSize, dstType);
|
||||
vector<ocl::oclMat> oclSrc(src.size());
|
||||
@ -92,50 +84,39 @@ PERF_TEST_P(MergeFixture, Merge,
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
TEST_CYCLE() cv::merge(src, dst);
|
||||
|
||||
SANITY_CHECK(dst);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
||||
///////////// Split////////////////////////
|
||||
|
||||
typedef MergeMatType SplitMatType;
|
||||
typedef tuple<Size, SplitMatType> SplitParams;
|
||||
typedef TestBaseWithParam<SplitParams> SplitFixture;
|
||||
typedef Size_MatType SplitFixture;
|
||||
|
||||
PERF_TEST_P(SplitFixture, Split,
|
||||
::testing::Combine(OCL_TYPICAL_MAT_SIZES,
|
||||
SplitMatType::all()))
|
||||
OCL_PERF_ENUM(CV_8U, CV_32F)))
|
||||
{
|
||||
// getting params
|
||||
MergeParams params = GetParam();
|
||||
const Size_MatType_t params = GetParam();
|
||||
const Size srcSize = get<0>(params);
|
||||
const int depth = get<1>(params), channels = 3;
|
||||
|
||||
std::string impl = getSelectedImpl();
|
||||
|
||||
// creating src data
|
||||
Mat src(srcSize, CV_MAKE_TYPE(depth, channels));
|
||||
declare.in(src, WARMUP_RNG);
|
||||
|
||||
// select implementation
|
||||
if (impl == "ocl")
|
||||
if (RUN_OCL_IMPL)
|
||||
{
|
||||
ocl::oclMat oclSrc(src);
|
||||
vector<ocl::oclMat> oclDst(channels, ocl::oclMat(srcSize, CV_MAKE_TYPE(depth, 1)));
|
||||
|
||||
TEST_CYCLE() cv::ocl::split(oclSrc, oclDst);
|
||||
|
||||
AssertEQ(channels, 3);
|
||||
ASSERT_EQ(3, channels);
|
||||
Mat dst0, dst1, dst2;
|
||||
oclDst[0].download(dst0);
|
||||
oclDst[1].download(dst1);
|
||||
@ -144,21 +125,17 @@ PERF_TEST_P(SplitFixture, Split,
|
||||
SANITY_CHECK(dst1);
|
||||
SANITY_CHECK(dst2);
|
||||
}
|
||||
else if (impl == "plain")
|
||||
else if (RUN_PLAIN_IMPL)
|
||||
{
|
||||
vector<Mat> dst(channels, Mat(srcSize, CV_MAKE_TYPE(depth, 1)));
|
||||
TEST_CYCLE() cv::split(src, dst);
|
||||
|
||||
AssertEQ(channels, 3);
|
||||
ASSERT_EQ(3, channels);
|
||||
Mat & dst0 = dst[0], & dst1 = dst[1], & dst2 = dst[2];
|
||||
SANITY_CHECK(dst0);
|
||||
SANITY_CHECK(dst1);
|
||||
SANITY_CHECK(dst2);
|
||||
}
|
||||
#ifdef HAVE_OPENCV_GPU
|
||||
else if (impl == "gpu")
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
#endif
|
||||
else
|
||||
CV_TEST_FAIL_NO_IMPL();
|
||||
OCL_PERF_ELSE
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user