added more assertions on device features to gpu functions and tests

moved TargetArchs and DeviceInfo to core
fixed bug in GpuMat::copy with mask (incorrect index in function table)
Vladislav Vinogradov
2012-03-27 10:34:30 +00:00
parent e8fab91d51
commit eaea6782d5
11 changed files with 1501 additions and 1619 deletions
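Most of the test changes below apply a single guard pattern: feature-dependent cases (most often CV_64F data, which needs cv::gpu::NATIVE_DOUBLE) are now run anyway and asserted to fail with the documented error code, instead of being skipped with an early return as before. The supportFeature helper the tests call is not part of this diff; a minimal sketch of what it presumably combines, using the TargetArchs and DeviceInfo classes this commit moves into core:

// Presumed shape of the test-suite helper: a feature is usable only if the
// library was built with code for it AND the current device provides it.
bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature)
{
    return cv::gpu::TargetArchs::builtWith(feature)  // compile time: PTX/cubin present
        && info.supports(feature);                   // run time: device capability
}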

View File

@@ -43,6 +43,138 @@
namespace {
////////////////////////////////////////////////////////////////////////////////
// Merge
PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Merge, Accuracy)
{
std::vector<cv::Mat> src;
src.reserve(channels);
for (int i = 0; i < channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));
std::vector<cv::gpu::GpuMat> d_src;
for (int i = 0; i < channels; ++i)
d_src.push_back(loadMat(src[i], useRoi));
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat dst;
cv::gpu::merge(d_src, dst);
cv::Mat dst_gold;
cv::merge(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Merge, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Split
PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth;
int channels;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth = GET_PARAM(2);
channels = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, channels);
}
};
TEST_P(Split, Accuracy)
{
cv::Mat src = randomMat(size, type);
if (depth == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src), dst);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
std::vector<cv::gpu::GpuMat> dst;
cv::gpu::split(loadMat(src, useRoi), dst);
std::vector<cv::Mat> dst_gold;
cv::split(src, dst_gold);
ASSERT_EQ(dst_gold.size(), dst.size());
for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Split, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
testing::Values(1, 2, 3, 4),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// Add_Array
@@ -1974,7 +2106,7 @@ TEST_P(AddWeighted, Accuracy)
cv::Mat dst_gold;
cv::addWeighted(src1, alpha, src2, beta, gamma, dst_gold, dst_depth);
-    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-12);
+    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 1.0 : 1e-3);
}
}
@@ -2487,16 +2619,32 @@ TEST_P(MeanStdDev, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
-    cv::Scalar mean;
-    cv::Scalar stddev;
-    cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
+    if (!supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_13))
+    {
+        try
+        {
+            cv::Scalar mean;
+            cv::Scalar stddev;
+            cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
+        }
+        catch (const cv::Exception& e)
+        {
+            ASSERT_EQ(CV_StsNotImplemented, e.code);
+        }
+    }
+    else
+    {
+        cv::Scalar mean;
+        cv::Scalar stddev;
+        cv::gpu::meanStdDev(loadMat(src, useRoi), mean, stddev);
-    cv::Scalar mean_gold;
-    cv::Scalar stddev_gold;
-    cv::meanStdDev(src, mean_gold, stddev_gold);
+        cv::Scalar mean_gold;
+        cv::Scalar stddev_gold;
+        cv::meanStdDev(src, mean_gold, stddev_gold);
-    EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
-    EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
+        EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
+        EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
+    }
}
INSTANTIATE_TEST_CASE_P(GPU_Core, MeanStdDev, testing::Combine(

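The MeanStdDev hunk above keys on cv::gpu::FEATURE_SET_COMPUTE_13 rather than NATIVE_DOUBLE, presumably because the underlying reduction needs the double-precision support that arrived with compute capability 1.3. The same check works outside the tests; a sketch of an application-side guard (d_src and src are placeholder names):

// Guard a GPU call on device capability at run time and fall back to the
// CPU implementation otherwise. Assumes the cv::gpu::DeviceInfo API.
cv::gpu::DeviceInfo info(cv::gpu::getDevice());
cv::Scalar mean, stddev;
if (info.supports(cv::gpu::FEATURE_SET_COMPUTE_13))
    cv::gpu::meanStdDev(d_src, mean, stddev);  // needs compute capability >= 1.3
else
    cv::meanStdDev(src, mean, stddev);         // CPU fallback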
View File

@@ -0,0 +1,325 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace {
////////////////////////////////////////////////////////////////////////////////
// SetTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(zero);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
}
}
TEST_P(SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val, loadMat(mask));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat mat = loadMat(mat_gold, useRoi);
mat.setTo(val, loadMat(mask, useRoi));
mat_gold.setTo(val, mask);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, SetTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// CopyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
d_src.copyTo(dst);
EXPECT_MAT_NEAR(src, dst, 0.0);
}
TEST_P(CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.copyTo(dst, loadMat(mask, useRoi));
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
d_src.copyTo(dst, loadMat(mask, useRoi));
cv::Mat dst_gold = cv::Mat::zeros(size, type);
src.copyTo(dst_gold, mask);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, CopyTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// ConvertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int depth1;
int depth2;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
depth1 = GET_PARAM(2);
depth2 = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
}
TEST_P(ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
double b = randomDouble(-10.0, 10.0);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
d_src.convertTo(dst, depth2, a, b);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsUnsupportedFormat, e.code);
}
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2, a, b);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 0.0 : 1e-4);
}
}
INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
ALL_DEPTH,
ALL_DEPTH,
WHOLE_SUBMAT));
} // namespace
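Every case in this file runs both on a whole matrix and on a submatrix via the UseRoi parameter, which forces the kernels to honor a row pitch different from cols * elemSize(). The loadMat/createMat helpers live in the shared test utilities, not in this diff; a plausible sketch of the ROI variant, under that assumption (loadMatRoi is a hypothetical name):

// Plausible sketch of a loadMat-style helper: with useRoi set it uploads into
// a view of a larger allocation, so step != cols * elemSize() and the
// non-contiguous code paths get exercised.
cv::gpu::GpuMat loadMatRoi(const cv::Mat& m, bool useRoi)
{
    if (!useRoi)
        return cv::gpu::GpuMat(m);  // contiguous upload

    cv::gpu::GpuMat whole(m.rows + 10, m.cols + 10, m.type());
    cv::gpu::GpuMat roi = whole(cv::Rect(5, 5, m.cols, m.rows));
    roi.upload(m);                  // sizes match, so data lands in the view
    return roi;                     // refcounting keeps the parent buffer alive
}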

View File

@@ -1,323 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
using namespace testing;
//#define DUMP
struct CV_GpuHogDetectTestRunner : cv::gpu::HOGDescriptor
{
void run()
{
cv::Mat img_rgb = readImage("hog/road.png");
ASSERT_FALSE(img_rgb.empty());
#ifdef DUMP
f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
ASSERT_TRUE(f.is_open());
#else
f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
ASSERT_TRUE(f.is_open());
#endif
// Test on color image
cv::Mat img;
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
test(img);
// Test on gray image
cv::cvtColor(img_rgb, img, CV_BGR2GRAY);
test(img);
f.close();
}
#ifdef DUMP
void dump(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)
{
f.write((char*)&block_hists.rows, sizeof(block_hists.rows));
f.write((char*)&block_hists.cols, sizeof(block_hists.cols));
for (int i = 0; i < block_hists.rows; ++i)
{
for (int j = 0; j < block_hists.cols; ++j)
{
float val = block_hists.at<float>(i, j);
f.write((char*)&val, sizeof(val));
}
}
int nlocations = locations.size();
f.write((char*)&nlocations, sizeof(nlocations));
for (int i = 0; i < locations.size(); ++i)
f.write((char*)&locations[i], sizeof(locations[i]));
}
#else
void compare(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)
{
int rows, cols;
int nlocations;
f.read((char*)&rows, sizeof(rows));
f.read((char*)&cols, sizeof(cols));
ASSERT_EQ(rows, block_hists.rows);
ASSERT_EQ(cols, block_hists.cols);
for (int i = 0; i < block_hists.rows; ++i)
{
for (int j = 0; j < block_hists.cols; ++j)
{
float val;
f.read((char*)&val, sizeof(val));
ASSERT_NEAR(val, block_hists.at<float>(i, j), 1e-3);
}
}
f.read((char*)&nlocations, sizeof(nlocations));
ASSERT_EQ(nlocations, static_cast<int>(locations.size()));
for (int i = 0; i < nlocations; ++i)
{
cv::Point location;
f.read((char*)&location, sizeof(location));
ASSERT_EQ(location, locations[i]);
}
}
#endif
void test(const cv::Mat& img)
{
cv::gpu::GpuMat d_img(img);
gamma_correction = false;
setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
//cpu detector may be updated soon
//hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
std::vector<cv::Point> locations;
// Test detect
detect(d_img, locations, 0);
#ifdef DUMP
dump(block_hists, locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
// Test detect on smaller image
cv::Mat img2;
cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
detect(cv::gpu::GpuMat(img2), locations, 0);
#ifdef DUMP
dump(block_hists, locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
// Test detect on greater image
cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
detect(cv::gpu::GpuMat(img2), locations, 0);
#ifdef DUMP
dump(block_hists, locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
}
#ifdef DUMP
std::ofstream f;
#else
std::ifstream f;
#endif
};
struct Detect : TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(Detect, Accuracy)
{
CV_GpuHogDetectTestRunner runner;
runner.run();
}
INSTANTIATE_TEST_CASE_P(HOG, Detect, ALL_DEVICES);
struct CV_GpuHogGetDescriptorsTestRunner : cv::gpu::HOGDescriptor
{
CV_GpuHogGetDescriptorsTestRunner(): cv::gpu::HOGDescriptor(cv::Size(64, 128)) {}
void run()
{
// Load image (e.g. train data, composed from windows)
cv::Mat img_rgb = readImage("hog/train_data.png");
ASSERT_FALSE(img_rgb.empty());
// Convert to C4
cv::Mat img;
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
cv::gpu::GpuMat d_img(img);
// Convert train images into feature vectors (train table)
cv::gpu::GpuMat descriptors, descriptors_by_cols;
getDescriptors(d_img, win_size, descriptors, DESCR_FORMAT_ROW_BY_ROW);
getDescriptors(d_img, win_size, descriptors_by_cols, DESCR_FORMAT_COL_BY_COL);
// Check size of the result train table
wins_per_img_x = 3;
wins_per_img_y = 2;
blocks_per_win_x = 7;
blocks_per_win_y = 15;
block_hist_size = 36;
cv::Size descr_size_expected = cv::Size(blocks_per_win_x * blocks_per_win_y * block_hist_size,
wins_per_img_x * wins_per_img_y);
ASSERT_EQ(descr_size_expected, descriptors.size());
// Check both formats of output descriptors are handled correctly
cv::Mat dr(descriptors);
cv::Mat dc(descriptors_by_cols);
for (int i = 0; i < wins_per_img_x * wins_per_img_y; ++i)
{
const float* l = dr.rowRange(i, i + 1).ptr<float>();
const float* r = dc.rowRange(i, i + 1).ptr<float>();
for (int y = 0; y < blocks_per_win_y; ++y)
for (int x = 0; x < blocks_per_win_x; ++x)
for (int k = 0; k < block_hist_size; ++k)
ASSERT_EQ(l[(y * blocks_per_win_x + x) * block_hist_size + k],
r[(x * blocks_per_win_y + y) * block_hist_size + k]);
}
/* Now we want to extract the same feature vectors, but from single images. NOTE: the results
will be different due to border value interpolation. Using many small images is slower, but we
won't call getDescriptors and will use computeBlockHistograms instead. computeBlockHistograms
works well, as can be checked in the gpu_hog sample */
img_rgb = readImage("hog/positive1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
// Everything is fine with interpolation for left top subimage
ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));
img_rgb = readImage("hog/positive2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));
img_rgb = readImage("hog/negative1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));
img_rgb = readImage("hog/negative2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));
img_rgb = readImage("hog/positive3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));
img_rgb = readImage("hog/negative3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
// Does not compare border values, since interpolation produces a delta there
void compare_inner_parts(cv::Mat d1, cv::Mat d2)
{
for (int i = 1; i < blocks_per_win_y - 1; ++i)
for (int j = 1; j < blocks_per_win_x - 1; ++j)
for (int k = 0; k < block_hist_size; ++k)
{
float a = d1.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size + k);
float b = d2.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size + k);
ASSERT_FLOAT_EQ(a, b);
}
}
int wins_per_img_x;
int wins_per_img_y;
int blocks_per_win_x;
int blocks_per_win_y;
int block_hist_size;
};
struct GetDescriptors : TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(GetDescriptors, Accuracy)
{
CV_GpuHogGetDescriptorsTestRunner runner;
runner.run();
}
INSTANTIATE_TEST_CASE_P(HOG, GetDescriptors, ALL_DEVICES);
#endif // HAVE_CUDA

View File

@@ -1,559 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
using namespace testing;
////////////////////////////////////////////////////////////////////////////////
// merge
PARAM_TEST_CASE(Merge, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
std::vector<cv::Mat> src;
cv::Mat dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
int depth = CV_MAT_DEPTH(type);
int num_channels = CV_MAT_CN(type);
src.reserve(num_channels);
for (int i = 0; i < num_channels; ++i)
src.push_back(cv::Mat(size, depth, cv::Scalar::all(i)));
cv::merge(src, dst_gold);
}
};
TEST_P(Merge, Accuracy)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst;
std::vector<cv::gpu::GpuMat> dev_src;
cv::gpu::GpuMat dev_dst;
for (size_t i = 0; i < src.size(); ++i)
dev_src.push_back(loadMat(src[i], useRoi));
cv::gpu::merge(dev_src, dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, Merge, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// split
PARAM_TEST_CASE(Split, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat src;
std::vector<cv::Mat> dst_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src.create(size, type);
src.setTo(cv::Scalar(1.0, 2.0, 3.0, 4.0));
cv::split(src, dst_gold);
}
};
TEST_P(Split, Accuracy)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
std::vector<cv::Mat> dst;
std::vector<cv::gpu::GpuMat> dev_dst;
cv::gpu::split(loadMat(src, useRoi), dev_dst);
dst.resize(dev_dst.size());
for (size_t i = 0; i < dev_dst.size(); ++i)
dev_dst[i].download(dst[i]);
ASSERT_EQ(dst_gold.size(), dst.size());
for (size_t i = 0; i < dst_gold.size(); ++i)
{
EXPECT_MAT_NEAR(dst_gold[i], dst[i], 0.0);
}
}
INSTANTIATE_TEST_CASE_P(MatOp, Split, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// split_merge_consistency
PARAM_TEST_CASE(SplitMerge, cv::gpu::DeviceInfo, MatType)
{
cv::gpu::DeviceInfo devInfo;
int type;
cv::Size size;
cv::Mat orig;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
orig.create(size, type);
orig.setTo(cv::Scalar(1.0, 2.0, 3.0, 4.0));
}
};
TEST_P(SplitMerge, Consistency)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat final;
std::vector<cv::gpu::GpuMat> dev_vec;
cv::gpu::GpuMat dev_final;
cv::gpu::split(loadMat(orig), dev_vec);
cv::gpu::merge(dev_vec, dev_final);
dev_final.download(final);
EXPECT_MAT_NEAR(orig, final, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, SplitMerge, Combine(
ALL_DEVICES,
ALL_TYPES));
////////////////////////////////////////////////////////////////////////////////
// setTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat mat_gold;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
mat_gold.create(size, type);
}
};
TEST_P(SetTo, Zero)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar zero = cv::Scalar::all(0);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(zero);
dev_mat.setTo(zero);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, SameVal)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar::all(1);
cv::Mat mat;
cv::gpu::GpuMat dev_mat(mat_gold);
mat_gold.setTo(s);
dev_mat.setTo(s);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, DifferentVal)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar(1, 2, 3, 4);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(s);
dev_mat.setTo(s);
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
TEST_P(SetTo, Masked)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Scalar s = cv::Scalar(1, 2, 3, 4);
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat mask = randomMat(rng, mat_gold.size(), CV_8UC1, 0.0, 1.5, false);
cv::Mat mat;
cv::gpu::GpuMat dev_mat = loadMat(mat_gold, useRoi);
mat_gold.setTo(s, mask);
dev_mat.setTo(s, loadMat(mask, useRoi));
dev_mat.download(mat);
EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, SetTo, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// copyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int type;
bool useRoi;
cv::Size size;
cv::Mat src;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
useRoi = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src = randomMat(rng, size, type, 0.0, 127.0, false);
}
};
TEST_P(CopyTo, WithoutMask)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold;
src.copyTo(dst_gold);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst = loadMat(src, useRoi);
dev_src.copyTo(dev_dst);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(CopyTo, Masked)
{
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::RNG& rng = TS::ptr()->get_rng();
cv::Mat mask = randomMat(rng, src.size(), CV_8UC1, 0.0, 2.0, false);
cv::Mat zeroMat(src.size(), src.type(), cv::Scalar::all(0));
cv::Mat dst_gold = zeroMat.clone();
src.copyTo(dst_gold, mask);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst = loadMat(zeroMat, useRoi);
dev_src.copyTo(dev_dst, loadMat(mask, useRoi));
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, CopyTo, Combine(
ALL_DEVICES,
ALL_TYPES,
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// convertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, MatType, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int depth1;
int depth2;
bool useRoi;
cv::Size size;
cv::Mat src;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
depth1 = GET_PARAM(1);
depth2 = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
size = cv::Size(rng.uniform(20, 150), rng.uniform(20, 150));
src = randomMat(rng, size, depth1, 0.0, 127.0, false);
}
};
TEST_P(ConvertTo, WithoutScaling)
{
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst;
dev_src.convertTo(dev_dst, depth2);
dev_dst.download(dst);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
TEST_P(ConvertTo, WithScaling)
{
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
return;
cv::RNG& rng = TS::ptr()->get_rng();
const double a = rng.uniform(0.0, 1.0);
const double b = rng.uniform(-10.0, 10.0);
cv::Mat dst_gold;
src.convertTo(dst_gold, depth2, a, b);
cv::Mat dst;
cv::gpu::GpuMat dev_src = loadMat(src, useRoi);
cv::gpu::GpuMat dev_dst;
dev_src.convertTo(dev_dst, depth2, a, b);
dev_dst.download(dst);
const double eps = depth2 < CV_32F ? 1 : 1e-4;
EXPECT_MAT_NEAR(dst_gold, dst, eps);
}
INSTANTIATE_TEST_CASE_P(MatOp, ConvertTo, Combine(
ALL_DEVICES,
TYPES(CV_8U, CV_64F, 1, 1),
TYPES(CV_8U, CV_64F, 1, 1),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// async
struct Async : TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::gpu::CudaMem src;
cv::Mat dst_gold0;
cv::Mat dst_gold1;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::RNG& rng = TS::ptr()->get_rng();
int rows = rng.uniform(100, 200);
int cols = rng.uniform(100, 200);
src = cv::gpu::CudaMem(cv::Mat::zeros(rows, cols, CV_8UC1));
dst_gold0 = cv::Mat(rows, cols, CV_8UC1, cv::Scalar::all(255));
dst_gold1 = cv::Mat(rows, cols, CV_8UC1, cv::Scalar::all(128));
}
};
TEST_P(Async, Accuracy)
{
cv::Mat dst0, dst1;
cv::gpu::CudaMem cpudst0;
cv::gpu::CudaMem cpudst1;
cv::gpu::GpuMat gpusrc;
cv::gpu::GpuMat gpudst0;
cv::gpu::GpuMat gpudst1(src.rows, src.cols, CV_8UC1);
cv::gpu::Stream stream0;
cv::gpu::Stream stream1;
stream0.enqueueUpload(src, gpusrc);
cv::gpu::bitwise_not(gpusrc, gpudst0, cv::gpu::GpuMat(), stream0);
stream0.enqueueDownload(gpudst0, cpudst0);
stream1.enqueueMemSet(gpudst1, cv::Scalar::all(128));
stream1.enqueueDownload(gpudst1, cpudst1);
stream0.waitForCompletion();
stream1.waitForCompletion();
dst0 = cpudst0.createMatHeader();
dst1 = cpudst1.createMatHeader();
EXPECT_MAT_NEAR(dst_gold0, dst0, 0.0);
EXPECT_MAT_NEAR(dst_gold1, dst1, 0.0);
}
INSTANTIATE_TEST_CASE_P(MatOp, Async, ALL_DEVICES);
#endif // HAVE_CUDA
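The deleted Async test was the one place this file exercised cv::gpu::Stream: page-locked CudaMem staging buffers, non-blocking enqueue* calls, and an explicit waitForCompletion before the host reads the results. Condensed to its skeleton under the same API:

// Skeleton of the async pattern from the deleted test: enqueue* calls return
// immediately; results are valid only after waitForCompletion().
cv::gpu::CudaMem src(cv::Mat::zeros(128, 128, CV_8UC1));        // page-locked host buffer
cv::gpu::CudaMem dst;
cv::gpu::GpuMat d_src, d_dst;
cv::gpu::Stream stream;

stream.enqueueUpload(src, d_src);                               // host -> device
cv::gpu::bitwise_not(d_src, d_dst, cv::gpu::GpuMat(), stream);  // async kernel
stream.enqueueDownload(d_dst, dst);                             // device -> host
stream.waitForCompletion();                                     // block until done

cv::Mat result = dst.createMatHeader();                         // zero-copy view of the pinned buffer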

View File

@@ -0,0 +1,287 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace {
//#define DUMP
struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
{
cv::gpu::DeviceInfo devInfo;
#ifdef DUMP
std::ofstream f;
#else
std::ifstream f;
#endif
int wins_per_img_x;
int wins_per_img_y;
int blocks_per_win_x;
int blocks_per_win_y;
int block_hist_size;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
#ifdef DUMP
void dump(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)
{
f.write((char*)&block_hists.rows, sizeof(block_hists.rows));
f.write((char*)&block_hists.cols, sizeof(block_hists.cols));
for (int i = 0; i < block_hists.rows; ++i)
{
for (int j = 0; j < block_hists.cols; ++j)
{
float val = block_hists.at<float>(i, j);
f.write((char*)&val, sizeof(val));
}
}
int nlocations = locations.size();
f.write((char*)&nlocations, sizeof(nlocations));
for (int i = 0; i < locations.size(); ++i)
f.write((char*)&locations[i], sizeof(locations[i]));
}
#else
void compare(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)
{
int rows, cols;
f.read((char*)&rows, sizeof(rows));
f.read((char*)&cols, sizeof(cols));
ASSERT_EQ(rows, block_hists.rows);
ASSERT_EQ(cols, block_hists.cols);
for (int i = 0; i < block_hists.rows; ++i)
{
for (int j = 0; j < block_hists.cols; ++j)
{
float val;
f.read((char*)&val, sizeof(val));
ASSERT_NEAR(val, block_hists.at<float>(i, j), 1e-3);
}
}
int nlocations;
f.read((char*)&nlocations, sizeof(nlocations));
ASSERT_EQ(nlocations, static_cast<int>(locations.size()));
for (int i = 0; i < nlocations; ++i)
{
cv::Point location;
f.read((char*)&location, sizeof(location));
ASSERT_EQ(location, locations[i]);
}
}
#endif
void testDetect(const cv::Mat& img)
{
gamma_correction = false;
setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
std::vector<cv::Point> locations;
// Test detect
detect(loadMat(img), locations, 0);
#ifdef DUMP
dump(cv::Mat(block_hists), locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
// Test detect on smaller image
cv::Mat img2;
cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
detect(loadMat(img2), locations, 0);
#ifdef DUMP
dump(cv::Mat(block_hists), locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
// Test detect on greater image
cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
detect(loadMat(img2), locations, 0);
#ifdef DUMP
dump(cv::Mat(block_hists), locations);
#else
compare(cv::Mat(block_hists), locations);
#endif
}
// Does not compare border values, since interpolation produces a delta there
void compare_inner_parts(cv::Mat d1, cv::Mat d2)
{
for (int i = 1; i < blocks_per_win_y - 1; ++i)
for (int j = 1; j < blocks_per_win_x - 1; ++j)
for (int k = 0; k < block_hist_size; ++k)
{
float a = d1.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size + k);
float b = d2.at<float>(0, (i * blocks_per_win_x + j) * block_hist_size + k);
ASSERT_FLOAT_EQ(a, b);
}
}
};
TEST_P(HOG, Detect)
{
cv::Mat img_rgb = readImage("hog/road.png");
ASSERT_FALSE(img_rgb.empty());
#ifdef DUMP
f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
ASSERT_TRUE(f.is_open());
#else
f.open((std::string(cvtest::TS::ptr()->get_data_path()) + "hog/expected_output.bin").c_str(), std::ios_base::binary);
ASSERT_TRUE(f.is_open());
#endif
// Test on color image
cv::Mat img;
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
testDetect(img);
// Test on gray image
cv::cvtColor(img_rgb, img, CV_BGR2GRAY);
testDetect(img);
f.close();
}
TEST_P(HOG, GetDescriptors)
{
// Load image (e.g. train data, composed from windows)
cv::Mat img_rgb = readImage("hog/train_data.png");
ASSERT_FALSE(img_rgb.empty());
// Convert to C4
cv::Mat img;
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
cv::gpu::GpuMat d_img(img);
// Convert train images into feature vectors (train table)
cv::gpu::GpuMat descriptors, descriptors_by_cols;
getDescriptors(d_img, win_size, descriptors, DESCR_FORMAT_ROW_BY_ROW);
getDescriptors(d_img, win_size, descriptors_by_cols, DESCR_FORMAT_COL_BY_COL);
// Check size of the result train table
wins_per_img_x = 3;
wins_per_img_y = 2;
blocks_per_win_x = 7;
blocks_per_win_y = 15;
block_hist_size = 36;
cv::Size descr_size_expected = cv::Size(blocks_per_win_x * blocks_per_win_y * block_hist_size,
wins_per_img_x * wins_per_img_y);
ASSERT_EQ(descr_size_expected, descriptors.size());
// Check both formats of output descriptors are handled correctly
cv::Mat dr(descriptors);
cv::Mat dc(descriptors_by_cols);
for (int i = 0; i < wins_per_img_x * wins_per_img_y; ++i)
{
const float* l = dr.rowRange(i, i + 1).ptr<float>();
const float* r = dc.rowRange(i, i + 1).ptr<float>();
for (int y = 0; y < blocks_per_win_y; ++y)
for (int x = 0; x < blocks_per_win_x; ++x)
for (int k = 0; k < block_hist_size; ++k)
ASSERT_EQ(l[(y * blocks_per_win_x + x) * block_hist_size + k],
r[(x * blocks_per_win_y + y) * block_hist_size + k]);
}
/* Now we want to extract the same feature vectors, but from single images. NOTE: the results
will be different due to border value interpolation. Using many small images is slower, but we
won't call getDescriptors and will use computeBlockHistograms instead. computeBlockHistograms
works well, as can be checked in the gpu_hog sample */
img_rgb = readImage("hog/positive1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
// Everything is fine with interpolation for left top subimage
ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));
img_rgb = readImage("hog/positive2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));
img_rgb = readImage("hog/negative1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));
img_rgb = readImage("hog/negative2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));
img_rgb = readImage("hog/positive3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));
img_rgb = readImage("hog/negative3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, CV_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, HOG, ALL_DEVICES);
} // namespace
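For reference, the expected_output.bin file that dump() writes and compare() reads holds consecutive records, one per detect call: rows, cols, rows*cols raw floats, then nlocations followed by that many raw cv::Point structs. A minimal standalone reader for one record, matching the layout in the code above (readRecord is a hypothetical helper, not part of this diff):

// Reads one dump() record from the current stream position.
#include <fstream>
#include <vector>
#include <opencv2/core/core.hpp>

bool readRecord(std::ifstream& f, cv::Mat& hists, std::vector<cv::Point>& locations)
{
    int rows, cols, nlocations;
    f.read((char*)&rows, sizeof(rows));
    f.read((char*)&cols, sizeof(cols));

    hists.create(rows, cols, CV_32FC1);  // freshly created, hence continuous
    f.read((char*)hists.ptr<float>(), rows * cols * sizeof(float));

    f.read((char*)&nlocations, sizeof(nlocations));
    locations.resize(nlocations);
    if (nlocations > 0)
        f.read((char*)&locations[0], nlocations * sizeof(cv::Point));

    return f.good();
}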