added support for CV_TM_CCORR (via FFT) to gpu::matchTemplate (both the blocked and non-blocked versions)
parent 3beac049d5
commit 40304721a7
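
For orientation, a minimal usage sketch of the code path this commit adds (not part of the commit itself): gpu::matchTemplate now dispatches CV_TM_SQDIFF for CV_8U inputs and the FFT-based CV_TM_CCORR for CV_32F inputs, always producing a CV_32F result of size (image.cols - templ.cols + 1) x (image.rows - templ.rows + 1). The header names below are assumptions based on the OpenCV 2.x module layout of the time.

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>   // CV_TM_* constants (assumed header)
    #include <opencv2/gpu/gpu.hpp>           // gpu::GpuMat, gpu::matchTemplate (assumed header)

    int main()
    {
        cv::Mat image(480, 640, CV_32F), templ(16, 16, CV_32F);
        cv::randu(image, 0.f, 1.f);
        cv::randu(templ, 0.f, 1.f);

        // Upload, run the new FFT-based cross-correlation path, download the result.
        cv::gpu::GpuMat d_image(image), d_templ(templ), d_result;
        cv::gpu::matchTemplate(d_image, d_templ, d_result, CV_TM_CCORR);

        cv::Mat result(d_result);   // CV_32F correlation map, (image - templ + 1) in each dimension
        return 0;
    }

The GPU result can be compared against the CPU cv::matchTemplate with CV_TM_CCORR, which is what the updated test in this commit does.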
@@ -112,6 +112,7 @@ target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${DEPS})

 if (HAVE_CUDA)
     target_link_libraries(${the_target} ${CUDA_LIBRARIES} ${CUDA_NPP_LIBRARIES})
+    CUDA_ADD_CUFFT_TO_TARGET(${the_target})
 endif()

 if(MSVC)
@@ -40,8 +40,12 @@
 //
 //M*/

+#include <cufft.h>
 #include "internal_shared.hpp"
+
+#include <iostream>
+using namespace std;

 using namespace cv::gpu;

 namespace cv { namespace gpu { namespace imgproc {
@@ -50,7 +54,7 @@ texture<unsigned char, 2> imageTex_8U;
 texture<unsigned char, 2> templTex_8U;


-__global__ void matchTemplateKernel_8U_SqDiff(int w, int h, DevMem2Df result)
+__global__ void matchTemplateKernel_8U_SQDIFF(int w, int h, DevMem2Df result)
 {
     int x = blockDim.x * blockIdx.x + threadIdx.x;
     int y = blockDim.y * blockIdx.y + threadIdx.y;
@@ -75,7 +79,7 @@ __global__ void matchTemplateKernel_8U_SqDiff(int w, int h, DevMem2Df result)
 }


-void matchTemplateCaller_8U_SqDiff(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
+void matchTemplate_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
 {
     dim3 threads(32, 8);
     dim3 grid(divUp(image.cols - templ.cols + 1, threads.x),
@@ -87,11 +91,32 @@ void matchTemplateCaller_8U_SqDiff(const DevMem2D image, const DevMem2D templ, D
     imageTex_8U.filterMode = cudaFilterModePoint;
     templTex_8U.filterMode = cudaFilterModePoint;

-    matchTemplateKernel_8U_SqDiff<<<grid, threads>>>(templ.cols, templ.rows, result);
+    matchTemplateKernel_8U_SQDIFF<<<grid, threads>>>(templ.cols, templ.rows, result);
     cudaSafeCall(cudaThreadSynchronize());
     cudaSafeCall(cudaUnbindTexture(imageTex_8U));
     cudaSafeCall(cudaUnbindTexture(templTex_8U));
 }


+__global__ void multiplyAndNormalizeSpectsKernel(int n, float scale, const cufftComplex* a,
+                                                 const cufftComplex* b, cufftComplex* c)
+{
+    int x = blockIdx.x * blockDim.x + threadIdx.x;
+    if (x < n)
+    {
+        cufftComplex v = cuCmulf(a[x], cuConjf(b[x]));
+        c[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
+    }
+}
+
+
+void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a, const cufftComplex* b,
+                                cufftComplex* c)
+{
+    dim3 threads(256);
+    dim3 grid(divUp(n, threads.x));
+    multiplyAndNormalizeSpectsKernel<<<grid, threads>>>(n, scale, a, b, c);
+}
+
+
 }}}
@@ -41,6 +41,14 @@
 //M*/

 #include "precomp.hpp"
+#include <cufft.h>
+#include <iostream>
+#include <utility>
+
+using namespace cv;
+using namespace cv::gpu;
+
+#define BLOCK_VERSION

 #if !defined (HAVE_CUDA)

@@ -48,22 +56,207 @@ void cv::gpu::matchTemplate(const GpuMat&, const GpuMat&, GpuMat&, int) { throw_

 #else

-namespace cv { namespace gpu { namespace imgproc {
-    void matchTemplateCaller_8U_SqDiff(const DevMem2D, const DevMem2D, DevMem2Df);
+namespace cv { namespace gpu { namespace imgproc
+{
+    void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a,
+                                    const cufftComplex* b, cufftComplex* c);
+    void matchTemplate_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result);
 }}}


+namespace
+{
+
+    template <int type, int method>
+    void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result);
+
+
+#ifdef BLOCK_VERSION
+    void estimateBlockSize(int w, int h, int tw, int th, int& bw, int& bh)
+    {
+        const int scale = 40;
+        const int bh_min = 1024;
+        const int bw_min = 1024;
+        bw = std::max(tw * scale, bw_min);
+        bh = std::max(th * scale, bh_min);
+        bw = std::min(bw, w);
+        bh = std::min(bh, h);
+    }
+#endif
+
+
+    template <>
+    void matchTemplate<CV_8U, CV_TM_SQDIFF>(const GpuMat& image, const GpuMat& templ, GpuMat& result)
+    {
+        result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
+        imgproc::matchTemplate_8U_SQDIFF(image, templ, result);
+    }
+
+
+#ifdef BLOCK_VERSION
+    template <>
+    void matchTemplate<CV_32F, CV_TM_CCORR>(const GpuMat& image, const GpuMat& templ, GpuMat& result)
+    {
+        result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
+
+        Size block_size;
+        estimateBlockSize(result.cols, result.rows, templ.cols, templ.rows,
+                          block_size.width, block_size.height);
+
+        Size dft_size;
+        dft_size.width = getOptimalDFTSize(block_size.width + templ.cols - 1);
+        dft_size.height = getOptimalDFTSize(block_size.width + templ.rows - 1);
+
+        block_size.width = std::min(dft_size.width - templ.cols + 1, result.cols);
+        block_size.height = std::min(dft_size.height - templ.rows + 1, result.rows);
+
+        cufftReal* image_data;
+        cufftReal* templ_data;
+        cufftReal* result_data;
+        cudaMalloc((void**)&image_data, sizeof(cufftReal) * dft_size.area());
+        cudaMalloc((void**)&templ_data, sizeof(cufftReal) * dft_size.area());
+        cudaMalloc((void**)&result_data, sizeof(cufftReal) * dft_size.area());
+
+        int spect_len = dft_size.height * (dft_size.width / 2 + 1);
+        cufftComplex* image_spect;
+        cufftComplex* templ_spect;
+        cufftComplex* result_spect;
+        cudaMalloc((void**)&image_spect, sizeof(cufftComplex) * spect_len);
+        cudaMalloc((void**)&templ_spect, sizeof(cufftComplex) * spect_len);
+        cudaMalloc((void**)&result_spect, sizeof(cufftComplex) * spect_len);
+
+        cufftHandle planR2C, planC2R;
+        CV_Assert(cufftPlan2d(&planC2R, dft_size.height, dft_size.width, CUFFT_C2R) == CUFFT_SUCCESS);
+        CV_Assert(cufftPlan2d(&planR2C, dft_size.height, dft_size.width, CUFFT_R2C) == CUFFT_SUCCESS);
+
+        GpuMat templ_roi(templ.size(), CV_32S, templ.data, templ.step);
+        GpuMat templ_block(dft_size, CV_32S, templ_data, dft_size.width * sizeof(cufftReal));
+        copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
+                       templ_block.cols - templ_roi.cols, 0);
+        CV_Assert(cufftExecR2C(planR2C, templ_data, templ_spect) == CUFFT_SUCCESS);
+
+        GpuMat image_block(dft_size, CV_32S, image_data, dft_size.width * sizeof(cufftReal));
+
+        for (int y = 0; y < result.rows; y += block_size.height)
+        {
+            for (int x = 0; x < result.cols; x += block_size.width)
+            {
+                Size image_roi_size;
+                image_roi_size.width = min(x + dft_size.width, image.cols) - x;
+                image_roi_size.height = min(y + dft_size.height, image.rows) - y;
+                GpuMat image_roi(image_roi_size, CV_32S, (void*)(image.ptr<float>(y) + x), image.step);
+                copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows, 0,
+                               image_block.cols - image_roi.cols, 0);
+
+                CV_Assert(cufftExecR2C(planR2C, image_data, image_spect) == CUFFT_SUCCESS);
+                imgproc::multiplyAndNormalizeSpects(spect_len, 1.f / dft_size.area(),
+                                                    image_spect, templ_spect, result_spect);
+                CV_Assert(cufftExecC2R(planC2R, result_spect, result_data) == CUFFT_SUCCESS);
+
+                Size result_roi_size;
+                result_roi_size.width = min(x + block_size.width, result.cols) - x;
+                result_roi_size.height = min(y + block_size.height, result.rows) - y;
+                GpuMat result_roi(result_roi_size, CV_32F, (void*)(result.ptr<float>(y) + x), result.step);
+                GpuMat result_block(result_roi_size, CV_32F, result_data, dft_size.width * sizeof(cufftReal));
+                result_block.copyTo(result_roi);
+            }
+        }
+
+        cufftDestroy(planR2C);
+        cufftDestroy(planC2R);
+
+        cudaFree(image_spect);
+        cudaFree(templ_spect);
+        cudaFree(result_spect);
+        cudaFree(image_data);
+        cudaFree(templ_data);
+        cudaFree(result_data);
+    }
+#else
+    template <>
+    void matchTemplate<CV_32F, CV_TM_CCORR>(const GpuMat& image, const GpuMat& templ, GpuMat& result)
+    {
+        Size opt_size;
+        opt_size.width = getOptimalDFTSize(image.cols);
+        opt_size.height = getOptimalDFTSize(image.rows);
+
+        cufftReal* image_data;
+        cufftReal* templ_data;
+        cufftReal* result_data;
+        cudaMalloc((void**)&image_data, sizeof(cufftReal) * opt_size.area());
+        cudaMalloc((void**)&templ_data, sizeof(cufftReal) * opt_size.area());
+        cudaMalloc((void**)&result_data, sizeof(cufftReal) * opt_size.area());
+
+        int spect_len = opt_size.height * (opt_size.width / 2 + 1);
+        cufftComplex* image_spect;
+        cufftComplex* templ_spect;
+        cufftComplex* result_spect;
+        cudaMalloc((void**)&image_spect, sizeof(cufftComplex) * spect_len);
+        cudaMalloc((void**)&templ_spect, sizeof(cufftComplex) * spect_len);
+        cudaMalloc((void**)&result_spect, sizeof(cufftComplex) * spect_len);
+
+        GpuMat image_(image.size(), CV_32S, image.data, image.step);
+        GpuMat image_cont(opt_size, CV_32S, image_data, opt_size.width * sizeof(cufftReal));
+        copyMakeBorder(image_, image_cont, 0, image_cont.rows - image.rows, 0,
+                       image_cont.cols - image.cols, 0);
+
+        GpuMat templ_(templ.size(), CV_32S, templ.data, templ.step);
+        GpuMat templ_cont(opt_size, CV_32S, templ_data, opt_size.width * sizeof(cufftReal));
+        copyMakeBorder(templ_, templ_cont, 0, templ_cont.rows - templ.rows, 0,
+                       templ_cont.cols - templ.cols, 0);
+
+        cufftHandle planR2C, planC2R;
+        CV_Assert(cufftPlan2d(&planC2R, opt_size.height, opt_size.width, CUFFT_C2R) == CUFFT_SUCCESS);
+        CV_Assert(cufftPlan2d(&planR2C, opt_size.height, opt_size.width, CUFFT_R2C) == CUFFT_SUCCESS);
+
+        CV_Assert(cufftExecR2C(planR2C, image_data, image_spect) == CUFFT_SUCCESS);
+        CV_Assert(cufftExecR2C(planR2C, templ_data, templ_spect) == CUFFT_SUCCESS);
+        imgproc::multiplyAndNormalizeSpects(spect_len, 1.f / opt_size.area(),
+                                            image_spect, templ_spect, result_spect);
+
+        CV_Assert(cufftExecC2R(planC2R, result_spect, result_data) == CUFFT_SUCCESS);
+
+        cufftDestroy(planR2C);
+        cufftDestroy(planC2R);
+
+        GpuMat result_cont(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F,
+                           result_data, opt_size.width * sizeof(cufftReal));
+        result_cont.copyTo(result);
+
+        cudaFree(image_spect);
+        cudaFree(templ_spect);
+        cudaFree(result_spect);
+        cudaFree(image_data);
+        cudaFree(templ_data);
+        cudaFree(result_data);
+    }
+#endif
+
+}
+
+
 void cv::gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method)
 {
-    CV_Assert(image.type() == CV_8U);
-    CV_Assert(method == CV_TM_SQDIFF);
-
     CV_Assert(image.type() == templ.type());
     CV_Assert(image.cols >= templ.cols && image.rows >= templ.rows);

-    result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
-    imgproc::matchTemplateCaller_8U_SqDiff(image, templ, result);
+    typedef void (*Caller)(const GpuMat&, const GpuMat&, GpuMat&);
+
+    static const Caller callers8U[] = { ::matchTemplate<CV_8U, CV_TM_SQDIFF>, 0, 0, 0, 0, 0 };
+    static const Caller callers32F[] = { 0, 0, ::matchTemplate<CV_32F, CV_TM_CCORR>, 0, 0, 0 };
+
+    const Caller* callers;
+    switch (image.type())
+    {
+    case CV_8U: callers = callers8U; break;
+    case CV_32F: callers = callers32F; break;
+    default: CV_Error(CV_StsBadArg, "matchTemplate: unsupported data type");
+    }
+
+    Caller caller = callers[method];
+    CV_Assert(caller);
+    caller(image, templ, result);
 }

 #endif

@@ -41,6 +41,17 @@
 //M*/

 #include "gputest.hpp"
+#include <string>
+#include <iostream>
+
+//#define SHOW_TIME
+
+#ifdef SHOW_TIME
+#include <ctime>
+#define F(x) x
+#else
+#define F(x)
+#endif

 using namespace cv;
 using namespace std;
@@ -57,40 +68,75 @@ struct CV_GpuMatchTemplateTest: CvTest
             Mat dst_gold;
             gpu::GpuMat dst;
             int n, m, h, w;
+            F(clock_t t;)

-            for (int i = 0; i < 4; ++i)
+            for (int i = 0; i < 3; ++i)
             {
-                n = 1 + rand() % 100;
-                m = 1 + rand() % 100;
-                do h = 1 + rand() % 20; while (h > n);
-                do w = 1 + rand() % 20; while (w > m);
+                n = 1 + rand() % 2000;
+                m = 1 + rand() % 1000;
+                do h = 1 + rand() % 30; while (h > n);
+                do w = 1 + rand() % 30; while (w > m);

                 gen(image, n, m, CV_8U);
                 gen(templ, h, w, CV_8U);
-                match_template_naive(image, templ, dst_gold);
+                F(t = clock();)
+                matchTemplate(image, templ, dst_gold, CV_TM_SQDIFF);
+                F(cout << "cpu:" << clock() - t << endl;)
+                F(t = clock();)
                 gpu::matchTemplate(gpu::GpuMat(image), gpu::GpuMat(templ), dst, CV_TM_SQDIFF);
-                if (!check8U(dst_gold, Mat(dst))) return;
+                F(cout << "gpu_block: " << clock() - t << endl;)
+                if (!check(dst_gold, Mat(dst), 5 * h * w * 1e-5f)) return;
+
+                gen(image, n, m, CV_32F);
+                gen(templ, h, w, CV_32F);
+                F(t = clock();)
+                matchTemplate(image, templ, dst_gold, CV_TM_CCORR);
+                F(cout << "cpu:" << clock() - t << endl;)
+                F(t = clock();)
+                gpu::matchTemplate(gpu::GpuMat(image), gpu::GpuMat(templ), dst, CV_TM_CCORR);
+                F(cout << "gpu_block: " << clock() - t << endl;)
+                if (!check(dst_gold, Mat(dst), 0.25f * h * w * 1e-5f)) return;
             }
         }
         catch (const Exception& e)
         {
+            ts->printf(CvTS::CONSOLE, e.what());
             if (!check_and_treat_gpu_exception(e, ts)) throw;
             return;
         }
     }


     void gen(Mat& a, int rows, int cols, int type)
     {
         RNG rng;
         a.create(rows, cols, type);
         if (type == CV_8U)
             rng.fill(a, RNG::UNIFORM, Scalar(0), Scalar(10));
+        else if (type == CV_32F)
+            rng.fill(a, RNG::UNIFORM, Scalar(0.f), Scalar(1.f));
     }

-    // Naive version for unsigned char
-    // Time complexity is O(a.size().area() * b.size().area()).
-    void match_template_naive(const Mat& a, const Mat& b, Mat& c)
+    bool check(const Mat& a, const Mat& b, float max_err)
+    {
+        if (a.size() != b.size())
+        {
+            ts->printf(CvTS::CONSOLE, "bad size");
+            ts->set_failed_test_info(CvTS::FAIL_INVALID_OUTPUT);
+            return false;
+        }
+
+        float err = (float)norm(a, b, NORM_INF);
+        if (err > max_err)
+        {
+            ts->printf(CvTS::CONSOLE, "bad accuracy: %f\n", err);
+            ts->set_failed_test_info(CvTS::FAIL_INVALID_OUTPUT);
+            return false;
+        }
+
+        return true;
+    }
+
+    void match_template_naive_SQDIFF(const Mat& a, const Mat& b, Mat& c)
     {
         c.create(a.rows - b.rows + 1, a.cols - b.cols + 1, CV_32F);
         for (int i = 0; i < c.rows; ++i)
@@ -114,32 +160,24 @@ struct CV_GpuMatchTemplateTest: CvTest
             }
         }
     }

-    bool check8U(const Mat& a, const Mat& b)
+    void match_template_naive_CCORR(const Mat& a, const Mat& b, Mat& c)
     {
-        if (a.size() != b.size())
-        {
-            ts->printf(CvTS::CONSOLE, "bad size");
-            ts->set_failed_test_info(CvTS::FAIL_INVALID_OUTPUT);
-            return false;
-        }
-
-        for (int i = 0; i < a.rows; ++i)
-        {
-            for (int j = 0; j < a.cols; ++j)
-            {
-                float v1 = a.at<float>(i, j);
-                float v2 = b.at<float>(i, j);
-                if (fabs(v1 - v2) > 1e-3f)
-                {
-                    ts->printf(CvTS::CONSOLE, "(gold)%f != %f, pos: (%d, %d) size: (%d, %d)\n",
-                               v1, v2, j, i, a.cols, a.rows);
-                    ts->set_failed_test_info(CvTS::FAIL_INVALID_OUTPUT);
-                    return false;
-                }
-            }
-        }
-
-        return true;
+        c.create(a.rows - b.rows + 1, a.cols - b.cols + 1, CV_32F);
+        for (int i = 0; i < c.rows; ++i)
+        {
+            for (int j = 0; j < c.cols; ++j)
+            {
+                float sum = 0.f;
+                for (int y = 0; y < b.rows; ++y)
+                {
+                    const float* arow = a.ptr<float>(i + y);
+                    const float* brow = b.ptr<float>(y);
+                    for (int x = 0; x < b.cols; ++x)
+                        sum += arow[j + x] * brow[x];
+                }
+                c.at<float>(i, j) = sum;
+            }
+        }
     }
 } match_template_test;