#1713 Added the ability to set user_block_size manually for the gpu::matchTemplate function (and gpu::convolve). Added a buffer parameter to these functions. Stopped forcing 2^n block sizes when they are not necessary.

Alexey Spizhevoy
2012-03-28 07:11:07 +00:00
parent d1423adbc7
commit c776bff95b
5 changed files with 223 additions and 189 deletions
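
Usage note: a minimal sketch of how the new buffered overload can be driven, assuming the 2.4-era gpu API; the file names, grayscale loading, and the 64x64 block size are illustrative and not part of this commit.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // Host-side scene and template (paths are placeholders).
    cv::Mat h_image = cv::imread("scene.png", 0);
    cv::Mat h_templ = cv::imread("patch.png", 0);

    cv::gpu::GpuMat image(h_image), templ(h_templ), result;

    // Reusable buffer introduced by this commit. Setting user_block_size
    // overrides the automatic FFT block-size estimation; leaving it at the
    // default Size(0, 0) keeps the automatic choice, which after this commit
    // is no longer rounded up to a power of two when that isn't needed.
    cv::gpu::MatchTemplateBuf buf;
    buf.user_block_size = cv::Size(64, 64);

    cv::gpu::matchTemplate(image, templ, result, CV_TM_CCORR, buf);
    return 0;
}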


@@ -101,23 +101,23 @@ namespace cv { namespace gpu { namespace device
         void matchTemplatePrepared_CCOFF_NORMED_8U(
                 int w, int h, const DevMem2D_<unsigned int> image_sum,
                 const DevMem2D_<unsigned long long> image_sqsum,
-                unsigned int templ_sum, unsigned int templ_sqsum,
+                unsigned int templ_sum, unsigned long long templ_sqsum,
                 DevMem2Df result, cudaStream_t stream);
         void matchTemplatePrepared_CCOFF_NORMED_8UC2(
                 int w, int h,
                 const DevMem2D_<unsigned int> image_sum_r, const DevMem2D_<unsigned long long> image_sqsum_r,
                 const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,
-                unsigned int templ_sum_r, unsigned int templ_sqsum_r,
-                unsigned int templ_sum_g, unsigned int templ_sqsum_g,
+                unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
+                unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
                 DevMem2Df result, cudaStream_t stream);
         void matchTemplatePrepared_CCOFF_NORMED_8UC3(
                 int w, int h,
                 const DevMem2D_<unsigned int> image_sum_r, const DevMem2D_<unsigned long long> image_sqsum_r,
                 const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,
                 const DevMem2D_<unsigned int> image_sum_b, const DevMem2D_<unsigned long long> image_sqsum_b,
-                unsigned int templ_sum_r, unsigned int templ_sqsum_r,
-                unsigned int templ_sum_g, unsigned int templ_sqsum_g,
-                unsigned int templ_sum_b, unsigned int templ_sqsum_b,
+                unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
+                unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
+                unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
                 DevMem2Df result, cudaStream_t stream);
         void matchTemplatePrepared_CCOFF_NORMED_8UC4(
                 int w, int h,
@@ -125,14 +125,14 @@ namespace cv { namespace gpu { namespace device
                 const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,
                 const DevMem2D_<unsigned int> image_sum_b, const DevMem2D_<unsigned long long> image_sqsum_b,
                 const DevMem2D_<unsigned int> image_sum_a, const DevMem2D_<unsigned long long> image_sqsum_a,
-                unsigned int templ_sum_r, unsigned int templ_sqsum_r,
-                unsigned int templ_sum_g, unsigned int templ_sqsum_g,
-                unsigned int templ_sum_b, unsigned int templ_sqsum_b,
-                unsigned int templ_sum_a, unsigned int templ_sqsum_a,
+                unsigned int templ_sum_r, unsigned long long templ_sqsum_r,
+                unsigned int templ_sum_g, unsigned long long templ_sqsum_g,
+                unsigned int templ_sum_b, unsigned long long templ_sqsum_b,
+                unsigned int templ_sum_a, unsigned long long templ_sqsum_a,
                 DevMem2Df result, cudaStream_t stream);

         void normalize_8U(int w, int h, const DevMem2D_<unsigned long long> image_sqsum,
-                unsigned int templ_sqsum, DevMem2Df result, int cn, cudaStream_t stream);
+                unsigned long long templ_sqsum, DevMem2Df result, int cn, cudaStream_t stream);

         void extractFirstChannel_32F(const DevMem2Db image, DevMem2Df result, int cn, cudaStream_t stream);
     }
@@ -146,20 +146,6 @@ namespace
     // Evaluates optimal template's area threshold. If
     // template's area is less than the threshold, we use naive match
     // template version, otherwise FFT-based (if available)
-    int getTemplateThreshold(int method, int depth);
-
-    void matchTemplate_CCORR_32F(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-    void matchTemplate_CCORR_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-    void matchTemplate_CCORR_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-
-    void matchTemplate_SQDIFF_32F(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-    void matchTemplate_SQDIFF_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-    void matchTemplate_SQDIFF_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-
-    void matchTemplate_CCOFF_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-    void matchTemplate_CCOFF_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result);
-
     int getTemplateThreshold(int method, int depth)
     {
         switch (method)
@@ -177,8 +163,9 @@ namespace
     }

-    void matchTemplate_CCORR_32F(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
-    {
+    void matchTemplate_CCORR_32F(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
+    {
         result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
         if (templ.size().area() < getTemplateThreshold(CV_TM_CCORR, CV_32F))
         {
@@ -186,14 +173,22 @@ namespace
             return;
         }

-        GpuMat result_;
-        ConvolveBuf buf;
-        convolve(image.reshape(1), templ.reshape(1), result_, true, buf, stream);
-        extractFirstChannel_32F(result_, result, image.channels(), StreamAccessor::getStream(stream));
+        ConvolveBuf convolve_buf;
+        convolve_buf.user_block_size = buf.user_block_size;
+        if (image.channels() == 1)
+            convolve(image.reshape(1), templ.reshape(1), result, true, convolve_buf, stream);
+        else
+        {
+            GpuMat result_;
+            convolve(image.reshape(1), templ.reshape(1), result_, true, convolve_buf, stream);
+            extractFirstChannel_32F(result_, result, image.channels(), StreamAccessor::getStream(stream));
+        }
     }

-    void matchTemplate_CCORR_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_CCORR_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
         if (templ.size().area() < getTemplateThreshold(CV_TM_CCORR, CV_8U))
         {
@@ -202,41 +197,43 @@ namespace
             return;
         }

-        GpuMat imagef, templf;
         if (stream)
         {
-            stream.enqueueConvert(image, imagef, CV_32F);
-            stream.enqueueConvert(templ, templf, CV_32F);
+            stream.enqueueConvert(image, buf.imagef, CV_32F);
+            stream.enqueueConvert(templ, buf.templf, CV_32F);
         }
         else
         {
-            image.convertTo(imagef, CV_32F);
-            templ.convertTo(templf, CV_32F);
+            image.convertTo(buf.imagef, CV_32F);
+            templ.convertTo(buf.templf, CV_32F);
         }
-        matchTemplate_CCORR_32F(imagef, templf, result, stream);
+        matchTemplate_CCORR_32F(buf.imagef, buf.templf, result, buf, stream);
     }

-    void matchTemplate_CCORR_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_CCORR_NORMED_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
-        matchTemplate_CCORR_8U(image, templ, result, stream);
+        matchTemplate_CCORR_8U(image, templ, result, buf, stream);
-        GpuMat img_sqsum;
-        sqrIntegral(image.reshape(1), img_sqsum, stream);
+        buf.image_sqsums.resize(1);
+        sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
-        unsigned int templ_sqsum = (unsigned int)sqrSum(templ.reshape(1))[0];
-        normalize_8U(templ.cols, templ.rows, img_sqsum, templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
+        unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
+        normalize_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
     }

-    void matchTemplate_SQDIFF_32F(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_SQDIFF_32F(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
         result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
         matchTemplateNaive_SQDIFF_32F(image, templ, result, image.channels(), StreamAccessor::getStream(stream));
     }

-    void matchTemplate_SQDIFF_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_SQDIFF_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
         if (templ.size().area() < getTemplateThreshold(CV_TM_SQDIFF, CV_8U))
         {
@@ -245,48 +242,48 @@ namespace
             return;
         }

-        GpuMat img_sqsum;
-        sqrIntegral(image.reshape(1), img_sqsum, stream);
+        buf.image_sqsums.resize(1);
+        sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
         unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
-        matchTemplate_CCORR_8U(image, templ, result, stream);
-        matchTemplatePrepared_SQDIFF_8U(templ.cols, templ.rows, img_sqsum, templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
+        matchTemplate_CCORR_8U(image, templ, result, buf, stream);
+        matchTemplatePrepared_SQDIFF_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
     }

-    void matchTemplate_SQDIFF_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_SQDIFF_NORMED_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
-        GpuMat img_sqsum;
-        sqrIntegral(image.reshape(1), img_sqsum, stream);
+        buf.image_sqsums.resize(1);
+        sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
         unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
-        matchTemplate_CCORR_8U(image, templ, result, stream);
-        matchTemplatePrepared_SQDIFF_NORMED_8U(templ.cols, templ.rows, img_sqsum, templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
+        matchTemplate_CCORR_8U(image, templ, result, buf, stream);
+        matchTemplatePrepared_SQDIFF_NORMED_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
     }

-    void matchTemplate_CCOFF_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_CCOFF_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
-        matchTemplate_CCORR_8U(image, templ, result, stream);
+        matchTemplate_CCORR_8U(image, templ, result, buf, stream);
         if (image.channels() == 1)
         {
-            GpuMat image_sum;
-            integral(image, image_sum, stream);
+            buf.image_sums.resize(1);
+            integral(image, buf.image_sums[0], stream);
             unsigned int templ_sum = (unsigned int)sum(templ)[0];
-            matchTemplatePrepared_CCOFF_8U(templ.cols, templ.rows, image_sum, templ_sum, result, StreamAccessor::getStream(stream));
+            matchTemplatePrepared_CCOFF_8U(templ.cols, templ.rows, buf.image_sums[0], templ_sum, result, StreamAccessor::getStream(stream));
         }
         else
         {
-            vector<GpuMat> images;
-            vector<GpuMat> image_sums(image.channels());
-            split(image, images);
+            split(image, buf.images);
+            buf.image_sums.resize(buf.images.size());
             for (int i = 0; i < image.channels(); ++i)
-                integral(images[i], image_sums[i], stream);
+                integral(buf.images[i], buf.image_sums[i], stream);
             Scalar templ_sum = sum(templ);
@@ -294,19 +291,19 @@ namespace
            {
            case 2:
                matchTemplatePrepared_CCOFF_8UC2(
-                        templ.cols, templ.rows, image_sums[0], image_sums[1],
+                        templ.cols, templ.rows, buf.image_sums[0], buf.image_sums[1],
                        (unsigned int)templ_sum[0], (unsigned int)templ_sum[1],
                        result, StreamAccessor::getStream(stream));
                break;
            case 3:
                matchTemplatePrepared_CCOFF_8UC3(
-                        templ.cols, templ.rows, image_sums[0], image_sums[1], image_sums[2],
+                        templ.cols, templ.rows, buf.image_sums[0], buf.image_sums[1], buf.image_sums[2],
                        (unsigned int)templ_sum[0], (unsigned int)templ_sum[1], (unsigned int)templ_sum[2],
                        result, StreamAccessor::getStream(stream));
                break;
            case 4:
                matchTemplatePrepared_CCOFF_8UC4(
-                        templ.cols, templ.rows, image_sums[0], image_sums[1], image_sums[2], image_sums[3],
+                        templ.cols, templ.rows, buf.image_sums[0], buf.image_sums[1], buf.image_sums[2], buf.image_sums[3],
                        (unsigned int)templ_sum[0], (unsigned int)templ_sum[1], (unsigned int)templ_sum[2],
                        (unsigned int)templ_sum[3], result, StreamAccessor::getStream(stream));
                break;
@@ -317,46 +314,45 @@ namespace
     }

-    void matchTemplate_CCOFF_NORMED_8U(const GpuMat& image, const GpuMat& templ, GpuMat& result, Stream& stream)
+    void matchTemplate_CCOFF_NORMED_8U(
+            const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
     {
-        GpuMat imagef, templf;
         if (stream)
         {
-            stream.enqueueConvert(image, imagef, CV_32F);
-            stream.enqueueConvert(templ, templf, CV_32F);
+            stream.enqueueConvert(image, buf.imagef, CV_32F);
+            stream.enqueueConvert(templ, buf.templf, CV_32F);
         }
         else
         {
-            image.convertTo(imagef, CV_32F);
-            templ.convertTo(templf, CV_32F);
+            image.convertTo(buf.imagef, CV_32F);
+            templ.convertTo(buf.templf, CV_32F);
         }
-        matchTemplate_CCORR_32F(imagef, templf, result, stream);
+        matchTemplate_CCORR_32F(buf.imagef, buf.templf, result, buf, stream);
         if (image.channels() == 1)
        {
-            GpuMat image_sum, image_sqsum;
-            integral(image, image_sum, stream);
-            sqrIntegral(image, image_sqsum, stream);
+            buf.image_sums.resize(1);
+            integral(image, buf.image_sums[0], stream);
+            buf.image_sqsums.resize(1);
+            sqrIntegral(image, buf.image_sqsums[0], stream);
             unsigned int templ_sum = (unsigned int)sum(templ)[0];
-            unsigned int templ_sqsum = (unsigned int)sqrSum(templ)[0];
+            unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ)[0];
             matchTemplatePrepared_CCOFF_NORMED_8U(
-                    templ.cols, templ.rows, image_sum, image_sqsum,
+                    templ.cols, templ.rows, buf.image_sums[0], buf.image_sqsums[0],
                    templ_sum, templ_sqsum, result, StreamAccessor::getStream(stream));
        }
         else
        {
-            vector<GpuMat> images;
-            vector<GpuMat> image_sums(image.channels());
-            vector<GpuMat> image_sqsums(image.channels());
-            split(image, images);
+            split(image, buf.images);
+            buf.image_sums.resize(buf.images.size());
+            buf.image_sqsums.resize(buf.images.size());
             for (int i = 0; i < image.channels(); ++i)
            {
-                integral(images[i], image_sums[i], stream);
-                sqrIntegral(images[i], image_sqsums[i], stream);
+                integral(buf.images[i], buf.image_sums[i], stream);
+                sqrIntegral(buf.images[i], buf.image_sqsums[i], stream);
            }
            Scalar templ_sum = sum(templ);
@@ -367,34 +363,34 @@ namespace
            case 2:
                matchTemplatePrepared_CCOFF_NORMED_8UC2(
                        templ.cols, templ.rows,
-                        image_sums[0], image_sqsums[0],
-                        image_sums[1], image_sqsums[1],
-                        (unsigned int)templ_sum[0], (unsigned int)templ_sqsum[0],
-                        (unsigned int)templ_sum[1], (unsigned int)templ_sqsum[1],
+                        buf.image_sums[0], buf.image_sqsums[0],
+                        buf.image_sums[1], buf.image_sqsums[1],
+                        (unsigned int)templ_sum[0], (unsigned long long)templ_sqsum[0],
+                        (unsigned int)templ_sum[1], (unsigned long long)templ_sqsum[1],
                        result, StreamAccessor::getStream(stream));
                break;
            case 3:
                matchTemplatePrepared_CCOFF_NORMED_8UC3(
                        templ.cols, templ.rows,
-                        image_sums[0], image_sqsums[0],
-                        image_sums[1], image_sqsums[1],
-                        image_sums[2], image_sqsums[2],
-                        (unsigned int)templ_sum[0], (unsigned int)templ_sqsum[0],
-                        (unsigned int)templ_sum[1], (unsigned int)templ_sqsum[1],
-                        (unsigned int)templ_sum[2], (unsigned int)templ_sqsum[2],
+                        buf.image_sums[0], buf.image_sqsums[0],
+                        buf.image_sums[1], buf.image_sqsums[1],
+                        buf.image_sums[2], buf.image_sqsums[2],
+                        (unsigned int)templ_sum[0], (unsigned long long)templ_sqsum[0],
+                        (unsigned int)templ_sum[1], (unsigned long long)templ_sqsum[1],
+                        (unsigned int)templ_sum[2], (unsigned long long)templ_sqsum[2],
                        result, StreamAccessor::getStream(stream));
                break;
            case 4:
                matchTemplatePrepared_CCOFF_NORMED_8UC4(
                        templ.cols, templ.rows,
-                        image_sums[0], image_sqsums[0],
-                        image_sums[1], image_sqsums[1],
-                        image_sums[2], image_sqsums[2],
-                        image_sums[3], image_sqsums[3],
-                        (unsigned int)templ_sum[0], (unsigned int)templ_sqsum[0],
-                        (unsigned int)templ_sum[1], (unsigned int)templ_sqsum[1],
-                        (unsigned int)templ_sum[2], (unsigned int)templ_sqsum[2],
-                        (unsigned int)templ_sum[3], (unsigned int)templ_sqsum[3],
+                        buf.image_sums[0], buf.image_sqsums[0],
+                        buf.image_sums[1], buf.image_sqsums[1],
+                        buf.image_sums[2], buf.image_sqsums[2],
+                        buf.image_sums[3], buf.image_sqsums[3],
+                        (unsigned int)templ_sum[0], (unsigned long long)templ_sqsum[0],
+                        (unsigned int)templ_sum[1], (unsigned long long)templ_sqsum[1],
+                        (unsigned int)templ_sum[2], (unsigned long long)templ_sqsum[2],
+                        (unsigned int)templ_sum[3], (unsigned long long)templ_sqsum[3],
                        result, StreamAccessor::getStream(stream));
                break;
            default:
@@ -406,16 +402,25 @@ namespace
 void cv::gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream& stream)
 {
+    MatchTemplateBuf buf;
+    matchTemplate(image, templ, result, method, buf, stream);
+}
+
+void cv::gpu::matchTemplate(
+    const GpuMat& image, const GpuMat& templ, GpuMat& result, int method,
+    MatchTemplateBuf &buf, Stream& stream)
+{
     CV_Assert(image.type() == templ.type());
     CV_Assert(image.cols >= templ.cols && image.rows >= templ.rows);

-    typedef void (*Caller)(const GpuMat&, const GpuMat&, GpuMat&, Stream& stream);
+    typedef void (*Caller)(const GpuMat&, const GpuMat&, GpuMat&, MatchTemplateBuf&, Stream& stream);

-    static const Caller callers8U[] = { ::matchTemplate_SQDIFF_8U, ::matchTemplate_SQDIFF_NORMED_8U,
-            ::matchTemplate_CCORR_8U, ::matchTemplate_CCORR_NORMED_8U,
+    static const Caller callers8U[] = { ::matchTemplate_SQDIFF_8U, ::matchTemplate_SQDIFF_NORMED_8U,
+            ::matchTemplate_CCORR_8U, ::matchTemplate_CCORR_NORMED_8U,
             ::matchTemplate_CCOFF_8U, ::matchTemplate_CCOFF_NORMED_8U };
-    static const Caller callers32F[] = { ::matchTemplate_SQDIFF_32F, 0,
+    static const Caller callers32F[] = { ::matchTemplate_SQDIFF_32F, 0,
             ::matchTemplate_CCORR_32F, 0, 0, 0 };

     const Caller* callers = 0;
@@ -428,7 +433,7 @@ void cv::gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& re
     Caller caller = callers[method];
     CV_Assert(caller);

-    caller(image, templ, result, stream);
+    caller(image, templ, result, buf, stream);
 }
#endif
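
The same control is exposed on gpu::convolve through ConvolveBuf::user_block_size, per the commit message; a sketch under the same 2.4-era API assumptions (input sizes and values are illustrative):

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // convolve expects single-channel 32F inputs.
    cv::Mat h_src(512, 512, CV_32FC1, cv::Scalar(1.0f));
    cv::Mat h_kernel(31, 31, CV_32FC1, cv::Scalar(0.001f));

    cv::gpu::GpuMat src(h_src), kernel(h_kernel), dst;

    // Caller-chosen processing block; since this commit the requested size is
    // used as given rather than being padded to the next power of two.
    cv::gpu::ConvolveBuf cbuf;
    cbuf.user_block_size = cv::Size(128, 128);

    cv::gpu::convolve(src, kernel, dst, /*ccorr=*/true, cbuf);
    return 0;
}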