Merge branch 'master' of https://github.com/Itseez/opencv
Conflicts: modules/imgproc/src/sumpixels.cpp
145    modules/imgproc/src/blend.cpp    Normal file
@@ -0,0 +1,145 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//    Nathan, liujun@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels.hpp"

namespace cv {

template <typename T>
class BlendLinearInvoker :
        public ParallelLoopBody
{
public:
    BlendLinearInvoker(const Mat & _src1, const Mat & _src2, const Mat & _weights1,
                       const Mat & _weights2, Mat & _dst) :
        src1(&_src1), src2(&_src2), weights1(&_weights1), weights2(&_weights2), dst(&_dst)
    {
    }

    virtual void operator() (const Range & range) const
    {
        int cn = src1->channels(), width = src1->cols * cn;

        for (int y = range.start; y < range.end; ++y)
        {
            const float * const weights1_row = weights1->ptr<float>(y);
            const float * const weights2_row = weights2->ptr<float>(y);
            const T * const src1_row = src1->ptr<T>(y);
            const T * const src2_row = src2->ptr<T>(y);
            T * const dst_row = dst->ptr<T>(y);

            for (int x = 0; x < width; ++x)
            {
                int x1 = x / cn;
                float w1 = weights1_row[x1], w2 = weights2_row[x1];
                float den = (w1 + w2 + 1e-5f);
                float num = (src1_row[x] * w1 + src2_row[x] * w2);

                dst_row[x] = saturate_cast<T>(num / den);
            }
        }
    }

private:
    const BlendLinearInvoker & operator= (const BlendLinearInvoker &);
    BlendLinearInvoker(const BlendLinearInvoker &);

    const Mat * src1, * src2, * weights1, * weights2;
    Mat * dst;
};

static bool ocl_blendLinear( InputArray _src1, InputArray _src2, InputArray _weights1, InputArray _weights2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    char cvt[30];
    ocl::Kernel k("blendLinear", ocl::imgproc::blend_linear_oclsrc,
                  format("-D T=%s -D cn=%d -D convertToT=%s", ocl::typeToStr(depth),
                         cn, ocl::convertTypeStr(CV_32F, depth, 1, cvt)));
    if (k.empty())
        return false;

    UMat src1 = _src1.getUMat(), src2 = _src2.getUMat(), weights1 = _weights1.getUMat(),
            weights2 = _weights2.getUMat(), dst = _dst.getUMat();

    k.args(ocl::KernelArg::ReadOnlyNoSize(src1), ocl::KernelArg::ReadOnlyNoSize(src2),
           ocl::KernelArg::ReadOnlyNoSize(weights1), ocl::KernelArg::ReadOnlyNoSize(weights2),
           ocl::KernelArg::WriteOnly(dst));

    size_t globalsize[2] = { dst.cols, dst.rows };
    return k.run(2, globalsize, NULL, false);
}

}

void cv::blendLinear( InputArray _src1, InputArray _src2, InputArray _weights1, InputArray _weights2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type);
    Size size = _src1.size();

    CV_Assert(depth == CV_8U || depth == CV_32F);
    CV_Assert(size == _src2.size() && size == _weights1.size() && size == _weights2.size());
    CV_Assert(type == _src2.type() && _weights1.type() == CV_32FC1 && _weights2.type() == CV_32FC1);

    _dst.create(size, type);

    if (ocl::useOpenCL() && _dst.isUMat() && ocl_blendLinear(_src1, _src2, _weights1, _weights2, _dst))
        return;

    Mat src1 = _src1.getMat(), src2 = _src2.getMat(), weights1 = _weights1.getMat(),
            weights2 = _weights2.getMat(), dst = _dst.getMat();

    if (depth == CV_8U)
    {
        BlendLinearInvoker<uchar> invoker(src1, src2, weights1, weights2, dst);
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
    else if (depth == CV_32F)
    {
        BlendLinearInvoker<float> invoker(src1, src2, weights1, weights2, dst);
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
}
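For reference, the blend implemented above is dst = (src1*w1 + src2*w2) / (w1 + w2 + 1e-5), applied per pixel. A minimal host-side sketch of calling the new cv::blendLinear entry point, assuming the declaration is exported through the standard imgproc/highgui headers; the image file names are placeholders:

    // Blend two images with a horizontal weight ramp (hedged sketch, not part of the patch).
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat a = cv::imread("a.png"), b = cv::imread("b.png");
        CV_Assert(!a.empty() && a.size() == b.size() && a.type() == b.type());

        // Horizontal ramp: the left side takes src1, the right side takes src2.
        cv::Mat w1(a.size(), CV_32FC1), w2(a.size(), CV_32FC1);
        for (int x = 0; x < a.cols; ++x)
        {
            float t = (float)x / (a.cols - 1);
            w1.col(x).setTo(1.0f - t);
            w2.col(x).setTo(t);
        }

        cv::Mat dst;
        cv::blendLinear(a, b, w1, w2, dst);   // dst = (a*w1 + b*w2) / (w1 + w2 + 1e-5)
        cv::imwrite("blended.png", dst);
        return 0;
    }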
@@ -413,15 +413,15 @@ static bool IPPDeriv(const Mat& src, Mat& dst, int ddepth, int dx, int dy, int k
|
||||
void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
|
||||
int ksize, double scale, double delta, int borderType )
|
||||
{
|
||||
Mat src = _src.getMat();
|
||||
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
|
||||
if (ddepth < 0)
|
||||
ddepth = src.depth();
|
||||
_dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
|
||||
Mat dst = _dst.getMat();
|
||||
ddepth = sdepth;
|
||||
_dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );
|
||||
|
||||
#ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
if (scale == 1.0 && delta == 0)
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
if (ksize == 3 && tegra::sobel3x3(src, dst, dx, dy, borderType))
|
||||
return;
|
||||
if (ksize == -1 && tegra::scharr(src, dst, dx, dy, borderType))
|
||||
@@ -430,13 +430,14 @@ void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
|
||||
#endif
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
if(dx < 3 && dy < 3 && src.channels() == 1 && borderType == 1)
|
||||
if(dx < 3 && dy < 3 && cn == 1 && borderType == BORDER_REPLICATE)
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
if(IPPDeriv(src, dst, ddepth, dx, dy, ksize,scale))
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
int ktype = std::max(CV_32F, std::max(ddepth, src.depth()));
|
||||
int ktype = std::max(CV_32F, std::max(ddepth, sdepth));
|
||||
|
||||
Mat kx, ky;
|
||||
getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
|
||||
@@ -449,33 +450,36 @@ void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
|
||||
else
|
||||
ky *= scale;
|
||||
}
|
||||
sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType );
|
||||
sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
|
||||
}
|
||||
|
||||
|
||||
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
|
||||
double scale, double delta, int borderType )
|
||||
{
|
||||
Mat src = _src.getMat();
|
||||
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
|
||||
if (ddepth < 0)
|
||||
ddepth = src.depth();
|
||||
_dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
|
||||
Mat dst = _dst.getMat();
|
||||
ddepth = sdepth;
|
||||
_dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );
|
||||
|
||||
#ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
if (scale == 1.0 && delta == 0)
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
if (tegra::scharr(src, dst, dx, dy, borderType))
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
|
||||
if(dx < 2 && dy < 2 && src.channels() == 1 && borderType == 1)
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
if(IPPDerivScharr(src, dst, ddepth, dx, dy, scale))
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
int ktype = std::max(CV_32F, std::max(ddepth, src.depth()));
|
||||
int ktype = std::max(CV_32F, std::max(ddepth, sdepth));
|
||||
|
||||
Mat kx, ky;
|
||||
getScharrKernels( kx, ky, dx, dy, false, ktype );
|
||||
@@ -488,22 +492,22 @@ void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
|
||||
else
|
||||
ky *= scale;
|
||||
}
|
||||
sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType );
|
||||
sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
|
||||
}
|
||||
|
||||
|
||||
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
|
||||
double scale, double delta, int borderType )
|
||||
{
|
||||
Mat src = _src.getMat();
|
||||
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
|
||||
if (ddepth < 0)
|
||||
ddepth = src.depth();
|
||||
_dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );
|
||||
Mat dst = _dst.getMat();
|
||||
ddepth = sdepth;
|
||||
_dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );
|
||||
|
||||
#ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
if (scale == 1.0 && delta == 0)
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
if (ksize == 1 && tegra::laplace1(src, dst, borderType))
|
||||
return;
|
||||
if (ksize == 3 && tegra::laplace3(src, dst, borderType))
|
||||
@@ -516,15 +520,18 @@ void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
|
||||
if( ksize == 1 || ksize == 3 )
|
||||
{
|
||||
float K[2][9] =
|
||||
{{0, 1, 0, 1, -4, 1, 0, 1, 0},
|
||||
{2, 0, 2, 0, -8, 0, 2, 0, 2}};
|
||||
{
|
||||
{ 0, 1, 0, 1, -4, 1, 0, 1, 0 },
|
||||
{ 2, 0, 2, 0, -8, 0, 2, 0, 2 }
|
||||
};
|
||||
Mat kernel(3, 3, CV_32F, K[ksize == 3]);
|
||||
if( scale != 1 )
|
||||
kernel *= scale;
|
||||
filter2D( src, dst, ddepth, kernel, Point(-1,-1), delta, borderType );
|
||||
filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );
|
||||
}
|
||||
else
|
||||
{
|
||||
Mat src = _src.getMat(), dst = _dst.getMat();
|
||||
const size_t STRIPE_SIZE = 1 << 14;
|
||||
|
||||
int depth = src.depth();
|
||||
|
@@ -41,6 +41,7 @@
|
||||
//M*/
|
||||
|
||||
#include "precomp.hpp"
|
||||
#include "opencl_kernels.hpp"
|
||||
|
||||
/****************************************************************************************\
|
||||
Base Image Filter
|
||||
@@ -3115,6 +3116,206 @@ template<typename ST, class CastOp, class VecOp> struct Filter2D : public BaseFi
|
||||
|
||||
}
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
#define DIVUP(total, grain) (((total) + (grain) - 1) / (grain))
|
||||
#define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
|
||||
|
||||
// prepare kernel: transpose and make double rows (+align). Returns size of aligned row
|
||||
// Samples:
|
||||
// a b c
|
||||
// Input: d e f
|
||||
// g h i
|
||||
// Output, last two zeros is the alignment:
|
||||
// a d g a d g 0 0
|
||||
// b e h b e h 0 0
|
||||
// c f i c f i 0 0
|
||||
template <typename T>
|
||||
static int _prepareKernelFilter2D(std::vector<T>& data, const Mat &kernel)
|
||||
{
|
||||
Mat _kernel; kernel.convertTo(_kernel, DataDepth<T>::value);
|
||||
int size_y_aligned = ROUNDUP(kernel.rows * 2, 4);
|
||||
data.clear(); data.resize(size_y_aligned * kernel.cols, 0);
|
||||
for (int x = 0; x < kernel.cols; x++)
|
||||
{
|
||||
for (int y = 0; y < kernel.rows; y++)
|
||||
{
|
||||
data[x * size_y_aligned + y] = _kernel.at<T>(y, x);
|
||||
data[x * size_y_aligned + y + kernel.rows] = _kernel.at<T>(y, x);
|
||||
}
|
||||
}
|
||||
return size_y_aligned;
|
||||
}
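A standalone sketch illustrating the layout _prepareKernelFilter2D produces for a 3x3 kernel; this is a plain C++ rewrite of the ROUNDUP arithmetic above (an assumption-free restatement, no OpenCV types involved):

    // For a 3x3 kernel: ROUNDUP(3*2, 4) = 8, so each transposed column is stored
    // twice followed by two zeros, matching the "a d g a d g 0 0" sample above.
    #include <cstdio>
    #include <vector>

    #define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))

    int main()
    {
        const int rows = 3, cols = 3;
        const float kernel[3][3] = { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } };

        int size_y_aligned = ROUNDUP(rows * 2, 4);             // 8 for a 3x3 kernel
        std::vector<float> data(size_y_aligned * cols, 0.0f);  // trailing zeros are the alignment

        for (int x = 0; x < cols; x++)
            for (int y = 0; y < rows; y++)
            {
                data[x * size_y_aligned + y]        = kernel[y][x];  // transposed column
                data[x * size_y_aligned + y + rows] = kernel[y][x];  // duplicated ("double rows")
            }

        for (int x = 0; x < cols; x++)
        {
            for (int y = 0; y < size_y_aligned; y++)
                printf("%g ", data[x * size_y_aligned + y]);
            printf("\n");   // 1 4 7 1 4 7 0 0  /  2 5 8 2 5 8 0 0  /  3 6 9 3 6 9 0 0
        }
        return 0;
    }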
|
||||
|
||||
static bool ocl_filter2D( InputArray _src, OutputArray _dst, int ddepth,
|
||||
InputArray _kernel, Point anchor,
|
||||
double delta, int borderType )
|
||||
{
|
||||
if (abs(delta) > FLT_MIN)
|
||||
return false;
|
||||
|
||||
int type = _src.type();
|
||||
int cn = CV_MAT_CN(type);
|
||||
if ((1 != cn) && (2 != cn) && (4 != cn))
|
||||
return false;//TODO
|
||||
|
||||
int sdepth = CV_MAT_DEPTH(type);
|
||||
Size ksize = _kernel.size();
|
||||
if( anchor.x < 0 )
|
||||
anchor.x = ksize.width / 2;
|
||||
if( anchor.y < 0 )
|
||||
anchor.y = ksize.height / 2;
|
||||
if( ddepth < 0 )
|
||||
ddepth = sdepth;
|
||||
else if (ddepth != sdepth)
|
||||
return false;
|
||||
|
||||
bool isIsolatedBorder = (borderType & BORDER_ISOLATED) != 0;
|
||||
bool useDouble = (CV_64F == sdepth);
|
||||
const cv::ocl::Device &device = cv::ocl::Device::getDefault();
|
||||
int doubleFPConfig = device.doubleFPConfig();
|
||||
if (useDouble && (0 == doubleFPConfig))
|
||||
return false;
|
||||
|
||||
const char* btype = NULL;
|
||||
switch (borderType & ~BORDER_ISOLATED)
|
||||
{
|
||||
case BORDER_CONSTANT:
|
||||
btype = "BORDER_CONSTANT";
|
||||
break;
|
||||
case BORDER_REPLICATE:
|
||||
btype = "BORDER_REPLICATE";
|
||||
break;
|
||||
case BORDER_REFLECT:
|
||||
btype = "BORDER_REFLECT";
|
||||
break;
|
||||
case BORDER_WRAP:
|
||||
return false;
|
||||
case BORDER_REFLECT101:
|
||||
btype = "BORDER_REFLECT_101";
|
||||
break;
|
||||
}
|
||||
|
||||
cv::Mat kernelMat = _kernel.getMat();
|
||||
std::vector<float> kernelMatDataFloat;
|
||||
std::vector<double> kernelMatDataDouble;
|
||||
int kernel_size_y2_aligned = useDouble ?
|
||||
_prepareKernelFilter2D<double>(kernelMatDataDouble, kernelMat)
|
||||
: _prepareKernelFilter2D<float>(kernelMatDataFloat, kernelMat);
|
||||
|
||||
|
||||
cv::Size sz = _src.size();
|
||||
size_t globalsize[2] = {sz.width, sz.height};
|
||||
size_t localsize[2] = {0, 1};
|
||||
|
||||
ocl::Kernel kernel;
|
||||
UMat src; Size wholeSize;
|
||||
if (!isIsolatedBorder)
|
||||
{
|
||||
src = _src.getUMat();
|
||||
Point ofs;
|
||||
src.locateROI(wholeSize, ofs);
|
||||
}
|
||||
|
||||
size_t maxWorkItemSizes[32]; device.maxWorkItemSizes(maxWorkItemSizes);
|
||||
size_t tryWorkItems = maxWorkItemSizes[0];
|
||||
for (;;)
|
||||
{
|
||||
size_t BLOCK_SIZE = tryWorkItems;
|
||||
while (BLOCK_SIZE > 32 && BLOCK_SIZE >= (size_t)ksize.width * 2 && BLOCK_SIZE > (size_t)sz.width * 2)
|
||||
BLOCK_SIZE /= 2;
|
||||
#if 1 // TODO Mode with several blocks requires a much more VGPRs, so this optimization is not actual for the current devices
|
||||
size_t BLOCK_SIZE_Y = 1;
|
||||
#else
|
||||
size_t BLOCK_SIZE_Y = 8; // TODO Check heuristic value on devices
|
||||
while (BLOCK_SIZE_Y < BLOCK_SIZE / 8 && BLOCK_SIZE_Y * src.clCxt->getDeviceInfo().maxComputeUnits * 32 < (size_t)src.rows)
|
||||
BLOCK_SIZE_Y *= 2;
|
||||
#endif
|
||||
|
||||
if ((size_t)ksize.width > BLOCK_SIZE)
|
||||
return false;
|
||||
|
||||
int requiredTop = anchor.y;
|
||||
int requiredLeft = (int)BLOCK_SIZE; // not this: anchor.x;
|
||||
int requiredBottom = ksize.height - 1 - anchor.y;
|
||||
int requiredRight = (int)BLOCK_SIZE; // not this: ksize.width - 1 - anchor.x;
|
||||
int h = isIsolatedBorder ? sz.height : wholeSize.height;
|
||||
int w = isIsolatedBorder ? sz.width : wholeSize.width;
|
||||
bool extra_extrapolation = h < requiredTop || h < requiredBottom || w < requiredLeft || w < requiredRight;
|
||||
|
||||
if ((w < ksize.width) || (h < ksize.height))
|
||||
return false;
|
||||
|
||||
char build_options[1024];
|
||||
sprintf(build_options, "-D LOCAL_SIZE=%d -D BLOCK_SIZE_Y=%d -D DATA_DEPTH=%d -D DATA_CHAN=%d -D USE_DOUBLE=%d "
|
||||
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D KERNEL_SIZE_Y2_ALIGNED=%d "
|
||||
"-D %s -D %s -D %s",
|
||||
(int)BLOCK_SIZE, (int)BLOCK_SIZE_Y,
|
||||
sdepth, cn, useDouble ? 1 : 0,
|
||||
anchor.x, anchor.y, ksize.width, ksize.height, kernel_size_y2_aligned,
|
||||
btype,
|
||||
extra_extrapolation ? "EXTRA_EXTRAPOLATION" : "NO_EXTRA_EXTRAPOLATION",
|
||||
isIsolatedBorder ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED");
|
||||
|
||||
localsize[0] = BLOCK_SIZE;
|
||||
globalsize[0] = DIVUP(sz.width, BLOCK_SIZE - (ksize.width - 1)) * BLOCK_SIZE;
|
||||
globalsize[1] = DIVUP(sz.height, BLOCK_SIZE_Y);
|
||||
|
||||
cv::String errmsg;
|
||||
if (!kernel.create("filter2D", cv::ocl::imgproc::filter2D_oclsrc, build_options))
|
||||
return false;
|
||||
size_t kernelWorkGroupSize = kernel.workGroupSize();
|
||||
if (localsize[0] <= kernelWorkGroupSize)
|
||||
break;
|
||||
if (BLOCK_SIZE < kernelWorkGroupSize)
|
||||
return false;
|
||||
tryWorkItems = kernelWorkGroupSize;
|
||||
}
|
||||
|
||||
_dst.create(sz, CV_MAKETYPE(ddepth, cn));
|
||||
UMat dst = _dst.getUMat();
|
||||
if (src.empty())
|
||||
src = _src.getUMat();
|
||||
|
||||
int idxArg = 0;
|
||||
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrReadOnly(src));
|
||||
idxArg = kernel.set(idxArg, (int)src.step);
|
||||
|
||||
int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
|
||||
int srcOffsetY = (int)(src.offset / src.step);
|
||||
int srcEndX = (isIsolatedBorder ? (srcOffsetX + sz.width) : wholeSize.width);
|
||||
int srcEndY = (isIsolatedBorder ? (srcOffsetY + sz.height) : wholeSize.height);
|
||||
idxArg = kernel.set(idxArg, srcOffsetX);
|
||||
idxArg = kernel.set(idxArg, srcOffsetY);
|
||||
idxArg = kernel.set(idxArg, srcEndX);
|
||||
idxArg = kernel.set(idxArg, srcEndY);
|
||||
|
||||
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
|
||||
float borderValue[4] = {0, 0, 0, 0};
|
||||
double borderValueDouble[4] = {0, 0, 0, 0};
|
||||
if ((borderType & ~BORDER_ISOLATED) == BORDER_CONSTANT)
|
||||
{
|
||||
int cnocl = (3 == cn) ? 4 : cn;
|
||||
if (useDouble)
|
||||
idxArg = kernel.set(idxArg, (void *)&borderValueDouble[0], sizeof(double) * cnocl);
|
||||
else
|
||||
idxArg = kernel.set(idxArg, (void *)&borderValue[0], sizeof(float) * cnocl);
|
||||
}
|
||||
if (useDouble)
|
||||
{
|
||||
UMat kernalDataUMat(kernelMatDataDouble, true);
|
||||
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrReadOnly(kernalDataUMat));
|
||||
}
|
||||
else
|
||||
{
|
||||
UMat kernalDataUMat(kernelMatDataFloat, true);
|
||||
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrReadOnly(kernalDataUMat));
|
||||
}
|
||||
return kernel.run(2, globalsize, localsize, true);
|
||||
}
|
||||
}
|
||||
|
||||
cv::Ptr<cv::BaseFilter> cv::getLinearFilter(int srcType, int dstType,
|
||||
InputArray filter_kernel, Point anchor,
|
||||
double delta, int bits)
|
||||
@@ -3230,6 +3431,10 @@ void cv::filter2D( InputArray _src, OutputArray _dst, int ddepth,
|
||||
InputArray _kernel, Point anchor,
|
||||
double delta, int borderType )
|
||||
{
|
||||
bool use_opencl = ocl::useOpenCL() && _dst.isUMat();
|
||||
if( use_opencl && ocl_filter2D(_src, _dst, ddepth, _kernel, anchor, delta, borderType))
|
||||
return;
|
||||
|
||||
Mat src = _src.getMat(), kernel = _kernel.getMat();
|
||||
|
||||
if( ddepth < 0 )
|
||||
|
59    modules/imgproc/src/opencl/bilateral.cl    Normal file
@@ -0,0 +1,59 @@
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//    Rock Li, Rock.li@amd.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.

__kernel void bilateral(__global const uchar * src, int src_step, int src_offset,
                        __global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
                        __constant float * color_weight, __constant float * space_weight, __constant int * space_ofs)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (y < dst_rows && x < dst_cols)
    {
        int src_index = mad24(y + radius, src_step, x + radius + src_offset);
        int dst_index = mad24(y, dst_step, x + dst_offset);
        float sum = 0.f, wsum = 0.f;
        int val0 = convert_int(src[src_index]);

        #pragma unroll
        for (int k = 0; k < maxk; k++ )
        {
            int val = convert_int(src[src_index + space_ofs[k]]);
            float w = space_weight[k] * color_weight[abs(val - val0)];
            sum += (float)(val) * w;
            wsum += w;
        }
        dst[dst_index] = convert_uchar_rtz(sum / wsum + 0.5f);
    }
}
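The radius and maxk constants and the three lookup tables this kernel indexes come from the host side, which is not part of this diff. A hedged sketch of how such tables are typically built, mirroring the classical bilateralFilter setup (makeBilateralTables is a hypothetical helper name):

    // Builds color_weight[|val - val0|], space_weight[k] and space_ofs[k] for 8-bit data.
    #include <cmath>
    #include <vector>

    void makeBilateralTables(int radius, double sigma_color, double sigma_space,
                             int src_step /* bytes per padded source row */,
                             std::vector<float>& color_weight,
                             std::vector<float>& space_weight,
                             std::vector<int>& space_ofs)
    {
        double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
        double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);

        color_weight.resize(256);                    // one entry per |val - val0| for 8-bit data
        for (int i = 0; i < 256; i++)
            color_weight[i] = (float)std::exp(i * i * gauss_color_coeff);

        space_weight.clear();
        space_ofs.clear();
        for (int i = -radius; i <= radius; i++)
            for (int j = -radius; j <= radius; j++)
            {
                double r = std::sqrt((double)i * i + (double)j * j);
                if (r > radius)
                    continue;                        // keep only the circular neighborhood
                space_weight.push_back((float)std::exp(r * r * gauss_space_coeff));
                space_ofs.push_back(i * src_step + j);   // flat offset added to src_index
            }
        // maxk, the kernel's loop bound, is (int)space_ofs.size().
    }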
88    modules/imgproc/src/opencl/blend_linear.cl    Normal file
@@ -0,0 +1,88 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, MulticoreWare Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//    Liu Liujun, liujun@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifdef DOUBLE_SUPPORT
#ifdef cl_amd_fp64
#pragma OPENCL EXTENSION cl_amd_fp64:enable
#elif defined (cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
#endif

#define noconvert

__kernel void blendLinear(__global const uchar * src1ptr, int src1_step, int src1_offset,
                          __global const uchar * src2ptr, int src2_step, int src2_offset,
                          __global const uchar * weight1, int weight1_step, int weight1_offset,
                          __global const uchar * weight2, int weight2_step, int weight2_offset,
                          __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < dst_cols && y < dst_rows)
    {
        int src1_index = mad24(y, src1_step, src1_offset + x * cn * (int)sizeof(T));
        int src2_index = mad24(y, src2_step, src2_offset + x * cn * (int)sizeof(T));
        int weight1_index = mad24(y, weight1_step, weight1_offset + x * (int)sizeof(float));
        int weight2_index = mad24(y, weight2_step, weight2_offset + x * (int)sizeof(float));
        int dst_index = mad24(y, dst_step, dst_offset + x * cn * (int)sizeof(T));

        float w1 = *(__global const float *)(weight1 + weight1_index),
              w2 = *(__global const float *)(weight2 + weight2_index);
        float den = w1 + w2 + 1e-5f;

        __global const T * src1 = (__global const T *)(src1ptr + src1_index);
        __global const T * src2 = (__global const T *)(src2ptr + src2_index);
        __global T * dst = (__global T *)(dstptr + dst_index);

        #pragma unroll
        for (int i = 0; i < cn; ++i)
        {
            float num = w1 * convert_float(src1[i]) + w2 * convert_float(src2[i]);
            dst[i] = convertToT(num / den);
        }
    }
}
375    modules/imgproc/src/opencl/filter2D.cl    Normal file
@@ -0,0 +1,375 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors as is and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifdef BORDER_REPLICATE
|
||||
//BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
|
||||
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i))
|
||||
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr))
|
||||
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i))
|
||||
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (b_edge)-1 :(addr))
|
||||
#endif
|
||||
|
||||
#ifdef BORDER_REFLECT
|
||||
//BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
|
||||
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i))
|
||||
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr))
|
||||
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i))
|
||||
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr))
|
||||
#endif
|
||||
|
||||
#ifdef BORDER_REFLECT_101
|
||||
//BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
|
||||
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i))
|
||||
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr))
|
||||
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i))
|
||||
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr))
|
||||
#endif
|
||||
|
||||
//blur function does not support BORDER_WRAP
|
||||
#ifdef BORDER_WRAP
|
||||
//BORDER_WRAP: cdefgh|abcdefgh|abcdefg
|
||||
#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i))
|
||||
#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr))
|
||||
#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i))
|
||||
#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (i)-(b_edge) : (addr))
|
||||
#endif
|
||||
|
||||
#ifdef EXTRA_EXTRAPOLATION // border > src image size
|
||||
#ifdef BORDER_CONSTANT
|
||||
// None
|
||||
#elif defined BORDER_REPLICATE
|
||||
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
|
||||
{ \
|
||||
x = max(min(x, maxX - 1), minX); \
|
||||
y = max(min(y, maxY - 1), minY); \
|
||||
}
|
||||
#elif defined BORDER_WRAP
|
||||
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
|
||||
{ \
|
||||
if (x < minX) \
|
||||
x -= ((x - maxX + 1) / maxX) * maxX; \
|
||||
if (x >= maxX) \
|
||||
x %= maxX; \
|
||||
if (y < minY) \
|
||||
y -= ((y - maxY + 1) / maxY) * maxY; \
|
||||
if (y >= maxY) \
|
||||
y %= maxY; \
|
||||
}
|
||||
#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)
|
||||
#define EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, delta) \
|
||||
{ \
|
||||
if (maxX - minX == 1) \
|
||||
x = minX; \
|
||||
else \
|
||||
do \
|
||||
{ \
|
||||
if (x < minX) \
|
||||
x = minX - (x - minX) - 1 + delta; \
|
||||
else \
|
||||
x = maxX - 1 - (x - maxX) - delta; \
|
||||
} \
|
||||
while (x >= maxX || x < minX); \
|
||||
\
|
||||
if (maxY - minY == 1) \
|
||||
y = minY; \
|
||||
else \
|
||||
do \
|
||||
{ \
|
||||
if (y < minY) \
|
||||
y = minY - (y - minY) - 1 + delta; \
|
||||
else \
|
||||
y = maxY - 1 - (y - maxY) - delta; \
|
||||
} \
|
||||
while (y >= maxY || y < minY); \
|
||||
}
|
||||
#ifdef BORDER_REFLECT
|
||||
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 0)
|
||||
#elif defined(BORDER_REFLECT_101)
|
||||
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 1)
|
||||
#endif
|
||||
#else
|
||||
#error No extrapolation method
|
||||
#endif
|
||||
#else
|
||||
#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \
|
||||
{ \
|
||||
int _row = y - minY, _col = x - minX; \
|
||||
_row = ADDR_H(_row, 0, maxY - minY); \
|
||||
_row = ADDR_B(_row, maxY - minY, _row); \
|
||||
y = _row + minY; \
|
||||
\
|
||||
_col = ADDR_L(_col, 0, maxX - minX); \
|
||||
_col = ADDR_R(_col, maxX - minX, _col); \
|
||||
x = _col + minX; \
|
||||
}
|
||||
#endif
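A small host-side check of the single-reflection mapping encoded by the BORDER_REFLECT_101 macros above ("gfedcb|abcdefgh|gfedcba"); a hypothetical standalone snippet, while the EXTRA_EXTRAPOLATION do/while variant extends the same idea to borders wider than the image:

    #include <cstdio>

    static int reflect101(int i, int len)      // maps any index into [0, len)
    {
        if (len == 1)
            return 0;
        do
        {
            if (i < 0)
                i = -i;                        // mirror around 0 without repeating element 0
            else if (i >= len)
                i = 2 * (len - 1) - i;         // mirror around len-1 without repeating it
        } while (i < 0 || i >= len);
        return i;
    }

    int main()
    {
        const char row[] = "abcdefgh";
        for (int i = -6; i < 8 + 7; i++)
            putchar(row[reflect101(i, 8)]);
        putchar('\n');                         // prints: gfedcb abcdefgh gfedcba
        return 0;
    }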
|
||||
|
||||
#if USE_DOUBLE
|
||||
#ifdef cl_amd_fp64
|
||||
#pragma OPENCL EXTENSION cl_amd_fp64:enable
|
||||
#elif defined (cl_khr_fp64)
|
||||
#pragma OPENCL EXTENSION cl_khr_fp64:enable
|
||||
#endif
|
||||
#define FPTYPE double
|
||||
#define CONVERT_TO_FPTYPE CAT(convert_double, VEC_SIZE)
|
||||
#else
|
||||
#define FPTYPE float
|
||||
#define CONVERT_TO_FPTYPE CAT(convert_float, VEC_SIZE)
|
||||
#endif
|
||||
|
||||
#if DATA_DEPTH == 0
|
||||
#define BASE_TYPE uchar
|
||||
#elif DATA_DEPTH == 1
|
||||
#define BASE_TYPE char
|
||||
#elif DATA_DEPTH == 2
|
||||
#define BASE_TYPE ushort
|
||||
#elif DATA_DEPTH == 3
|
||||
#define BASE_TYPE short
|
||||
#elif DATA_DEPTH == 4
|
||||
#define BASE_TYPE int
|
||||
#elif DATA_DEPTH == 5
|
||||
#define BASE_TYPE float
|
||||
#elif DATA_DEPTH == 6
|
||||
#define BASE_TYPE double
|
||||
#else
|
||||
#error data_depth
|
||||
#endif
|
||||
|
||||
#define __CAT(x, y) x##y
|
||||
#define CAT(x, y) __CAT(x, y)
|
||||
|
||||
#define uchar1 uchar
|
||||
#define char1 char
|
||||
#define ushort1 ushort
|
||||
#define short1 short
|
||||
#define int1 int
|
||||
#define float1 float
|
||||
#define double1 double
|
||||
|
||||
#define convert_uchar1_sat_rte convert_uchar_sat_rte
|
||||
#define convert_char1_sat_rte convert_char_sat_rte
|
||||
#define convert_ushort1_sat_rte convert_ushort_sat_rte
|
||||
#define convert_short1_sat_rte convert_short_sat_rte
|
||||
#define convert_int1_sat_rte convert_int_sat_rte
|
||||
#define convert_float1
|
||||
#define convert_double1
|
||||
|
||||
#if DATA_DEPTH == 5 || DATA_DEPTH == 6
|
||||
#define CONVERT_TO_TYPE CAT(CAT(convert_, BASE_TYPE), VEC_SIZE)
|
||||
#else
|
||||
#define CONVERT_TO_TYPE CAT(CAT(CAT(convert_, BASE_TYPE), VEC_SIZE), _sat_rte)
|
||||
#endif
|
||||
|
||||
#define VEC_SIZE DATA_CHAN
|
||||
|
||||
#define VEC_TYPE CAT(BASE_TYPE, VEC_SIZE)
|
||||
#define TYPE VEC_TYPE
|
||||
|
||||
#define SCALAR_TYPE CAT(FPTYPE, VEC_SIZE)
|
||||
|
||||
#define INTERMEDIATE_TYPE CAT(FPTYPE, VEC_SIZE)
|
||||
|
||||
struct RectCoords
|
||||
{
|
||||
int x1, y1, x2, y2;
|
||||
};
|
||||
|
||||
//#define DEBUG
|
||||
#ifdef DEBUG
|
||||
#define DEBUG_ONLY(x) x
|
||||
#define ASSERT(condition) do { if (!(condition)) { printf("BUG in boxFilter kernel (global=%d,%d): " #condition "\n", get_global_id(0), get_global_id(1)); } } while (0)
|
||||
#else
|
||||
#define DEBUG_ONLY(x) (void)0
|
||||
#define ASSERT(condition) (void)0
|
||||
#endif
|
||||
|
||||
|
||||
inline INTERMEDIATE_TYPE readSrcPixel(int2 pos, __global const uchar* srcptr, int srcstep, const struct RectCoords srcCoords
|
||||
#ifdef BORDER_CONSTANT
|
||||
, SCALAR_TYPE borderValue
|
||||
#endif
|
||||
)
|
||||
{
|
||||
#ifdef BORDER_ISOLATED
|
||||
if(pos.x >= srcCoords.x1 && pos.y >= srcCoords.y1 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)
|
||||
#else
|
||||
if(pos.x >= 0 && pos.y >= 0 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)
|
||||
#endif
|
||||
{
|
||||
//__global TYPE* ptr = (__global TYPE*)((__global char*)src + pos.x * sizeof(TYPE) + pos.y * srcStepBytes);
|
||||
__global TYPE* ptr = (__global TYPE*)(srcptr + pos.y * srcstep + pos.x * sizeof(TYPE));
|
||||
return CONVERT_TO_FPTYPE(*ptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
#ifdef BORDER_CONSTANT
|
||||
return borderValue;
|
||||
#else
|
||||
int selected_col = pos.x;
|
||||
int selected_row = pos.y;
|
||||
|
||||
EXTRAPOLATE(selected_col, selected_row,
|
||||
#ifdef BORDER_ISOLATED
|
||||
srcCoords.x1, srcCoords.y1,
|
||||
#else
|
||||
0, 0,
|
||||
#endif
|
||||
srcCoords.x2, srcCoords.y2
|
||||
);
|
||||
|
||||
// debug border mapping
|
||||
//printf("pos=%d,%d --> %d, %d\n", pos.x, pos.y, selected_col, selected_row);
|
||||
|
||||
pos = (int2)(selected_col, selected_row);
|
||||
if(pos.x >= 0 && pos.y >= 0 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)
|
||||
{
|
||||
//__global TYPE* ptr = (__global TYPE*)((__global char*)src + pos.x * sizeof(TYPE) + pos.y * srcStepBytes);
|
||||
__global TYPE* ptr = (__global TYPE*)(srcptr + pos.y * srcstep + pos.x * sizeof(TYPE));
|
||||
return CONVERT_TO_FPTYPE(*ptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
// for debug only
|
||||
DEBUG_ONLY(printf("BUG in boxFilter kernel\n"));
|
||||
return (FPTYPE)(0.0f);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
// INPUT PARAMETER: BLOCK_SIZE_Y (via defines)
|
||||
|
||||
__kernel
|
||||
__attribute__((reqd_work_group_size(LOCAL_SIZE, 1, 1)))
|
||||
void filter2D(__global const uchar* srcptr, int srcstep, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,
|
||||
__global uchar* dstptr, int dststep, int dstoffset,
|
||||
int rows, int cols,
|
||||
#ifdef BORDER_CONSTANT
|
||||
SCALAR_TYPE borderValue,
|
||||
#endif
|
||||
__constant FPTYPE* kernelData // transposed: [KERNEL_SIZE_X][KERNEL_SIZE_Y2_ALIGNED]
|
||||
)
|
||||
{
|
||||
const struct RectCoords srcCoords = {srcOffsetX, srcOffsetY, srcEndX, srcEndY}; // for non-isolated border: offsetX, offsetY, wholeX, wholeY
|
||||
|
||||
const int local_id = get_local_id(0);
|
||||
const int x = local_id + (LOCAL_SIZE - (KERNEL_SIZE_X - 1)) * get_group_id(0) - ANCHOR_X;
|
||||
const int y = get_global_id(1) * BLOCK_SIZE_Y;
|
||||
|
||||
INTERMEDIATE_TYPE data[KERNEL_SIZE_Y];
|
||||
__local INTERMEDIATE_TYPE sumOfCols[LOCAL_SIZE];
|
||||
|
||||
int2 srcPos = (int2)(srcCoords.x1 + x, srcCoords.y1 + y - ANCHOR_Y);
|
||||
|
||||
int2 pos = (int2)(x, y);
|
||||
__global TYPE* dstPtr = (__global TYPE*)((__global char*)dstptr + pos.y * dststep + dstoffset + pos.x * sizeof(TYPE)); // Pointer can be out of bounds!
|
||||
bool writeResult = ((local_id >= ANCHOR_X) && (local_id < LOCAL_SIZE - (KERNEL_SIZE_X - 1 - ANCHOR_X)) &&
|
||||
(pos.x >= 0) && (pos.x < cols));
|
||||
|
||||
#if BLOCK_SIZE_Y > 1
|
||||
bool readAllpixels = true;
|
||||
int sy_index = 0; // current index in data[] array
|
||||
|
||||
dstRowsMax = min(rows, pos.y + BLOCK_SIZE_Y);
|
||||
for (;
|
||||
pos.y < dstRowsMax;
|
||||
pos.y++,
|
||||
dstPtr = (__global TYPE*)((__global char*)dstptr + dststep))
|
||||
#endif
|
||||
{
|
||||
ASSERT(pos.y < dstRowsMax);
|
||||
|
||||
for (
|
||||
#if BLOCK_SIZE_Y > 1
|
||||
int sy = readAllpixels ? 0 : -1; sy < (readAllpixels ? KERNEL_SIZE_Y : 0);
|
||||
#else
|
||||
int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y;
|
||||
#endif
|
||||
sy++, srcPos.y++)
|
||||
{
|
||||
data[sy + sy_index] = readSrcPixel(srcPos, srcptr, srcstep, srcCoords
|
||||
#ifdef BORDER_CONSTANT
|
||||
, borderValue
|
||||
#endif
|
||||
);
|
||||
}
|
||||
|
||||
INTERMEDIATE_TYPE total_sum = 0;
|
||||
for (int sx = 0; sx < KERNEL_SIZE_X; sx++)
|
||||
{
|
||||
{
|
||||
__constant FPTYPE* k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * sx
|
||||
#if BLOCK_SIZE_Y > 1
|
||||
+ KERNEL_SIZE_Y - sy_index
|
||||
#endif
|
||||
];
|
||||
INTERMEDIATE_TYPE tmp_sum = 0;
|
||||
for (int sy = 0; sy < KERNEL_SIZE_Y; sy++)
|
||||
{
|
||||
tmp_sum += data[sy] * k[sy];
|
||||
}
|
||||
|
||||
sumOfCols[local_id] = tmp_sum;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
|
||||
int id = local_id + sx - ANCHOR_X;
|
||||
if (id >= 0 && id < LOCAL_SIZE)
|
||||
total_sum += sumOfCols[id];
|
||||
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
|
||||
if (writeResult)
|
||||
{
|
||||
*dstPtr = CONVERT_TO_TYPE(total_sum);
|
||||
}
|
||||
|
||||
#if BLOCK_SIZE_Y > 1
|
||||
readAllpixels = false;
|
||||
#if BLOCK_SIZE_Y > KERNEL_SIZE_Y
|
||||
sy_index = (sy_index + 1 <= KERNEL_SIZE_Y) ? sy_index + 1 : 1;
|
||||
#else
|
||||
sy_index++;
|
||||
#endif
|
||||
#endif // BLOCK_SIZE_Y == 1
|
||||
}
|
||||
}
|
512    modules/imgproc/src/opencl/integral_sqrsum.cl    Normal file
@@ -0,0 +1,512 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
|
||||
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// @Authors
|
||||
// Shengen Yan,yanshengen@gmail.com
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors as is and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifdef DOUBLE_SUPPORT
|
||||
#ifdef cl_amd_fp64
|
||||
#pragma OPENCL EXTENSION cl_amd_fp64:enable
|
||||
#elif defined (cl_khr_fp64)
|
||||
#pragma OPENCL EXTENSION cl_khr_fp64:enable
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if sqdepth == 6
|
||||
#define CONVERT(step) ((step)>>1)
|
||||
#else
|
||||
#define CONVERT(step) ((step))
|
||||
#endif
|
||||
|
||||
#define LSIZE 256
|
||||
#define LSIZE_1 255
|
||||
#define LSIZE_2 254
|
||||
#define HF_LSIZE 128
|
||||
#define LOG_LSIZE 8
|
||||
#define LOG_NUM_BANKS 5
|
||||
#define NUM_BANKS 32
|
||||
#define GET_CONFLICT_OFFSET(lid) ((lid) >> LOG_NUM_BANKS)
|
||||
|
||||
#define noconvert
|
||||
|
||||
#if sdepth == 4
|
||||
|
||||
kernel void integral_cols(__global uchar4 *src, __global int *sum, __global TYPE *sqsum,
|
||||
int src_offset, int pre_invalid, int rows, int cols, int src_step, int dst_step, int dst1_step)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
int4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsum_t[2];
|
||||
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local int* sum_p;
|
||||
__local TYPE* sqsum_p;
|
||||
src_step = src_step >> 2;
|
||||
gid = gid << 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = (i + lid < rows ? convert_int4(src[src_offset + (lid+i) * src_step + min(gid, cols - 1)]) : 0);
|
||||
src_t[1] = (i + lid < rows ? convert_int4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : 0);
|
||||
|
||||
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step /4, loc_s1 = loc_s0 + dst_step ;
|
||||
int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE),loc_sq1 = loc_sq0 + CONVERT(dst1_step);
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
|
||||
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
|
||||
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
kernel void integral_rows(__global int4 *srcsum, __global TYPE4 * srcsqsum,__global int *sum,
|
||||
__global TYPE *sqsum, int rows, int cols, int src_step, int src1_step, int sum_step,
|
||||
int sqsum_step, int sum_offset, int sqsum_offset)
|
||||
{
|
||||
int lid = get_local_id(0);
|
||||
int gid = get_group_id(0);
|
||||
int4 src_t[2], sum_t[2];
|
||||
TYPE4 sqsrc_t[2],sqsum_t[2];
|
||||
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
|
||||
__local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
|
||||
__local int *sum_p;
|
||||
__local TYPE *sqsum_p;
|
||||
src_step = src_step >> 4;
|
||||
src1_step = (src1_step / sizeof(TYPE)) >> 2 ;
|
||||
gid <<= 1;
|
||||
for(int i = 0; i < rows; i =i + LSIZE_1)
|
||||
{
|
||||
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid ] : (int4)0;
|
||||
sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid ] : (TYPE4)0;
|
||||
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid + 1] : (int4)0;
|
||||
sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid + 1] : (TYPE4)0;
|
||||
|
||||
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
|
||||
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
|
||||
lm_sum[0][bf_loc] = src_t[0];
|
||||
lm_sqsum[0][bf_loc] = sqsrc_t[0];
|
||||
|
||||
lm_sum[1][bf_loc] = src_t[1];
|
||||
lm_sqsum[1][bf_loc] = sqsrc_t[1];
|
||||
|
||||
int offset = 1;
|
||||
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
offset <<= 1;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(lid < 2)
|
||||
{
|
||||
lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
|
||||
}
|
||||
for(int d = 1; d < LSIZE; d <<= 1)
|
||||
{
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
offset >>= 1;
|
||||
int ai = offset * (((lid & 127)<<1) +1) - 1,bi = ai + offset;
|
||||
ai += GET_CONFLICT_OFFSET(ai);
|
||||
bi += GET_CONFLICT_OFFSET(bi);
|
||||
|
||||
if((lid & 127) < d)
|
||||
{
|
||||
lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
|
||||
lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
|
||||
|
||||
lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
|
||||
lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
if(gid == 0 && (i + lid) <= rows)
|
||||
{
|
||||
sum[sum_offset + i + lid] = 0;
|
||||
sqsum[sqsum_offset + i + lid] = 0;
|
||||
}
|
||||
if(i + lid == 0)
|
||||
{
|
||||
int loc0 = gid * sum_step;
|
||||
int loc1 = gid * CONVERT(sqsum_step);
|
||||
for(int k = 1; k <= 8; k++)
|
||||
{
|
||||
if(gid * 4 + k > cols) break;
|
||||
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
|
||||
sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
|
||||
}
|
||||
}
|
||||
int loc_s0 = sum_offset + gid * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
|
||||
int loc_sq0 = sqsum_offset + gid * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
|
||||
|
||||
if(lid > 0 && (i+lid) <= rows)
|
||||
{
|
||||
lm_sum[0][bf_loc] += sum_t[0];
|
||||
lm_sum[1][bf_loc] += sum_t[1];
|
||||
lm_sqsum[0][bf_loc] += sqsum_t[0];
|
||||
lm_sqsum[1][bf_loc] += sqsum_t[1];
|
||||
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + k >= cols) break;
|
||||
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
|
||||
sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
|
||||
for(int k = 0; k < 4; k++)
|
||||
{
|
||||
if(gid * 4 + 4 + k >= cols) break;
|
||||
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
|
||||
sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
|
||||
}
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
}
|
||||
|
||||
#elif sdepth == 5
|
||||
|
||||
kernel void integral_cols(__global uchar4 *src, __global float *sum, __global TYPE *sqsum,
                          int src_offset, int pre_invalid, int rows, int cols, int src_step, int dst_step, int dst1_step)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    float4 src_t[2], sum_t[2];
    TYPE4 sqsum_t[2];
    __local float4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
    __local float* sum_p;
    __local TYPE* sqsum_p;
    src_step = src_step >> 2;
    gid = gid << 1;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid + i) * src_step + min(gid, cols - 1)]) : (float4)0);
        src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid + i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);

        sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];
        lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
        // printf("%f\n", src_t[0].s0);

        lm_sum[1][bf_loc] = src_t[1];
        lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
            lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

                lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
                lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step;
        int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE), loc_sq1 = loc_sq0 + CONVERT(dst1_step);
        if (lid > 0 && (i + lid) <= rows)
        {
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            lm_sqsum[0][bf_loc] += sqsum_t[0];
            lm_sqsum[1][bf_loc] += sqsum_t[1];
            sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
            sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
                sum[loc_s0 + k * dst_step / 4] = sum_p[k];
                sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
            }
            sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
            sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k + 4 >= cols + pre_invalid) break;
                sum[loc_s1 + k * dst_step / 4] = sum_p[k];
                sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

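The kernel above is the first of two passes: each 256-item work-group walks a pair of uchar4 columns down the image, maintaining a running per-column (and squared) prefix sum in local memory and writing the result into a transposed intermediate buffer; the rows pass below then scans that buffer to finish the 2-D integral. A scalar host-side reference of the same two-pass idea, written only for clarity (values and sizes are made up):

```cpp
// Reference sketch (not part of this .cl file): the two-pass integral-image scheme,
// scalar and unoptimised. Pass 1 scans columns, pass 2 scans rows of that result.
#include <cstdio>
#include <vector>

int main()
{
    const int rows = 3, cols = 4;
    int src[rows][cols] = { {1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12} };
    std::vector<std::vector<long long>> integ(rows, std::vector<long long>(cols));

    for (int x = 0; x < cols; ++x)                 // pass 1: prefix sums down each column
        for (int y = 0; y < rows; ++y)
            integ[y][x] = src[y][x] + (y ? integ[y - 1][x] : 0);

    for (int y = 0; y < rows; ++y)                 // pass 2: prefix sums along each row
        for (int x = 1; x < cols; ++x)
            integ[y][x] += integ[y][x - 1];

    std::printf("total = %lld\n", integ[rows - 1][cols - 1]);   // 78 = sum of all pixels
    return 0;
}
```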
kernel void integral_rows(__global float4 *srcsum, __global TYPE4 *srcsqsum, __global float *sum,
                          __global TYPE *sqsum, int rows, int cols, int src_step, int src1_step, int sum_step,
                          int sqsum_step, int sum_offset, int sqsum_offset)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    float4 src_t[2], sum_t[2];
    TYPE4 sqsrc_t[2], sqsum_t[2];
    __local float4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
    __local float *sum_p;
    __local TYPE *sqsum_p;
    src_step = src_step >> 4;
    src1_step = (src1_step / sizeof(TYPE)) >> 2;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2] : (float4)0;
        sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid + i) * src1_step + gid * 2] : (TYPE4)0;
        src_t[1] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2 + 1] : (float4)0;
        sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid + i) * src1_step + gid * 2 + 1] : (TYPE4)0;

        sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];
        lm_sqsum[0][bf_loc] = sqsrc_t[0];

        lm_sum[1][bf_loc] = src_t[1];
        lm_sqsum[1][bf_loc] = sqsrc_t[1];

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
            lm_sqsum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];

                lm_sqsum[lid >> 7][bi] += lm_sqsum[lid >> 7][ai];
                lm_sqsum[lid >> 7][ai] = lm_sqsum[lid >> 7][bi] - lm_sqsum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (gid == 0 && (i + lid) <= rows)
        {
            sum[sum_offset + i + lid] = 0;
            sqsum[sqsum_offset + i + lid] = 0;
        }
        if (i + lid == 0)
        {
            int loc0 = gid * 2 * sum_step;
            int loc1 = gid * 2 * CONVERT(sqsum_step);
            for (int k = 1; k <= 8; k++)
            {
                if (gid * 8 + k > cols) break;
                sum[sum_offset + loc0 + k * sum_step / 4] = 0;
                sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
            }
        }
        int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step;
        int loc_sq0 = sqsum_offset + gid * 2 * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step);
        if (lid > 0 && (i + lid) <= rows)
        {
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            lm_sqsum[0][bf_loc] += sqsum_t[0];
            lm_sqsum[1][bf_loc] += sqsum_t[1];
            sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
            sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + k >= cols) break;
                sum[loc_s0 + k * sum_step / 4] = sum_p[k];
                sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
            }
            sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
            sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + 4 + k >= cols) break;
                sum[loc_s1 + k * sum_step / 4] = sum_p[k];
                sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

#endif
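Once both passes have run, `sum` and `sqsum` allow the mean and variance of any axis-aligned window to be read off with four lookups each, which is the whole point of keeping the squared-sum image alongside the plain one. An illustrative helper (not part of the kernels; it assumes CV_64F sums):

```cpp
// Illustrative helper: mean/variance of a rectangle from sum and sqsum integral images.
#include <opencv2/core.hpp>

static void meanVar(const cv::Mat& sum, const cv::Mat& sqsum, cv::Rect r,
                    double& mean, double& var)
{
    // Integral images are (rows+1) x (cols+1); this sketch assumes CV_64F elements.
    auto at = [](const cv::Mat& m, int y, int x) { return m.at<double>(y, x); };
    double s  = at(sum,   r.y + r.height, r.x + r.width) - at(sum,   r.y, r.x + r.width)
              - at(sum,   r.y + r.height, r.x)           + at(sum,   r.y, r.x);
    double sq = at(sqsum, r.y + r.height, r.x + r.width) - at(sqsum, r.y, r.x + r.width)
              - at(sqsum, r.y + r.height, r.x)           + at(sqsum, r.y, r.x);
    double n = (double)r.area();
    mean = s / n;
    var  = sq / n - mean * mean;
}
```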
413
modules/imgproc/src/opencl/integral_sum.cl
Normal file
@@ -0,0 +1,413 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//    Shengen Yan, yanshengen@gmail.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifdef DOUBLE_SUPPORT
#ifdef cl_amd_fp64
#pragma OPENCL EXTENSION cl_amd_fp64:enable
#elif defined (cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
#endif

#define LSIZE 256
#define LSIZE_1 255
#define LSIZE_2 254
#define HF_LSIZE 128
#define LOG_LSIZE 8
#define LOG_NUM_BANKS 5
#define NUM_BANKS 32
#define GET_CONFLICT_OFFSET(lid) ((lid) >> LOG_NUM_BANKS)

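`GET_CONFLICT_OFFSET` is the usual local-memory padding trick used in work-group scans: roughly one extra slot is skipped for every `NUM_BANKS` (32) logical elements, which is why the `lm_sum`/`lm_sqsum` arrays are sized `LSIZE + LOG_LSIZE` (256 + 8) instead of `LSIZE`. A tiny host-side sketch of the same index arithmetic, for illustration only (the constants mirror the macros above):

```cpp
// Illustrative re-implementation of the bank-conflict padding used by these kernels.
#include <cstdio>
#include <initializer_list>

constexpr int LOG_NUM_BANKS = 5;                                // 32 banks
constexpr int conflict_offset(int idx) { return idx >> LOG_NUM_BANKS; }

int main()
{
    // A work-item that wants logical slot `lid` actually uses `lid + conflict_offset(lid)`,
    // spreading the strided accesses of the up-sweep/down-sweep phases across banks.
    for (int lid : {0, 31, 32, 63, 64, 255})
        std::printf("lid=%3d -> padded index %3d\n", lid, lid + conflict_offset(lid));
    return 0;
}
```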
#if sdepth == 4

kernel void integral_sum_cols(__global uchar4 *src, __global int *sum,
                              int src_offset, int pre_invalid, int rows, int cols, int src_step, int dst_step)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    int4 src_t[2], sum_t[2];
    __local int4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local int* sum_p;
    src_step = src_step >> 2;
    gid = gid << 1;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = (i + lid < rows ? convert_int4(src[src_offset + (lid + i) * src_step + gid]) : 0);
        src_t[1] = (i + lid < rows ? convert_int4(src[src_offset + (lid + i) * src_step + gid + 1]) : 0);

        sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];

        lm_sum[1][bf_loc] = src_t[1];

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid > 0 && (i + lid) <= rows)
        {
            int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step;
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
                sum[loc_s0 + k * dst_step / 4] = sum_p[k];
            }
            sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k + 4 >= cols + pre_invalid) break;
                sum[loc_s1 + k * dst_step / 4] = sum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

kernel void integral_sum_rows(__global int4 *srcsum, __global int *sum,
                              int rows, int cols, int src_step, int sum_step, int sum_offset)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    int4 src_t[2], sum_t[2];
    __local int4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local int *sum_p;
    src_step = src_step >> 4;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2] : 0;
        src_t[1] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2 + 1] : 0;

        sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];

        lm_sum[1][bf_loc] = src_t[1];

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (gid == 0 && (i + lid) <= rows)
        {
            sum[sum_offset + i + lid] = 0;
        }
        if (i + lid == 0)
        {
            int loc0 = gid * 2 * sum_step;
            for (int k = 1; k <= 8; k++)
            {
                if (gid * 8 + k > cols) break;
                sum[sum_offset + loc0 + k * sum_step / 4] = 0;
            }
        }

        if (lid > 0 && (i + lid) <= rows)
        {
            int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step;
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + k >= cols) break;
                sum[loc_s0 + k * sum_step / 4] = sum_p[k];
            }
            sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + 4 + k >= cols) break;
                sum[loc_s1 + k * sum_step / 4] = sum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

#elif sdepth == 5

kernel void integral_sum_cols(__global uchar4 *src, __global float *sum,
                              int src_offset, int pre_invalid, int rows, int cols, int src_step, int dst_step)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    float4 src_t[2], sum_t[2];
    __local float4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local float* sum_p;
    src_step = src_step >> 2;
    gid = gid << 1;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = (i + lid < rows ? convert_float4(src[src_offset + (lid + i) * src_step + gid]) : (float4)0);
        src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid + i) * src_step + gid + 1]) : (float4)0);

        sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];

        lm_sum[1][bf_loc] = src_t[1];

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid > 0 && (i + lid) <= rows)
        {
            int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step;
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
                sum[loc_s0 + k * dst_step / 4] = sum_p[k];
            }
            sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 4 + k + 4 >= cols + pre_invalid) break;
                sum[loc_s1 + k * dst_step / 4] = sum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

kernel void integral_sum_rows(__global float4 *srcsum, __global float *sum,
                              int rows, int cols, int src_step, int sum_step, int sum_offset)
{
    int lid = get_local_id(0);
    int gid = get_group_id(0);
    float4 src_t[2], sum_t[2];
    __local float4 lm_sum[2][LSIZE + LOG_LSIZE];
    __local float *sum_p;
    src_step = src_step >> 4;
    for (int i = 0; i < rows; i += LSIZE_1)
    {
        src_t[0] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2] : (float4)0;
        src_t[1] = i + lid < rows ? srcsum[(lid + i) * src_step + gid * 2 + 1] : (float4)0;

        sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
        sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
        barrier(CLK_LOCAL_MEM_FENCE);

        int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
        lm_sum[0][bf_loc] = src_t[0];

        lm_sum[1][bf_loc] = src_t[1];

        int offset = 1;
        for (int d = LSIZE >> 1; d > 0; d >>= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
            }
            offset <<= 1;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < 2)
        {
            lm_sum[lid][LSIZE_2 + LOG_LSIZE] = 0;
        }
        for (int d = 1; d < LSIZE; d <<= 1)
        {
            barrier(CLK_LOCAL_MEM_FENCE);
            offset >>= 1;
            int ai = offset * (((lid & 127) << 1) + 1) - 1, bi = ai + offset;
            ai += GET_CONFLICT_OFFSET(ai);
            bi += GET_CONFLICT_OFFSET(bi);

            if ((lid & 127) < d)
            {
                lm_sum[lid >> 7][bi] += lm_sum[lid >> 7][ai];
                lm_sum[lid >> 7][ai] = lm_sum[lid >> 7][bi] - lm_sum[lid >> 7][ai];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (gid == 0 && (i + lid) <= rows)
        {
            sum[sum_offset + i + lid] = 0;
        }
        if (i + lid == 0)
        {
            int loc0 = gid * 2 * sum_step;
            for (int k = 1; k <= 8; k++)
            {
                if (gid * 8 + k > cols) break;
                sum[sum_offset + loc0 + k * sum_step / 4] = 0;
            }
        }

        if (lid > 0 && (i + lid) <= rows)
        {
            int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step;
            lm_sum[0][bf_loc] += sum_t[0];
            lm_sum[1][bf_loc] += sum_t[1];
            sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + k >= cols) break;
                sum[loc_s0 + k * sum_step / 4] = sum_p[k];
            }
            sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
            for (int k = 0; k < 4; k++)
            {
                if (gid * 8 + 4 + k >= cols) break;
                sum[loc_s1 + k * sum_step / 4] = sum_p[k];
            }
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}

#endif
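The `sdepth == 4` / `sdepth == 5` branches above correspond to CV_32S and CV_32F sum depths; the host side (see `ocl_integral()` in sumpixels.cpp further down) builds this program with `-D sdepth=...` and launches the two kernels back to back. A hedged usage sketch from the caller's perspective:

```cpp
// Hedged usage sketch: a UMat destination with no sqsum/tilted request lets the
// OpenCL integral path run; CV_32S selects the `#if sdepth == 4` branch above.
#include <opencv2/imgproc.hpp>

cv::UMat sumOnlyIntegral(const cv::UMat& img8u)
{
    cv::UMat sum;
    cv::integral(img8u, sum, CV_32S);
    return sum;
}
```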
@@ -64,7 +64,7 @@ adjustRect( const uchar* src, size_t src_step, int pix_size,
        rect.x = win_size.width;
    }

    if( ip.x + win_size.width < src_size.width )
    if( ip.x < src_size.width - win_size.width )
        rect.width = win_size.width;
    else
    {
@@ -85,7 +85,7 @@ adjustRect( const uchar* src, size_t src_step, int pix_size,
    else
        rect.y = -ip.y;

    if( ip.y + win_size.height < src_size.height )
    if( ip.y < src_size.height - win_size.height )
        rect.height = win_size.height;
    else
    {
@@ -155,8 +155,8 @@ void getRectSubPix_Cn_(const _Tp* src, size_t src_step, Size src_size,
    src_step /= sizeof(src[0]);
    dst_step /= sizeof(dst[0]);

    if( 0 <= ip.x && ip.x + win_size.width < src_size.width &&
        0 <= ip.y && ip.y + win_size.height < src_size.height )
    if( 0 <= ip.x && ip.x < src_size.width - win_size.width &&
        0 <= ip.y && ip.y < src_size.height - win_size.height)
    {
        // extracted rectangle is totally inside the image
        src += ip.y * src_step + ip.x*cn;

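The three hunks above rewrite `ip.x + win_size.width < src_size.width` as the equivalent `ip.x < src_size.width - win_size.width`; the subtraction stays within range because the window size never exceeds the image size, whereas the addition can overflow `int` when `ip.x` is very large. A small self-contained illustration with made-up values:

```cpp
// Illustration of the bounds-check rewrite (hypothetical values, not from the diff).
#include <climits>
#include <cstdio>

int main()
{
    int ip_x = INT_MAX - 3;       // extreme window origin
    int win_w = 16, src_w = 640;  // ordinary window and image widths

    // Old form: ip_x + win_w overflows, and signed overflow is undefined behaviour.
    // bool inside_old = ip_x + win_w < src_w;   // do not write this

    // New form: both operands stay in range, the comparison is well defined.
    bool inside_new = ip_x < src_w - win_w;
    std::printf("inside_new = %d\n", (int)inside_new);
    return 0;
}
```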
@@ -41,6 +41,7 @@
//M*/

#include "precomp.hpp"
#include "opencl_kernels.hpp"

/*
 * This file includes the code, contributed by Simon Perreault
@@ -797,10 +798,10 @@ cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
    return kernel;
}

namespace cv {

cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
                                                    double sigma1, double sigma2,
                                                    int borderType )
static void createGaussianKernels( Mat & kx, Mat & ky, int type, Size ksize,
                                   double sigma1, double sigma2 )
{
    int depth = CV_MAT_DEPTH(type);
    if( sigma2 <= 0 )
@@ -818,12 +819,21 @@ cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
    sigma1 = std::max( sigma1, 0. );
    sigma2 = std::max( sigma2, 0. );

    Mat kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) );
    Mat ky;
    kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) );
    if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
        ky = kx;
    else
        ky = getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F) );
}

}

cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
                                                    double sigma1, double sigma2,
                                                    int borderType )
{
    Mat kx, ky;
    createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);

    return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
}
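The hunk above factors kernel construction out of `createGaussianFilter` into `createGaussianKernels`, so both the filter-engine path and the rewritten `GaussianBlur` below can share it; when the kernel is square and the two sigmas agree, `ky` simply aliases `kx`, so only one 1-D kernel is ever computed. A standalone sketch of that decision (illustrative, mirroring but not reproducing the diffed function):

```cpp
// Illustrative sketch of the kernel-pair construction and the ky = kx reuse.
#include <opencv2/imgproc.hpp>
#include <cfloat>
#include <cmath>

void makeKernels(cv::Mat& kx, cv::Mat& ky, cv::Size ksize, double sigma1, double sigma2)
{
    kx = cv::getGaussianKernel(ksize.width, sigma1, CV_32F);
    if (ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON)
        ky = kx;                 // shares the same data; no second kernel is built
    else
        ky = cv::getGaussianKernel(ksize.height, sigma2, CV_32F);
}
```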
@@ -833,33 +843,34 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
                       double sigma1, double sigma2,
                       int borderType )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    Mat dst = _dst.getMat();
    int type = _src.type();
    Size size = _src.size();
    _dst.create( size, type );

    if( borderType != BORDER_CONSTANT )
    {
        if( src.rows == 1 )
        if( size.height == 1 )
            ksize.height = 1;
        if( src.cols == 1 )
        if( size.width == 1 )
            ksize.width = 1;
    }

    if( ksize.width == 1 && ksize.height == 1 )
    {
        src.copyTo(dst);
        _src.copyTo(_dst);
        return;
    }

#ifdef HAVE_TEGRA_OPTIMIZATION
    if(sigma1 == 0 && sigma2 == 0 && tegra::gaussian(src, dst, ksize, borderType))
    if(sigma1 == 0 && sigma2 == 0 && tegra::gaussian(_src.getMat(), _dst.getMat(), ksize, borderType))
        return;
#endif

#if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7)
    if(src.type() == CV_32FC1 && sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 )
    if( type == CV_32FC1 && sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 )
    {
        IppiSize roi = {src.cols, src.rows};
        Mat src = _src.getMat(), dst = _dst.getMat();
        IppiSize roi = { src.cols, src.rows };
        int bufSize = 0;
        ippiFilterGaussGetBufferSize_32f_C1R(roi, ksize.width, &bufSize);
        AutoBuffer<uchar> buf(bufSize+128);
@@ -872,11 +883,11 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
    }
#endif

    Ptr<FilterEngine> f = createGaussianFilter( src.type(), ksize, sigma1, sigma2, borderType );
    f->apply( src, dst );
    Mat kx, ky;
    createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
    sepFilter2D(_src, _dst, CV_MAT_DEPTH(type), kx, ky, Point(-1,-1), 0, borderType );
}

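After this hunk, `GaussianBlur` reduces to "build the two 1-D kernels, then `sepFilter2D`", and it forwards `_src`/`_dst` without materializing `Mat` views up front, so a `cv::UMat` input is not forced back to the CPU at this level if a device-side separable-filter path is available. A hedged equivalence check (parameters are illustrative):

```cpp
// Hedged sketch: GaussianBlur versus the explicit kernel + sepFilter2D form it now uses.
#include <opencv2/imgproc.hpp>

void blurEquivalents(const cv::Mat& src, cv::Mat& a, cv::Mat& b)
{
    cv::GaussianBlur(src, a, cv::Size(5, 5), 1.2, 1.2, cv::BORDER_DEFAULT);

    cv::Mat k = cv::getGaussianKernel(5, 1.2, CV_32F);        // same 1-D kernel both ways
    cv::sepFilter2D(src, b, -1, k, k, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
    // a and b should agree to within floating-point rounding.
}
```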
/****************************************************************************************\
                                      Median Filter
\****************************************************************************************/
@@ -1914,19 +1925,91 @@ private:
};
#endif

static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
                                   double sigma_color, double sigma_space,
                                   int borderType)
{
    int type = _src.type(), cn = CV_MAT_CN(type);
    int i, j, maxk, radius;

    if ( type != CV_8UC1 )
        return false;

    if (sigma_color <= 0)
        sigma_color = 1;
    if (sigma_space <= 0)
        sigma_space = 1;

    double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
    double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);

    if ( d <= 0 )
        radius = cvRound(sigma_space * 1.5);
    else
        radius = d / 2;
    radius = MAX(radius, 1);
    d = radius * 2 + 1;

    UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
    if (src.u == dst.u)
        return false;

    copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);

    std::vector<float> _color_weight(cn * 256);
    std::vector<float> _space_weight(d * d);
    std::vector<int> _space_ofs(d * d);
    float *color_weight = &_color_weight[0];
    float *space_weight = &_space_weight[0];
    int *space_ofs = &_space_ofs[0];

    // initialize color-related bilateral filter coefficients
    for( i = 0; i < 256 * cn; i++ )
        color_weight[i] = (float)std::exp(i * i * gauss_color_coeff);

    // initialize space-related bilateral filter coefficients
    for( i = -radius, maxk = 0; i <= radius; i++ )
        for( j = -radius; j <= radius; j++ )
        {
            double r = std::sqrt((double)i * i + (double)j * j);
            if ( r > radius )
                continue;
            space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
            space_ofs[maxk++] = (int)(i * temp.step + j);
        }

    ocl::Kernel k("bilateral", ocl::imgproc::bilateral_oclsrc,
                  format("-D radius=%d -D maxk=%d", radius, maxk));
    if (k.empty())
        return false;

    Mat mcolor_weight(1, cn * 256, CV_32FC1, color_weight);
    Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
    Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
    UMat ucolor_weight, uspace_weight, uspace_ofs;
    mcolor_weight.copyTo(ucolor_weight);
    mspace_weight.copyTo(uspace_weight);
    mspace_ofs.copyTo(uspace_ofs);

    k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
           ocl::KernelArg::PtrReadOnly(ucolor_weight),
           ocl::KernelArg::PtrReadOnly(uspace_weight),
           ocl::KernelArg::PtrReadOnly(uspace_ofs));

    size_t globalsize[2] = { dst.cols, dst.rows };
    return k.run(2, globalsize, NULL, false);
}

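The new OpenCL branch precomputes the color and space weight tables on the host, uploads them as UMats, and bakes `radius` and `maxk` into the program as compile-time constants via the `-D` build options. The weight formulas themselves are the standard bilateral Gaussians; evaluating them for a few inputs (made-up sigmas) shows the fall-off the tables encode:

```cpp
// Illustrative evaluation of the same weight formulas used above.
#include <cmath>
#include <cstdio>
#include <initializer_list>

int main()
{
    double sigma_color = 25.0, sigma_space = 5.0;            // hypothetical sigmas
    double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
    double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);

    for (int diff : {0, 10, 25, 50})                         // intensity differences
        std::printf("color_weight(%2d) = %.4f\n", diff,
                    std::exp(diff * diff * gauss_color_coeff));

    double r = std::sqrt(3.0 * 3.0 + 4.0 * 4.0);             // offset (3,4) -> r = 5
    std::printf("space_weight(3,4) = %.4f\n", std::exp(r * r * gauss_space_coeff));
    return 0;
}
```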
static void
bilateralFilter_8u( const Mat& src, Mat& dst, int d,
                    double sigma_color, double sigma_space,
                    int borderType )
{
    int cn = src.channels();
    int i, j, maxk, radius;
    Size size = src.size();

    CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
               src.type() == dst.type() && src.size() == dst.size() &&
               src.data != dst.data );
    CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );

    if( sigma_color <= 0 )
        sigma_color = 1;
@@ -1973,7 +2056,7 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
    {
        j = -radius;

        for( ;j <= radius; j++ )
        for( ; j <= radius; j++ )
        {
            double r = std::sqrt((double)i*i + (double)j*j);
            if( r > radius )
@@ -2184,9 +2267,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
    float len, scale_index;
    Size size = src.size();

    CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) &&
               src.type() == dst.type() && src.size() == dst.size() &&
               src.data != dst.data );
    CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );

    if( sigma_color <= 0 )
        sigma_color = 1;
@@ -2267,9 +2348,13 @@ void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
                          double sigmaColor, double sigmaSpace,
                          int borderType )
{
    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    Mat dst = _dst.getMat();
    _dst.create( _src.size(), _src.type() );

    if (ocl::useOpenCL() && _src.dims() <= 2 && _dst.isUMat() &&
        ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
        return;

    Mat src = _src.getMat(), dst = _dst.getMat();

    if( src.depth() == CV_8U )
        bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );

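With the dispatch above, `cv::bilateralFilter` tries the OpenCL branch only for a 2-D source with a UMat destination and an 8-bit single-channel type; everything else falls through to the existing CPU implementations. A hedged usage sketch (parameter values are illustrative):

```cpp
// Hedged usage sketch: UMat in/out so the ocl_bilateralFilter_8u branch can be taken.
#include <opencv2/imgproc.hpp>

void smoothPreservingEdges(const cv::UMat& gray8u, cv::UMat& out)
{
    // d = 9, sigmaColor = 75, sigmaSpace = 75 are illustrative parameters.
    cv::bilateralFilter(gray8u, out, 9, 75, 75, cv::BORDER_DEFAULT);
}
```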
@@ -41,6 +41,8 @@
//M*/

#include "precomp.hpp"
#include "opencl_kernels.hpp"

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
static IppStatus sts = ippInit();
#endif
@@ -215,30 +217,140 @@ static void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep,
{ integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); }

DEF_INTEGRAL_FUNC(8u32s, uchar, int, double)
DEF_INTEGRAL_FUNC(8u32f, uchar, float, double)
DEF_INTEGRAL_FUNC(8u64f, uchar, double, double)
DEF_INTEGRAL_FUNC(16u64f, ushort, double, double)
DEF_INTEGRAL_FUNC(16s64f, short, double, double)
DEF_INTEGRAL_FUNC(32f, float, float, double)
DEF_INTEGRAL_FUNC(32f64f, float, double, double)
DEF_INTEGRAL_FUNC(64f, double, double, double)
DEF_INTEGRAL_FUNC(8u32f64f, uchar, float, double)
DEF_INTEGRAL_FUNC(8u64f64f, uchar, double, double)
DEF_INTEGRAL_FUNC(16u64f64f, ushort, double, double)
DEF_INTEGRAL_FUNC(16s64f64f, short, double, double)
DEF_INTEGRAL_FUNC(32f32f64f, float, float, double)
DEF_INTEGRAL_FUNC(32f64f64f, float, double, double)
DEF_INTEGRAL_FUNC(64f64f64f, double, double, double)

DEF_INTEGRAL_FUNC(8u32s32f, uchar, int, float)
DEF_INTEGRAL_FUNC(8u32f32f, uchar, float, float)
DEF_INTEGRAL_FUNC(32f32f32f, float, float, float)

typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep,
                             uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep,
                             Size size, int cn );

enum { vlen = 4 };

static bool ocl_integral( InputArray _src, OutputArray _sum, int sdepth )
{
    if ( _src.type() != CV_8UC1 || _src.step() % vlen != 0 || _src.offset() % vlen != 0 ||
         !(sdepth == CV_32S || sdepth == CV_32F) )
        return false;

    ocl::Kernel k1("integral_sum_cols", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    if (k1.empty())
        return false;

    Size size = _src.size(), t_size = Size(((size.height + vlen - 1) / vlen) * vlen, size.width),
         ssize(size.width + 1, size.height + 1);
    _sum.create(ssize, sdepth);
    UMat src = _src.getUMat(), t_sum(t_size, sdepth), sum = _sum.getUMat();
    t_sum = t_sum(Range::all(), Range(0, size.height));

    int offset = (int)src.offset / vlen, pre_invalid = (int)src.offset % vlen;
    int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;
    int sum_offset = (int)sum.offset / vlen;

    k1.args(ocl::KernelArg::PtrReadOnly(src), ocl::KernelArg::PtrWriteOnly(t_sum),
            offset, pre_invalid, src.rows, src.cols, (int)src.step, (int)t_sum.step);
    size_t gt = ((vcols + 1) / 2) * 256, lt = 256;
    if (!k1.run(1, &gt, &lt, false))
        return false;

    ocl::Kernel k2("integral_sum_rows", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    k2.args(ocl::KernelArg::PtrReadWrite(t_sum), ocl::KernelArg::PtrWriteOnly(sum),
            t_sum.rows, t_sum.cols, (int)t_sum.step, (int)sum.step, sum_offset);

    size_t gt2 = t_sum.cols * 32, lt2 = 256;
    return k2.run(1, &gt2, &lt2, false);
}

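The launch-size arithmetic above pairs a local size of 256 (matching the `LSIZE` the kernels are compiled with) against a global size of one work-group per two uchar4 columns. A worked example of those numbers for a hypothetical image width (illustrative values only):

```cpp
// Worked example of the global/local size arithmetic used by ocl_integral().
#include <cstdio>

int main()
{
    const int vlen = 4, cols = 1000, pre_invalid = 0;       // hypothetical 1000-px-wide row
    int vcols = (pre_invalid + cols + vlen - 1) / vlen;     // 250 uchar4 columns
    size_t lt = 256;                                        // must equal LSIZE in the kernel
    size_t gt = ((vcols + 1) / 2) * 256;                    // one group per 2 vector columns
    std::printf("vcols=%d, groups=%zu, global=%zu\n", vcols, gt / lt, gt);
    return 0;
}
```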
static bool ocl_integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, int sdepth, int sqdepth )
{
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if ( _src.type() != CV_8UC1 || _src.step() % vlen != 0 || _src.offset() % vlen != 0 ||
         (!doubleSupport && (sdepth == CV_64F || sqdepth == CV_64F)) )
        return false;

    char cvt[40];
    String opts = format("-D sdepth=%d -D sqdepth=%d -D TYPE=%s -D TYPE4=%s4 -D convert_TYPE4=%s%s",
                         sdepth, sqdepth, ocl::typeToStr(sqdepth), ocl::typeToStr(sqdepth),
                         ocl::convertTypeStr(sdepth, sqdepth, 4, cvt),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel k1("integral_cols", ocl::imgproc::integral_sqrsum_oclsrc, opts);
    if (k1.empty())
        return false;

    Size size = _src.size(), dsize = Size(size.width + 1, size.height + 1),
         t_size = Size(((size.height + vlen - 1) / vlen) * vlen, size.width);
    UMat src = _src.getUMat(), t_sum(t_size, sdepth), t_sqsum(t_size, sqdepth);
    t_sum = t_sum(Range::all(), Range(0, size.height));
    t_sqsum = t_sqsum(Range::all(), Range(0, size.height));

    _sum.create(dsize, sdepth);
    _sqsum.create(dsize, sqdepth);
    UMat sum = _sum.getUMat(), sqsum = _sqsum.getUMat();

    int offset = (int)src.offset / vlen;
    int pre_invalid = src.offset % vlen;
    int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;
    int sum_offset = (int)(sum.offset / sum.elemSize());
    int sqsum_offset = (int)(sqsum.offset / sqsum.elemSize());

    k1.args(ocl::KernelArg::PtrReadOnly(src), ocl::KernelArg::PtrWriteOnly(t_sum),
            ocl::KernelArg::PtrWriteOnly(t_sqsum), offset, pre_invalid, src.rows,
            src.cols, (int)src.step, (int)t_sum.step, (int)t_sqsum.step);

    size_t gt = ((vcols + 1) / 2) * 256, lt = 256;
    if (!k1.run(1, &gt, &lt, false))
        return false;

    ocl::Kernel k2("integral_rows", ocl::imgproc::integral_sqrsum_oclsrc, opts);
    if (k2.empty())
        return false;

    k2.args(ocl::KernelArg::PtrReadOnly(t_sum), ocl::KernelArg::PtrReadOnly(t_sqsum),
            ocl::KernelArg::PtrWriteOnly(sum), ocl::KernelArg::PtrWriteOnly(sqsum),
            t_sum.rows, t_sum.cols, (int)t_sum.step, (int)t_sqsum.step,
            (int)sum.step, (int)sqsum.step, sum_offset, sqsum_offset);

    size_t gt2 = t_sum.cols * 32, lt2 = 256;
    return k2.run(1, &gt2, &lt2, false);
}

}

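This variant additionally guards against devices without double-precision support before a CV_64F sum or square-sum is requested; the `TYPE`/`TYPE4`/`convert_TYPE4` macros used by the `integral_cols`/`integral_rows` kernels at the top of this page are supplied through the same build-option string. A hedged sketch of the capability check on its own:

```cpp
// Hedged sketch: the same fp64 capability check, usable before asking for CV_64F sums.
#include <opencv2/core/ocl.hpp>

bool canUseDoubleSqsum()
{
    const cv::ocl::Device& dev = cv::ocl::Device::getDefault();
    return cv::ocl::useOpenCL() && dev.doubleFPConfig() > 0;   // 0 means no fp64 support
}
```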
void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth )
void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth, int sqdepth )
{
    Mat src = _src.getMat(), sum, sqsum, tilted;
    int depth = src.depth(), cn = src.channels();
    Size isize(src.cols + 1, src.rows + 1);

    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    if( sdepth <= 0 )
        sdepth = depth == CV_8U ? CV_32S : CV_64F;
    sdepth = CV_MAT_DEPTH(sdepth);
    if ( sqdepth <= 0 )
        sqdepth = CV_64F;
    sdepth = CV_MAT_DEPTH(sdepth), sqdepth = CV_MAT_DEPTH(sqdepth);

    if (ocl::useOpenCL() && _sum.isUMat() && !_tilted.needed())
    {
        if (!_sqsum.needed())
        {
            if (ocl_integral(_src, _sum, sdepth))
                return;
        }
        else if (_sqsum.isUMat())
        {
            if (ocl_integral(_src, _sum, _sqsum, sdepth, sqdepth))
                return;
        }
    }

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    if( ( depth == CV_8U ) && ( !_tilted.needed() ) )
@@ -250,9 +362,9 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
        IppiSize srcRoiSize = ippiSize( src.cols, src.rows );
        _sum.create( isize, CV_MAKETYPE( sdepth, cn ) );
        sum = _sum.getMat();
        if( _sqsum.needed() )
        if( _sqsum.needed() && sqdepth == CV_64F )
        {
            _sqsum.create( isize, CV_MAKETYPE( CV_64F, cn ) );
            _sqsum.create( isize, CV_MAKETYPE( sqdepth, cn ) );
            sqsum = _sqsum.getMat();
            ippiSqrIntegral_8u32f64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 );
        }
@@ -270,9 +382,9 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
        IppiSize srcRoiSize = ippiSize( src.cols, src.rows );
        _sum.create( isize, CV_MAKETYPE( sdepth, cn ) );
        sum = _sum.getMat();
        if( _sqsum.needed() )
        if( _sqsum.needed() && sqdepth == CV_64F )
        {
            _sqsum.create( isize, CV_MAKETYPE( CV_64F, cn ) );
            _sqsum.create( isize, CV_MAKETYPE( sqdepth, cn ) );
            sqsum = _sqsum.getMat();
            ippiSqrIntegral_8u32s64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 );
        }
@@ -286,8 +398,15 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
    }
#endif

    Size ssize = _src.size(), isize(ssize.width + 1, ssize.height + 1);
    _sum.create( isize, CV_MAKETYPE(sdepth, cn) );
    sum = _sum.getMat();
    Mat src = _src.getMat(), sum = _sum.getMat(), sqsum, tilted;

    if( _sqsum.needed() )
    {
        _sqsum.create( isize, CV_MAKETYPE(sqdepth, cn) );
        sqsum = _sqsum.getMat();
    }

    if( _tilted.needed() )
    {
@@ -295,30 +414,29 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
        tilted = _tilted.getMat();
    }

    if( _sqsum.needed() )
    {
        _sqsum.create( isize, CV_MAKETYPE(CV_64F, cn) );
        sqsum = _sqsum.getMat();
    }

    IntegralFunc func = 0;

    if( depth == CV_8U && sdepth == CV_32S )
    if( depth == CV_8U && sdepth == CV_32S && sqdepth == CV_64F )
        func = (IntegralFunc)GET_OPTIMIZED(integral_8u32s);
    else if( depth == CV_8U && sdepth == CV_32F )
        func = (IntegralFunc)integral_8u32f;
    else if( depth == CV_8U && sdepth == CV_64F )
        func = (IntegralFunc)integral_8u64f;
    else if( depth == CV_16U && sdepth == CV_64F )
        func = (IntegralFunc)integral_16u64f;
    else if( depth == CV_16S && sdepth == CV_64F )
        func = (IntegralFunc)integral_16s64f;
    else if( depth == CV_32F && sdepth == CV_32F )
        func = (IntegralFunc)integral_32f;
    else if( depth == CV_32F && sdepth == CV_64F )
        func = (IntegralFunc)integral_32f64f;
    else if( depth == CV_64F && sdepth == CV_64F )
        func = (IntegralFunc)integral_64f;
    else if( depth == CV_8U && sdepth == CV_32S && sqdepth == CV_32F )
        func = (IntegralFunc)integral_8u32s32f;
    else if( depth == CV_8U && sdepth == CV_32F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_8u32f64f;
    else if( depth == CV_8U && sdepth == CV_32F && sqdepth == CV_32F )
        func = (IntegralFunc)integral_8u32f32f;
    else if( depth == CV_8U && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_8u64f64f;
    else if( depth == CV_16U && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_16u64f64f;
    else if( depth == CV_16S && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_16s64f64f;
    else if( depth == CV_32F && sdepth == CV_32F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_32f32f64f;
    else if( depth == CV_32F && sdepth == CV_32F && sqdepth == CV_32F )
        func = (IntegralFunc)integral_32f32f32f;
    else if( depth == CV_32F && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_32f64f64f;
    else if( depth == CV_64F && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_64f64f64f;
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

@@ -331,9 +449,9 @@ void cv::integral( InputArray src, OutputArray sum, int sdepth )
    integral( src, sum, noArray(), noArray(), sdepth );
}

void cv::integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth )
void cv::integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth, int sqdepth )
{
    integral( src, sum, sqsum, noArray(), sdepth, sqdepth );
}

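The extra `sqdepth` parameter introduced in these hunks lets callers choose the square-sum depth instead of always getting CV_64F; combined with UMat outputs, that is what routes the call into the `ocl_integral()` overloads above. A hedged usage sketch (types are illustrative):

```cpp
// Hedged usage sketch of the new cv::integral overload with an explicit sqdepth.
#include <opencv2/imgproc.hpp>

void integrals(const cv::UMat& img8u, cv::UMat& sum, cv::UMat& sqsum)
{
    // Sum as 32-bit integers, squared sum as 32-bit floats; with UMat outputs the
    // OpenCL branch in cv::integral() can be taken, otherwise the CPU table is used.
    cv::integral(img8u, sum, sqsum, CV_32S, CV_32F);
}
```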