Merge branch '2.4'
@@ -59,6 +59,8 @@ void cv::gpu::magnitudeSqr(const GpuMat&, const GpuMat&, GpuMat&, Stream&) { thr
 void cv::gpu::phase(const GpuMat&, const GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
 void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
 void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, Stream&) { throw_nogpu(); }
+void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&) { throw_nogpu(); }
+void cv::gpu::normalize(const GpuMat&, GpuMat&, double, double, int, int, const GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }

 #else /* !defined (HAVE_CUDA) */

@@ -529,4 +531,47 @@ void cv::gpu::polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat&
     polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));
 }

+////////////////////////////////////////////////////////////////////////
+// normalize
+
+void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask)
+{
+    GpuMat norm_buf;
+    GpuMat cvt_buf;
+    normalize(src, dst, a, b, norm_type, dtype, mask, norm_buf, cvt_buf);
+}
+
+void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
+{
+    double scale = 1, shift = 0;
+
+    if (norm_type == NORM_MINMAX)
+    {
+        double smin = 0, smax = 0;
+        double dmin = std::min(a, b), dmax = std::max(a, b);
+        minMax(src, &smin, &smax, mask, norm_buf);
+        scale = (dmax - dmin) * (smax - smin > numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
+        shift = dmin - smin * scale;
+    }
+    else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
+    {
+        scale = norm(src, norm_type, mask, norm_buf);
+        scale = scale > numeric_limits<double>::epsilon() ? a / scale : 0.0;
+        shift = 0;
+    }
+    else
+    {
+        CV_Error(CV_StsBadArg, "Unknown/unsupported norm type");
+    }
+
+    if (mask.empty())
+    {
+        src.convertTo(dst, dtype, scale, shift);
+    }
+    else
+    {
+        src.convertTo(cvt_buf, dtype, scale, shift);
+        cvt_buf.copyTo(dst, mask);
+    }
+}
+
 #endif /* !defined (HAVE_CUDA) */

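The two overloads above give callers a choice: the short form allocates the reduction and conversion temporaries on every call, while the long form lets them be reused across calls. A minimal usage sketch, assuming the 2.4-era gpu headers; the file name, mask contents, and loop count are illustrative, not from this commit:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::Mat host = cv::imread("input.png", 0);           // 8-bit, single channel
    cv::gpu::GpuMat src(host), dst;
    cv::gpu::GpuMat mask(host.size(), CV_8UC1, cv::Scalar::all(255));

    // Short form: norm_buf/cvt_buf are allocated internally on each call.
    cv::gpu::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8U, mask);

    // Long form: pass explicit buffers to avoid reallocation in a loop.
    cv::gpu::GpuMat norm_buf, cvt_buf;
    for (int i = 0; i < 100; ++i)
        cv::gpu::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8U, mask, norm_buf, cvt_buf);

    return 0;
}
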
@@ -352,8 +352,8 @@ namespace sum
         }
     };

-    template <int BLOCK_SIZE, typename src_type, typename result_type, class Op>
-    __global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Op op, const int twidth, const int theight)
+    template <int BLOCK_SIZE, typename src_type, typename result_type, class Mask, class Op>
+    __global__ void kernel(const PtrStepSz<src_type> src, result_type* result, const Mask mask, const Op op, const int twidth, const int theight)
     {
         typedef typename VecTraits<src_type>::elem_type T;
         typedef typename VecTraits<result_type>::elem_type R;

@@ -375,9 +375,11 @@ namespace sum

         for (int j = 0, x = x0; j < twidth && x < src.cols; ++j, x += blockDim.x)
         {
-            const src_type srcVal = ptr[x];
-
-            sum = sum + op(saturate_cast<result_type>(srcVal));
+            if (mask(y, x))
+            {
+                const src_type srcVal = ptr[x];
+                sum = sum + op(saturate_cast<result_type>(srcVal));
+            }
         }
     }

@@ -410,7 +412,7 @@ namespace sum
     }

     template <typename T, typename R, int cn, template <typename> class Op>
-    void caller(PtrStepSzb src_, void* buf_, double* out)
+    void caller(PtrStepSzb src_, void* buf_, double* out, PtrStepSzb mask)
     {
         typedef typename TypeVec<T, cn>::vec_type src_type;
         typedef typename TypeVec<R, cn>::vec_type result_type;

@@ -426,7 +428,10 @@ namespace sum

         Op<result_type> op;

-        kernel<threads_x * threads_y><<<grid, block>>>(src, buf, op, twidth, theight);
+        if (mask.data)
+            kernel<threads_x * threads_y><<<grid, block>>>(src, buf, SingleMask(mask), op, twidth, theight);
+        else
+            kernel<threads_x * threads_y><<<grid, block>>>(src, buf, WithOutMask(), op, twidth, theight);
         cudaSafeCall( cudaGetLastError() );

         cudaSafeCall( cudaDeviceSynchronize() );

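The caller above selects the kernel's Mask policy at compile time: SingleMask tests a per-pixel byte mask, while WithOutMask always answers true, so the unmasked instantiation carries no per-element branch once the functor is inlined. A host-side sketch of the same pattern; the functor names here (AlwaysTrue, ByteMask) are illustrative, not the OpenCV device classes:

#include <cstddef>
#include <vector>

struct AlwaysTrue
{
    bool operator()(std::size_t) const { return true; }
};

struct ByteMask
{
    const unsigned char* m;
    bool operator()(std::size_t i) const { return m[i] != 0; }
};

template <class Mask>
double maskedSum(const std::vector<double>& v, Mask mask)
{
    double s = 0.0;
    for (std::size_t i = 0; i < v.size(); ++i)
        if (mask(i))              // for AlwaysTrue this test folds away after inlining
            s += v[i];
    return s;
}

Because Mask is a template parameter, maskedSum<AlwaysTrue> and maskedSum<ByteMask> compile to two separate loops, which is exactly why the diff instantiates the kernel twice instead of passing a runtime flag.
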
@@ -450,88 +455,88 @@ namespace sum
     template <> struct SumType<double> { typedef double R; };

     template <typename T, int cn>
-    void run(PtrStepSzb src, void* buf, double* out)
+    void run(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
     {
         typedef typename SumType<T>::R R;
-        caller<T, R, cn, identity>(src, buf, out);
+        caller<T, R, cn, identity>(src, buf, out, mask);
     }

-    template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<schar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<schar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<schar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<schar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<short, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<short, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<short, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<short, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<int, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<int, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<int, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<int, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<float, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<float, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<float, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<float, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void run<double, 1>(PtrStepSzb src, void* buf, double* out);
-    template void run<double, 2>(PtrStepSzb src, void* buf, double* out);
-    template void run<double, 3>(PtrStepSzb src, void* buf, double* out);
-    template void run<double, 4>(PtrStepSzb src, void* buf, double* out);
+    template void run<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void run<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

     template <typename T, int cn>
-    void runAbs(PtrStepSzb src, void* buf, double* out)
+    void runAbs(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
     {
         typedef typename SumType<T>::R R;
-        caller<T, R, cn, abs_func>(src, buf, out);
+        caller<T, R, cn, abs_func>(src, buf, out, mask);
     }

-    template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runAbs<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runAbs<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

     template <typename T> struct Sqr : unary_function<T, T>
     {
@@ -542,45 +547,45 @@ namespace sum
     };

     template <typename T, int cn>
-    void runSqr(PtrStepSzb src, void* buf, double* out)
+    void runSqr(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask)
     {
-        caller<T, double, cn, Sqr>(src, buf, out);
+        caller<T, double, cn, Sqr>(src, buf, out, mask);
     }

-    template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<uchar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<uchar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<uchar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<uchar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<schar, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<schar, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<schar, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<schar, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<ushort, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<ushort, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<ushort, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<ushort, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<short, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<short, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<short, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<short, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<int, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<int, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<int, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<int, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<float, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<float, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<float, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<float, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);

-    template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out);
-    template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out);
+    template void runSqr<double, 1>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<double, 2>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<double, 3>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
+    template void runSqr<double, 4>(PtrStepSzb src, void* buf, double* out, PtrStepSzb mask);
 }

 /////////////////////////////////////////////////////////////

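The long lists of `template void run<...>` lines are explicit instantiations: the kernel templates are defined in this nvcc-compiled `.cu` translation unit, while the host wrappers shown further down in this commit only see declarations, so every type/channel combination used there must be instantiated here or the link fails. A two-file sketch of the pattern, with illustrative file and function names:

// kernels.cu (compiled by nvcc): definition plus explicit instantiations.
template <typename T>
T twice(T v)
{
    return v + v;
}

template int    twice<int>(int);
template double twice<double>(double);

// host.cpp (compiled by the host compiler): declaration only; the linker
// resolves twice<int> against the instantiation emitted from kernels.cu.
template <typename T> T twice(T v);

int main()
{
    return twice(21) == 42 ? 0 : 1;
}
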
@@ -42,51 +42,37 @@

 #include "precomp.hpp"

 using namespace std;
 using namespace cv;
 using namespace cv::gpu;

-#if defined HAVE_CUDA
-
-struct Stream::Impl
-{
-    static cudaStream_t getStream(const Impl* impl) { return impl ? impl->stream : 0; }
-    cudaStream_t stream;
-    int ref_counter;
-};
-
-#include "opencv2/gpu/stream_accessor.hpp"
-
-CV_EXPORTS cudaStream_t cv::gpu::StreamAccessor::getStream(const Stream& stream)
-{
-    return Stream::Impl::getStream(stream.impl);
-};
-
-#endif /* !defined (HAVE_CUDA) */
-
-
 #if !defined (HAVE_CUDA)

-void cv::gpu::Stream::create() { throw_nogpu(); }
-void cv::gpu::Stream::release() { throw_nogpu(); }
-cv::gpu::Stream::Stream() : impl(0) { throw_nogpu(); }
-cv::gpu::Stream::~Stream() { throw_nogpu(); }
-cv::gpu::Stream::Stream(const Stream& /*stream*/) { throw_nogpu(); }
-Stream& cv::gpu::Stream::operator=(const Stream& /*stream*/) { throw_nogpu(); return *this; }
-bool cv::gpu::Stream::queryIfComplete() { throw_nogpu(); return true; }
+cv::gpu::Stream::Stream() { throw_nogpu(); }
+cv::gpu::Stream::~Stream() {}
+cv::gpu::Stream::Stream(const Stream&) { throw_nogpu(); }
+Stream& cv::gpu::Stream::operator=(const Stream&) { throw_nogpu(); return *this; }
+bool cv::gpu::Stream::queryIfComplete() { throw_nogpu(); return false; }
 void cv::gpu::Stream::waitForCompletion() { throw_nogpu(); }
-void cv::gpu::Stream::enqueueDownload(const GpuMat& /*src*/, Mat& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueDownload(const GpuMat& /*src*/, CudaMem& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueUpload(const CudaMem& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueUpload(const Mat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueCopy(const GpuMat& /*src*/, GpuMat& /*dst*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueMemSet(GpuMat& /*src*/, Scalar /*val*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueMemSet(GpuMat& /*src*/, Scalar /*val*/, const GpuMat& /*mask*/) { throw_nogpu(); }
-void cv::gpu::Stream::enqueueConvert(const GpuMat& /*src*/, GpuMat& /*dst*/, int /*type*/, double /*a*/, double /*b*/) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueDownload(const GpuMat&, Mat&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueDownload(const GpuMat&, CudaMem&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueUpload(const CudaMem&, GpuMat&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueUpload(const Mat&, GpuMat&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueCopy(const GpuMat&, GpuMat&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueMemSet(GpuMat&, Scalar) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueMemSet(GpuMat&, Scalar, const GpuMat&) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueConvert(const GpuMat&, GpuMat&, int, double, double) { throw_nogpu(); }
+void cv::gpu::Stream::enqueueHostCallback(StreamCallback, void*) { throw_nogpu(); }
+Stream& cv::gpu::Stream::Null() { throw_nogpu(); static Stream s; return s; }
+cv::gpu::Stream::operator bool() const { throw_nogpu(); return false; }
+cv::gpu::Stream::Stream(Impl*) { throw_nogpu(); }
+void cv::gpu::Stream::create() { throw_nogpu(); }
+void cv::gpu::Stream::release() { throw_nogpu(); }

 #else /* !defined (HAVE_CUDA) */

+#include "opencv2/gpu/stream_accessor.hpp"
+
 namespace cv { namespace gpu
 {
     void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream);
@@ -95,14 +81,247 @@ namespace cv { namespace gpu
     void setTo(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream);
 }}

+struct Stream::Impl
+{
+    static cudaStream_t getStream(const Impl* impl)
+    {
+        return impl ? impl->stream : 0;
+    }
+
+    cudaStream_t stream;
+    int ref_counter;
+};
+
+cudaStream_t cv::gpu::StreamAccessor::getStream(const Stream& stream)
+{
+    return Stream::Impl::getStream(stream.impl);
+}
+
+cv::gpu::Stream::Stream() : impl(0)
+{
+    create();
+}
+
+cv::gpu::Stream::~Stream()
+{
+    release();
+}
+
+cv::gpu::Stream::Stream(const Stream& stream) : impl(stream.impl)
+{
+    if (impl)
+        CV_XADD(&impl->ref_counter, 1);
+}
+
+Stream& cv::gpu::Stream::operator =(const Stream& stream)
+{
+    if (this != &stream)
+    {
+        release();
+        impl = stream.impl;
+        if (impl)
+            CV_XADD(&impl->ref_counter, 1);
+    }
+
+    return *this;
+}
+
+bool cv::gpu::Stream::queryIfComplete()
+{
+    cudaStream_t stream = Impl::getStream(impl);
+    cudaError_t err = cudaStreamQuery(stream);
+
+    if (err == cudaErrorNotReady || err == cudaSuccess)
+        return err == cudaSuccess;
+
+    cudaSafeCall(err);
+    return false;
+}
+
+void cv::gpu::Stream::waitForCompletion()
+{
+    cudaStream_t stream = Impl::getStream(impl);
+    cudaSafeCall( cudaStreamSynchronize(stream) );
+}
+
+void cv::gpu::Stream::enqueueDownload(const GpuMat& src, Mat& dst)
+{
+    // if not -> allocation will be done, but after that dst will not point to page locked memory
+    CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
+
+    cudaStream_t stream = Impl::getStream(impl);
+    size_t bwidth = src.cols * src.elemSize();
+    cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, cudaMemcpyDeviceToHost, stream) );
+}
+
+void cv::gpu::Stream::enqueueDownload(const GpuMat& src, CudaMem& dst)
+{
+    dst.create(src.size(), src.type(), CudaMem::ALLOC_PAGE_LOCKED);
+
+    cudaStream_t stream = Impl::getStream(impl);
+    size_t bwidth = src.cols * src.elemSize();
+    cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, cudaMemcpyDeviceToHost, stream) );
+}
+
+void cv::gpu::Stream::enqueueUpload(const CudaMem& src, GpuMat& dst)
+{
+    dst.create(src.size(), src.type());
+
+    cudaStream_t stream = Impl::getStream(impl);
+    size_t bwidth = src.cols * src.elemSize();
+    cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, cudaMemcpyHostToDevice, stream) );
+}
+
+void cv::gpu::Stream::enqueueUpload(const Mat& src, GpuMat& dst)
+{
+    dst.create(src.size(), src.type());
+
+    cudaStream_t stream = Impl::getStream(impl);
+    size_t bwidth = src.cols * src.elemSize();
+    cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, cudaMemcpyHostToDevice, stream) );
+}
+
+void cv::gpu::Stream::enqueueCopy(const GpuMat& src, GpuMat& dst)
+{
+    dst.create(src.size(), src.type());
+
+    cudaStream_t stream = Impl::getStream(impl);
+    size_t bwidth = src.cols * src.elemSize();
+    cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, cudaMemcpyDeviceToDevice, stream) );
+}
+
+void cv::gpu::Stream::enqueueMemSet(GpuMat& src, Scalar val)
+{
+    const int sdepth = src.depth();
+
+    if (sdepth == CV_64F)
+    {
+        if (!deviceSupports(NATIVE_DOUBLE))
+            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+    }
+
+    cudaStream_t stream = Impl::getStream(impl);
+
+    if (val[0] == 0.0 && val[1] == 0.0 && val[2] == 0.0 && val[3] == 0.0)
+    {
+        cudaSafeCall( cudaMemset2DAsync(src.data, src.step, 0, src.cols * src.elemSize(), src.rows, stream) );
+        return;
+    }
+
+    if (sdepth == CV_8U)
+    {
+        int cn = src.channels();
+
+        if (cn == 1 || (cn == 2 && val[0] == val[1]) || (cn == 3 && val[0] == val[1] && val[0] == val[2]) || (cn == 4 && val[0] == val[1] && val[0] == val[2] && val[0] == val[3]))
+        {
+            int ival = saturate_cast<uchar>(val[0]);
+            cudaSafeCall( cudaMemset2DAsync(src.data, src.step, ival, src.cols * src.elemSize(), src.rows, stream) );
+            return;
+        }
+    }
+
+    setTo(src, val, stream);
+}
+
+void cv::gpu::Stream::enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask)
+{
+    const int sdepth = src.depth();
+
+    if (sdepth == CV_64F)
+    {
+        if (!deviceSupports(NATIVE_DOUBLE))
+            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+    }
+
+    CV_Assert(mask.type() == CV_8UC1);
+
+    cudaStream_t stream = Impl::getStream(impl);
+
+    setTo(src, val, mask, stream);
+}
+
+void cv::gpu::Stream::enqueueConvert(const GpuMat& src, GpuMat& dst, int dtype, double alpha, double beta)
+{
+    if (dtype < 0)
+        dtype = src.type();
+    else
+        dtype = CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels());
+
+    const int sdepth = src.depth();
+    const int ddepth = CV_MAT_DEPTH(dtype);
+
+    if (sdepth == CV_64F || ddepth == CV_64F)
+    {
+        if (!deviceSupports(NATIVE_DOUBLE))
+            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
+    }
+
+    bool noScale = fabs(alpha - 1) < numeric_limits<double>::epsilon() && fabs(beta) < numeric_limits<double>::epsilon();
+
+    if (sdepth == ddepth && noScale)
+    {
+        enqueueCopy(src, dst);
+        return;
+    }
+
+    dst.create(src.size(), dtype);
+
+    cudaStream_t stream = Impl::getStream(impl);
+    convertTo(src, dst, alpha, beta, stream);
+}
+
+#if CUDA_VERSION >= 5000
+
 namespace
 {
-    template<class S, class D> void devcopy(const S& src, D& dst, cudaStream_t s, cudaMemcpyKind k)
+    struct CallbackData
     {
-        dst.create(src.size(), src.type());
-        size_t bwidth = src.cols * src.elemSize();
-        cudaSafeCall( cudaMemcpy2DAsync(dst.data, dst.step, src.data, src.step, bwidth, src.rows, k, s) );
+        cv::gpu::Stream::StreamCallback callback;
+        void* userData;
+        Stream stream;
     };

+    void CUDART_CB cudaStreamCallback(cudaStream_t, cudaError_t status, void* userData)
+    {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userData);
+        data->callback(data->stream, static_cast<int>(status), data->userData);
+        delete data;
+    }
 }

+#endif
+
+void cv::gpu::Stream::enqueueHostCallback(StreamCallback callback, void* userData)
+{
+#if CUDA_VERSION >= 5000
+    CallbackData* data = new CallbackData;
+    data->callback = callback;
+    data->userData = userData;
+    data->stream = *this;
+
+    cudaStream_t stream = Impl::getStream(impl);
+
+    cudaSafeCall( cudaStreamAddCallback(stream, cudaStreamCallback, data, 0) );
+#else
+    (void) callback;
+    (void) userData;
+    CV_Error(CV_StsNotImplemented, "This function requires CUDA 5.0");
+#endif
+}
+
+cv::gpu::Stream& cv::gpu::Stream::Null()
+{
+    static Stream s((Impl*) 0);
+    return s;
+}
+
+cv::gpu::Stream::operator bool() const
+{
+    return impl && impl->stream;
+}
+
+cv::gpu::Stream::Stream(Impl* impl_) : impl(impl_)
+{
+}
+
 void cv::gpu::Stream::create()
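With these implementations in place, a Stream only queues work: uploads, conversions, and downloads return immediately, and the page-locked CudaMem type keeps the 2D copies truly asynchronous (a plain pageable Mat forces the driver to stage the copy). A minimal usage sketch; the image size and CV_32F conversion are illustrative, not from this commit:

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // Page-locked host buffers for genuinely asynchronous transfers.
    cv::gpu::CudaMem host_src(480, 640, CV_8UC1, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
    cv::gpu::CudaMem host_dst;
    cv::gpu::GpuMat d_src, d_dst;

    cv::gpu::Stream stream;
    stream.enqueueUpload(host_src, d_src);         // host -> device
    stream.enqueueConvert(d_src, d_dst, CV_32F);   // queued on the same stream
    stream.enqueueDownload(d_dst, host_dst);       // device -> host

    // Everything above was only queued; queryIfComplete() would poll here,
    // waitForCompletion() blocks until the whole pipeline has drained.
    stream.waitForCompletion();
    return 0;
}
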
@@ -113,7 +332,7 @@ void cv::gpu::Stream::create()
     cudaStream_t stream;
     cudaSafeCall( cudaStreamCreate( &stream ) );

-    impl = (Stream::Impl*)fastMalloc(sizeof(Stream::Impl));
+    impl = (Stream::Impl*) fastMalloc(sizeof(Stream::Impl));

     impl->stream = stream;
     impl->ref_counter = 1;
@@ -121,133 +340,11 @@ void cv::gpu::Stream::create()

 void cv::gpu::Stream::release()
 {
-    if( impl && CV_XADD(&impl->ref_counter, -1) == 1 )
+    if (impl && CV_XADD(&impl->ref_counter, -1) == 1)
     {
-        cudaSafeCall( cudaStreamDestroy( impl->stream ) );
-        cv::fastFree( impl );
+        cudaSafeCall( cudaStreamDestroy(impl->stream) );
+        cv::fastFree(impl);
     }
 }

-cv::gpu::Stream::Stream() : impl(0) { create(); }
-cv::gpu::Stream::~Stream() { release(); }
-
-cv::gpu::Stream::Stream(const Stream& stream) : impl(stream.impl)
-{
-    if( impl )
-        CV_XADD(&impl->ref_counter, 1);
-}
-Stream& cv::gpu::Stream::operator=(const Stream& stream)
-{
-    if( this != &stream )
-    {
-        if( stream.impl )
-            CV_XADD(&stream.impl->ref_counter, 1);
-
-        release();
-        impl = stream.impl;
-    }
-    return *this;
-}
-
-bool cv::gpu::Stream::queryIfComplete()
-{
-    cudaError_t err = cudaStreamQuery( Impl::getStream(impl) );
-
-    if (err == cudaErrorNotReady || err == cudaSuccess)
-        return err == cudaSuccess;
-
-    cudaSafeCall(err);
-    return false;
-}
-
-void cv::gpu::Stream::waitForCompletion() { cudaSafeCall( cudaStreamSynchronize( Impl::getStream(impl) ) ); }
-
-void cv::gpu::Stream::enqueueDownload(const GpuMat& src, Mat& dst)
-{
-    // if not -> allocation will be done, but after that dst will not point to page locked memory
-    CV_Assert(src.cols == dst.cols && src.rows == dst.rows && src.type() == dst.type() );
-    devcopy(src, dst, Impl::getStream(impl), cudaMemcpyDeviceToHost);
-}
-void cv::gpu::Stream::enqueueDownload(const GpuMat& src, CudaMem& dst) { devcopy(src, dst, Impl::getStream(impl), cudaMemcpyDeviceToHost); }
-
-void cv::gpu::Stream::enqueueUpload(const CudaMem& src, GpuMat& dst){ devcopy(src, dst, Impl::getStream(impl), cudaMemcpyHostToDevice); }
-void cv::gpu::Stream::enqueueUpload(const Mat& src, GpuMat& dst)  { devcopy(src, dst, Impl::getStream(impl), cudaMemcpyHostToDevice); }
-void cv::gpu::Stream::enqueueCopy(const GpuMat& src, GpuMat& dst) { devcopy(src, dst, Impl::getStream(impl), cudaMemcpyDeviceToDevice); }
-
-void cv::gpu::Stream::enqueueMemSet(GpuMat& src, Scalar s)
-{
-    CV_Assert((src.depth() != CV_64F) ||
-        (TargetArchs::builtWith(NATIVE_DOUBLE) && DeviceInfo().supports(NATIVE_DOUBLE)));
-
-    if (s[0] == 0.0 && s[1] == 0.0 && s[2] == 0.0 && s[3] == 0.0)
-    {
-        cudaSafeCall( cudaMemset2DAsync(src.data, src.step, 0, src.cols * src.elemSize(), src.rows, Impl::getStream(impl)) );
-        return;
-    }
-    if (src.depth() == CV_8U)
-    {
-        int cn = src.channels();
-
-        if (cn == 1 || (cn == 2 && s[0] == s[1]) || (cn == 3 && s[0] == s[1] && s[0] == s[2]) || (cn == 4 && s[0] == s[1] && s[0] == s[2] && s[0] == s[3]))
-        {
-            int val = saturate_cast<uchar>(s[0]);
-            cudaSafeCall( cudaMemset2DAsync(src.data, src.step, val, src.cols * src.elemSize(), src.rows, Impl::getStream(impl)) );
-            return;
-        }
-    }
-
-    setTo(src, s, Impl::getStream(impl));
-}
-
-void cv::gpu::Stream::enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask)
-{
-    CV_Assert((src.depth() != CV_64F) ||
-        (TargetArchs::builtWith(NATIVE_DOUBLE) && DeviceInfo().supports(NATIVE_DOUBLE)));
-
-    CV_Assert(mask.type() == CV_8UC1);
-
-    setTo(src, val, mask, Impl::getStream(impl));
-}
-
-void cv::gpu::Stream::enqueueConvert(const GpuMat& src, GpuMat& dst, int rtype, double alpha, double beta)
-{
-    CV_Assert((src.depth() != CV_64F && CV_MAT_DEPTH(rtype) != CV_64F) ||
-        (TargetArchs::builtWith(NATIVE_DOUBLE) && DeviceInfo().supports(NATIVE_DOUBLE)));
-
-    bool noScale = fabs(alpha-1) < std::numeric_limits<double>::epsilon() && fabs(beta) < std::numeric_limits<double>::epsilon();
-
-    if( rtype < 0 )
-        rtype = src.type();
-    else
-        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), src.channels());
-
-    int sdepth = src.depth(), ddepth = CV_MAT_DEPTH(rtype);
-    if( sdepth == ddepth && noScale )
-    {
-        src.copyTo(dst);
-        return;
-    }
-
-    GpuMat temp;
-    const GpuMat* psrc = &src;
-    if( sdepth != ddepth && psrc == &dst )
-        psrc = &(temp = src);
-
-    dst.create( src.size(), rtype );
-    convertTo(src, dst, alpha, beta, Impl::getStream(impl));
-}
-
-cv::gpu::Stream::operator bool() const
-{
-    return impl && impl->stream;
-}
-
-cv::gpu::Stream::Stream(Impl* impl_) : impl(impl_) {}
-
-cv::gpu::Stream& cv::gpu::Stream::Null()
-{
-    static Stream s((Impl*)0);
-    return s;
-}

 #endif /* !defined (HAVE_CUDA) */

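The new enqueueHostCallback wraps cudaStreamAddCallback, hence the CUDA 5.0 guard: the callback fires on a driver thread once everything queued before it on the stream has finished, and it must not issue CUDA calls itself. A minimal usage sketch; the callback body and image size are illustrative:

#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

static void onBatchDone(cv::gpu::Stream&, int status, void* userData)
{
    // Runs on a CUDA driver thread after all prior work on the stream;
    // keep it short and make no CUDA calls here.
    std::printf("batch %d finished, status = %d\n", *static_cast<int*>(userData), status);
}

int main()
{
    cv::gpu::GpuMat d_img(480, 640, CV_8UC1);
    cv::gpu::Stream stream;
    static int batchId = 0;

    stream.enqueueMemSet(d_img, cv::Scalar::all(128));
    stream.enqueueHostCallback(onBatchDone, &batchId);   // fires after the memset

    stream.waitForCompletion();
    return 0;
}
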
@@ -121,7 +121,9 @@ void cv::gpu::HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, f
     buf.accum.setTo(Scalar::all(0));

     DeviceInfo devInfo;
-    linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, devInfo.sharedMemPerBlock(), devInfo.supports(FEATURE_SET_COMPUTE_20));
+    cudaDeviceProp prop;
+    cudaSafeCall(cudaGetDeviceProperties(&prop, devInfo.deviceID()));
+    linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, prop.sharedMemPerBlock, devInfo.supports(FEATURE_SET_COMPUTE_20));

     ensureSizeIsEnough(2, maxLines, CV_32FC2, lines);

@@ -194,7 +196,9 @@ void cv::gpu::HoughLinesP(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf,
     buf.accum.setTo(Scalar::all(0));

     DeviceInfo devInfo;
-    linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, devInfo.sharedMemPerBlock(), devInfo.supports(FEATURE_SET_COMPUTE_20));
+    cudaDeviceProp prop;
+    cudaSafeCall(cudaGetDeviceProperties(&prop, devInfo.deviceID()));
+    linesAccum_gpu(srcPoints, pointsCount, buf.accum, rho, theta, prop.sharedMemPerBlock, devInfo.supports(FEATURE_SET_COMPUTE_20));

     ensureSizeIsEnough(1, maxLines, CV_32SC4, lines);

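Both Hough hunks replace the DeviceInfo accessor with a direct CUDA runtime query: cudaGetDeviceProperties fills a cudaDeviceProp with per-device limits, and sharedMemPerBlock is what the accumulator kernel is sized against. A standalone sketch of the query, assuming only the CUDA runtime API:

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    int dev = 0;
    cudaGetDevice(&dev);

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);

    // sharedMemPerBlock bounds how large a per-block Hough accumulator can be.
    std::printf("device %d: %s, %zu bytes of shared memory per block\n",
                dev, prop.name, prop.sharedMemPerBlock);
    return 0;
}
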
@@ -51,13 +51,17 @@ void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&) { throw_nogpu(); }
 void cv::gpu::meanStdDev(const GpuMat&, Scalar&, Scalar&, GpuMat&) { throw_nogpu(); }
 double cv::gpu::norm(const GpuMat&, int) { throw_nogpu(); return 0.0; }
 double cv::gpu::norm(const GpuMat&, int, GpuMat&) { throw_nogpu(); return 0.0; }
+double cv::gpu::norm(const GpuMat&, int, const GpuMat&, GpuMat&) { throw_nogpu(); return 0.0; }
 double cv::gpu::norm(const GpuMat&, const GpuMat&, int) { throw_nogpu(); return 0.0; }
 Scalar cv::gpu::sum(const GpuMat&) { throw_nogpu(); return Scalar(); }
 Scalar cv::gpu::sum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
+Scalar cv::gpu::sum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
 Scalar cv::gpu::absSum(const GpuMat&) { throw_nogpu(); return Scalar(); }
 Scalar cv::gpu::absSum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
+Scalar cv::gpu::absSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
 Scalar cv::gpu::sqrSum(const GpuMat&) { throw_nogpu(); return Scalar(); }
 Scalar cv::gpu::sqrSum(const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
+Scalar cv::gpu::sqrSum(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); return Scalar(); }
 void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&) { throw_nogpu(); }
 void cv::gpu::minMax(const GpuMat&, double*, double*, const GpuMat&, GpuMat&) { throw_nogpu(); }
 void cv::gpu::minMaxLoc(const GpuMat&, double*, double*, Point*, Point*, const GpuMat&) { throw_nogpu(); }

@@ -150,24 +154,30 @@ void cv::gpu::meanStdDev(const GpuMat& src, Scalar& mean, Scalar& stddev, GpuMat
 double cv::gpu::norm(const GpuMat& src, int normType)
 {
     GpuMat buf;
-    return norm(src, normType, buf);
+    return norm(src, normType, GpuMat(), buf);
 }

 double cv::gpu::norm(const GpuMat& src, int normType, GpuMat& buf)
 {
+    return norm(src, normType, GpuMat(), buf);
+}
+
+double cv::gpu::norm(const GpuMat& src, int normType, const GpuMat& mask, GpuMat& buf)
+{
     CV_Assert(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2);
+    CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size() && src.channels() == 1));

     GpuMat src_single_channel = src.reshape(1);

     if (normType == NORM_L1)
-        return absSum(src_single_channel, buf)[0];
+        return absSum(src_single_channel, mask, buf)[0];

     if (normType == NORM_L2)
-        return std::sqrt(sqrSum(src_single_channel, buf)[0]);
+        return std::sqrt(sqrSum(src_single_channel, mask, buf)[0]);

     // NORM_INF
     double min_val, max_val;
-    minMax(src_single_channel, &min_val, &max_val, GpuMat(), buf);
+    minMax(src_single_channel, &min_val, &max_val, mask, buf);
     return std::max(std::abs(min_val), std::abs(max_val));
 }

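With the new overload, a norm can be restricted to a region of interest; per the asserts, the mask must be CV_8UC1 and match the source size, and the masked path requires a single-channel source. A usage sketch with illustrative sizes:

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::gpu::GpuMat img(480, 640, CV_32FC1, cv::Scalar::all(1));

    // Only score the top half of the image.
    cv::gpu::GpuMat mask(img.size(), CV_8UC1, cv::Scalar::all(0));
    mask.rowRange(0, img.rows / 2).setTo(cv::Scalar::all(255));

    cv::gpu::GpuMat buf;                         // reusable scratch buffer
    double l2 = cv::gpu::norm(img, cv::NORM_L2, mask, buf);
    return l2 > 0 ? 0 : 1;
}
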
@@ -209,24 +219,29 @@ namespace sum
     void getBufSize(int cols, int rows, int cn, int& bufcols, int& bufrows);

     template <typename T, int cn>
-    void run(PtrStepSzb src, void* buf, double* sum);
+    void run(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);

     template <typename T, int cn>
-    void runAbs(PtrStepSzb src, void* buf, double* sum);
+    void runAbs(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);

     template <typename T, int cn>
-    void runSqr(PtrStepSzb src, void* buf, double* sum);
+    void runSqr(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
 }

 Scalar cv::gpu::sum(const GpuMat& src)
 {
     GpuMat buf;
-    return sum(src, buf);
+    return sum(src, GpuMat(), buf);
 }

 Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
 {
-    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum);
+    return sum(src, GpuMat(), buf);
+}
+
+Scalar cv::gpu::sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
+{
+    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
     static const func_t funcs[7][5] =
     {
         {0, ::sum::run<uchar , 1>, ::sum::run<uchar , 2>, ::sum::run<uchar , 3>, ::sum::run<uchar , 4>},

@@ -238,6 +253,8 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
         {0, ::sum::run<double, 1>, ::sum::run<double, 2>, ::sum::run<double, 3>, ::sum::run<double, 4>}
     };

+    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
+
     if (src.depth() == CV_64F)
     {
         if (!deviceSupports(NATIVE_DOUBLE))

@@ -252,7 +269,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
     const func_t func = funcs[src.depth()][src.channels()];

     double result[4];
-    func(src, buf.data, result);
+    func(src, buf.data, result, mask);

     return Scalar(result[0], result[1], result[2], result[3]);
 }

@@ -260,12 +277,17 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
 Scalar cv::gpu::absSum(const GpuMat& src)
 {
     GpuMat buf;
-    return absSum(src, buf);
+    return absSum(src, GpuMat(), buf);
 }

 Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
 {
-    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum);
+    return absSum(src, GpuMat(), buf);
+}
+
+Scalar cv::gpu::absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
+{
+    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
     static const func_t funcs[7][5] =
     {
         {0, ::sum::runAbs<uchar , 1>, ::sum::runAbs<uchar , 2>, ::sum::runAbs<uchar , 3>, ::sum::runAbs<uchar , 4>},

@@ -277,6 +299,8 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
         {0, ::sum::runAbs<double, 1>, ::sum::runAbs<double, 2>, ::sum::runAbs<double, 3>, ::sum::runAbs<double, 4>}
     };

+    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
+
     if (src.depth() == CV_64F)
     {
         if (!deviceSupports(NATIVE_DOUBLE))

@@ -291,7 +315,7 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
     const func_t func = funcs[src.depth()][src.channels()];

     double result[4];
-    func(src, buf.data, result);
+    func(src, buf.data, result, mask);

     return Scalar(result[0], result[1], result[2], result[3]);
 }

@@ -299,12 +323,17 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
 Scalar cv::gpu::sqrSum(const GpuMat& src)
 {
     GpuMat buf;
-    return sqrSum(src, buf);
+    return sqrSum(src, GpuMat(), buf);
 }

 Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
 {
-    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum);
+    return sqrSum(src, GpuMat(), buf);
+}
+
+Scalar cv::gpu::sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf)
+{
+    typedef void (*func_t)(PtrStepSzb src, void* buf, double* sum, PtrStepSzb mask);
     static const func_t funcs[7][5] =
     {
         {0, ::sum::runSqr<uchar , 1>, ::sum::runSqr<uchar , 2>, ::sum::runSqr<uchar , 3>, ::sum::runSqr<uchar , 4>},

@@ -316,6 +345,8 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
         {0, ::sum::runSqr<double, 1>, ::sum::runSqr<double, 2>, ::sum::runSqr<double, 3>, ::sum::runSqr<double, 4>}
     };

+    CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == src.size()) );
+
     if (src.depth() == CV_64F)
     {
         if (!deviceSupports(NATIVE_DOUBLE))

@@ -330,7 +361,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
     const func_t func = funcs[src.depth()][src.channels()];

     double result[4];
-    func(src, buf.data, result);
+    func(src, buf.data, result, mask);

     return Scalar(result[0], result[1], result[2], result[3]);
 }
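All three reductions now share the same masked entry points and reuse one scratch buffer; as the asserts require, the mask must be CV_8UC1 and match the source size. A combined usage sketch with illustrative sizes and values:

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::gpu::GpuMat img(480, 640, CV_8UC3, cv::Scalar(1, 2, 3));
    cv::gpu::GpuMat mask(img.size(), CV_8UC1, cv::Scalar::all(255));

    cv::gpu::GpuMat buf;                              // reused across calls
    cv::Scalar s  = cv::gpu::sum(img, mask, buf);     // per-channel sums
    cv::Scalar sa = cv::gpu::absSum(img, mask, buf);  // sums of |value|
    cv::Scalar sq = cv::gpu::sqrSum(img, mask, buf);  // sums of value^2

    return (s[0] > 0 && sa[0] > 0 && sq[0] > 0) ? 0 : 1;
}
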