fix gpu module compilation under linux

commit 863d61e9eb
parent 4cdcf37139
@@ -84,6 +84,12 @@ void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool,

 #else /* !defined (HAVE_CUDA) */

+#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
+
+#if (defined(_WIN32) || defined(_WIN64)) && (NPP_VERSION >= 32)
+#   define NPP_HAVE_COMPLEX_TYPE
+#endif
+
 ////////////////////////////////////////////////////////////////////////
 // add subtract multiply divide

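For reference, the new NPP_VERSION macro folds the major and minor version into one comparable integer, so NPP 3.2 becomes 32 (the encoding assumes single-digit minor versions). A minimal sketch of how the gate composes; the real NPP_VERSION_MAJOR/NPP_VERSION_MINOR come from NPP's headers, the values below are stand-ins for illustration:

    #include <cstdio>

    #define NPP_VERSION_MAJOR 3   // stand-in values; normally supplied by NPP's version header
    #define NPP_VERSION_MINOR 2
    #define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)

    int main()
    {
        std::printf("NPP_VERSION = %d\n", NPP_VERSION);  // prints 32 for NPP 3.2
    #if NPP_VERSION >= 32
        std::printf("taking the NPP 3.2+ code path\n");
    #endif
        return 0;
    }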
@@ -102,7 +108,11 @@ namespace
    {
        CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());

+#if NPP_VERSION >= 32
        CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
+#else
+        CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);
+#endif

        dst.create( src1.size(), src1.type() );

@@ -122,11 +132,13 @@ namespace
                                        src2.ptr<Npp8u>(), src2.step,
                                        dst.ptr<Npp8u>(), dst.step, sz, 0) );
            break;
+#if NPP_VERSION >= 32
        case CV_32SC1:
            nppSafeCall( npp_func_32sc1(src1.ptr<Npp32s>(), src1.step,
                                        src2.ptr<Npp32s>(), src2.step,
                                        dst.ptr<Npp32s>(), dst.step, sz) );
            break;
+#endif
        case CV_32FC1:
            nppSafeCall( npp_func_32fc1(src1.ptr<Npp32f>(), src1.step,
                                        src2.ptr<Npp32f>(), src2.step,
@@ -143,11 +155,13 @@ namespace
        typedef NppStatus (*func_ptr)(const Npp32f *pSrc, int nSrcStep, Npp32f nValue, Npp32f *pDst,
                                      int nDstStep, NppiSize oSizeROI);
    };
+#ifdef NPP_HAVE_COMPLEX_TYPE
    template<> struct NppArithmScalarFunc<2>
    {
        typedef NppStatus (*func_ptr)(const Npp32fc *pSrc, int nSrcStep, Npp32fc nValue, Npp32fc *pDst,
                                      int nDstStep, NppiSize oSizeROI);
    };
+#endif

    template<int SCN, typename NppArithmScalarFunc<SCN>::func_ptr func> struct NppArithmScalar;
    template<typename NppArithmScalarFunc<1>::func_ptr func> struct NppArithmScalar<1, func>
@@ -163,6 +177,7 @@ namespace
        nppSafeCall( func(src.ptr<Npp32f>(), src.step, (Npp32f)sc[0], dst.ptr<Npp32f>(), dst.step, sz) );
        }
    };
+#ifdef NPP_HAVE_COMPLEX_TYPE
    template<typename NppArithmScalarFunc<2>::func_ptr func> struct NppArithmScalar<2, func>
    {
        static void calc(const GpuMat& src, const Scalar& sc, GpuMat& dst)
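The NppArithmScalarFunc/NppArithmScalar pair above is a compile-time dispatch idiom: a traits struct fixes the function-pointer type per channel count, and a partial specialization binds one concrete NPP function as a non-type template argument. A reduced, self-contained sketch of the same idiom (names and operations are illustrative, not OpenCV's):

    #include <cstdio>

    template <int SCN> struct FuncTraits;
    template <> struct FuncTraits<1> { typedef float  (*func_ptr)(float); };
    template <> struct FuncTraits<2> { typedef double (*func_ptr)(double); };

    template <int SCN, typename FuncTraits<SCN>::func_ptr func> struct Caller;

    template <typename FuncTraits<1>::func_ptr func> struct Caller<1, func>
    {
        static float calc(float v) { return func(v); }   // func is resolved at compile time
    };

    template <typename FuncTraits<2>::func_ptr func> struct Caller<2, func>
    {
        static double calc(double v) { return func(v); }
    };

    float  addOne(float v)    { return v + 1.0f; }
    double timesTwo(double v) { return v * 2.0; }

    int main()
    {
        std::printf("%f %f\n", Caller<1, addOne>::calc(1.0f), Caller<2, timesTwo>::calc(3.0));
        return 0;
    }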
@@ -180,66 +195,119 @@ namespace
            nppSafeCall( func(src.ptr<Npp32fc>(), src.step, nValue, dst.ptr<Npp32fc>(), dst.step, sz) );
        }
    };
+#endif
 }

 void cv::gpu::add(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, nppiAdd_32s_C1R, nppiAdd_32f_C1R);
+#else
+    nppArithmCaller(src1, src2, dst, nppiAdd_8u_C1RSfs, nppiAdd_8u_C4RSfs, 0, nppiAdd_32f_C1R);
+#endif
 }

 void cv::gpu::subtract(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, nppiSub_32s_C1R, nppiSub_32f_C1R);
+#else
+    nppArithmCaller(src2, src1, dst, nppiSub_8u_C1RSfs, nppiSub_8u_C4RSfs, 0, nppiSub_32f_C1R);
+#endif
 }

 void cv::gpu::multiply(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, nppiMul_32s_C1R, nppiMul_32f_C1R);
+#else
+    nppArithmCaller(src1, src2, dst, nppiMul_8u_C1RSfs, nppiMul_8u_C4RSfs, 0, nppiMul_32f_C1R);
+#endif
 }

 void cv::gpu::divide(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, nppiDiv_32s_C1R, nppiDiv_32f_C1R);
+#else
+    nppArithmCaller(src2, src1, dst, nppiDiv_8u_C1RSfs, nppiDiv_8u_C4RSfs, 0, nppiDiv_32f_C1R);
+#endif
 }

 void cv::gpu::add(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiAddC_32f_C1R>::calc, NppArithmScalar<2, nppiAddC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiAddC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::subtract(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiSubC_32f_C1R>::calc, NppArithmScalar<2, nppiSubC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiSubC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::multiply(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiMulC_32f_C1R>::calc, NppArithmScalar<2, nppiMulC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiMulC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 void cv::gpu::divide(const GpuMat& src, const Scalar& sc, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     typedef void (*caller_t)(const GpuMat& src, const Scalar& sc, GpuMat& dst);
-    static const caller_t callers[] = {NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc};
+    static const caller_t callers[] = {0, NppArithmScalar<1, nppiDivC_32f_C1R>::calc, NppArithmScalar<2, nppiDivC_32fc_C1R>::calc};

     CV_Assert(src.type() == CV_32FC1 || src.type() == CV_32FC2);

     callers[src.channels()](src, sc, dst);
+#else
+# if NPP_VERSION >= 32
+    CV_Assert(src.type() == CV_32FC1);
+    NppArithmScalar<1, nppiDivC_32f_C1R>::calc(src, sc, dst);
+# else
+    CV_Assert(!"This function doesn't supported");
+# endif
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
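The change from a two-entry table to {0, calc1, calc2} fixes the indexing: callers[src.channels()] is looked up with a channel count of 1 or 2, so slot 0 must be a placeholder. A minimal sketch of the corrected lookup:

    #include <cstdio>

    typedef void (*caller_t)(int);

    void calcC1(int v) { std::printf("1-channel path, %d\n", v); }
    void calcC2(int v) { std::printf("2-channel path, %d\n", v); }

    // slot 0 is a placeholder; valid channel counts are 1 and 2
    static const caller_t callers[] = { 0, calcC1, calcC2 };

    int main()
    {
        int channels = 2;          // stands in for src.channels()
        callers[channels](42);     // indexes directly, no off-by-one
        return 0;
    }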
@@ -265,7 +333,11 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
 {
     CV_DbgAssert(src1.size() == src2.size() && src1.type() == src2.type());

+#if NPP_VERSION >= 32
     CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32SC1 || src1.type() == CV_32FC1);
+#else
+    CV_Assert(src1.type() == CV_8UC1 || src1.type() == CV_8UC4 || src1.type() == CV_32FC1);
+#endif

     dst.create( src1.size(), src1.type() );

@@ -285,11 +357,13 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)
                                      src2.ptr<Npp8u>(), src2.step,
                                      dst.ptr<Npp8u>(), dst.step, sz) );
         break;
+#if NPP_VERSION >= 32
     case CV_32SC1:
         nppSafeCall( nppiAbsDiff_32s_C1R(src1.ptr<Npp32s>(), src1.step,
                                          src2.ptr<Npp32s>(), src2.step,
                                          dst.ptr<Npp32s>(), dst.step, sz) );
         break;
+#endif
     case CV_32FC1:
         nppSafeCall( nppiAbsDiff_32f_C1R(src1.ptr<Npp32f>(), src1.step,
                                          src2.ptr<Npp32f>(), src2.step,
@@ -302,6 +376,7 @@ void cv::gpu::absdiff(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)

 void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create( src.size(), src.type() );

@@ -311,6 +386,9 @@ void cv::gpu::absdiff(const GpuMat& src, const Scalar& s, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiAbsDiffC_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz, (Npp32f)s[0]) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
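The CV_Assert(!"This function doesn't supported") fallback (grammar as in the source) relies on a string literal converting to a non-null pointer: !"..." is always false, so the assert always fires and carries the message into the failure report. The same idiom with the standard assert:

    #include <cassert>

    void unsupportedPath()
    {
        assert(!"not supported with this NPP version");  // always fails; the message shows in the report
    }

    int main()
    {
        // unsupportedPath();  // would abort with the message above when NDEBUG is not defined
        return 0;
    }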
@@ -444,13 +522,16 @@ void cv::gpu::flip(const GpuMat& src, GpuMat& dst, int flipCode)

 Scalar cv::gpu::sum(const GpuMat& src)
 {
-    CV_Assert(!"disabled until fix crash");
     CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);

     NppiSize sz;
     sz.width = src.cols;
     sz.height = src.rows;

+    Scalar res;
+#if NPP_VERSION >= 32
+    CV_Assert(!"disabled until fix crash");
+
     int bufsz;

     if (src.type() == CV_8UC1)
@@ -458,19 +539,23 @@ Scalar cv::gpu::sum(const GpuMat& src)
         nppiReductionGetBufferHostSize_8u_C1R(sz, &bufsz);
         GpuMat buf(1, bufsz, CV_32S);

-        Scalar res;
         nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) );
-        return res;
     }
     else
     {
         nppiReductionGetBufferHostSize_8u_C4R(sz, &bufsz);
         GpuMat buf(1, bufsz, CV_32S);

-        Scalar res;
         nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, buf.ptr<Npp32s>(), res.val) );
-        return res;
     }
+#else
+    if (src.type() == CV_8UC1)
+        nppSafeCall( nppiSum_8u_C1R(src.ptr<Npp8u>(), src.step, sz, res.val) );
+    else
+        nppSafeCall( nppiSum_8u_C4R(src.ptr<Npp8u>(), src.step, sz, res.val) );
+#endif
+
+    return res;
 }

 ////////////////////////////////////////////////////////////////////////
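The sum() rewrite hoists Scalar res out of the branches and funnels both NPP generations through one return: the 3.2+ path (still disabled by the crash guard) allocates the scratch buffer the newer reduction requires, while the older API writes straight into res.val. The control shape, sketched with the NPP calls stubbed out:

    // Control shape of the patched sum(), with the NPP calls replaced by
    // comments (a hypothetical stand-in, not OpenCV code): res is declared
    // once, every branch writes into it, and there is a single exit.
    struct ScalarSketch { double val[4]; };

    ScalarSketch gpuSumSketch(bool newNpp, bool oneChannel)
    {
        ScalarSketch res = { {0.0, 0.0, 0.0, 0.0} };
        if (newNpp)
        {
            // NPP >= 3.2: query scratch size, allocate a GpuMat buffer,
            // then nppiSum_8u_C1R/C4R(..., buf, res.val)
        }
        else if (oneChannel)
        {
            // older NPP: nppiSum_8u_C1R(..., sz, res.val), no scratch buffer
        }
        else
        {
            // older NPP: nppiSum_8u_C4R(..., sz, res.val)
        }
        return res;  // single exit replaces the per-branch returns
    }

    int main()
    {
        ScalarSketch s = gpuSumSketch(false, true);
        return s.val[0] == 0.0 ? 0 : 1;
    }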
@@ -501,18 +586,26 @@ namespace
        sz.width = src.cols;
        sz.height = src.rows;

-        Npp8u* cuMin = nppsMalloc_8u(4);
-        Npp8u* cuMax = nppsMalloc_8u(4);
+        Npp8u* cuMem;

-        nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMin, cuMax) );
+#if NPP_VERSION >= 32
+        cuMem = nppsMalloc_8u(8);
+#else
+        cudaSafeCall( cudaMalloc((void**)&cuMem, 8 * sizeof(Npp8u)) );
+#endif
+
+        nppSafeCall( nppiMinMax_8u_C4R(src.ptr<Npp8u>(), src.step, sz, cuMem, cuMem + 4) );

        if (minVal)
-            cudaMemcpy(minVal, cuMin, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
+            cudaMemcpy(minVal, cuMem, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
        if (maxVal)
-            cudaMemcpy(maxVal, cuMax, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);
+            cudaMemcpy(maxVal, cuMem + 4, 4 * sizeof(Npp8u), cudaMemcpyDeviceToHost);

-        nppsFree(cuMin);
-        nppsFree(cuMax);
+#if NPP_VERSION >= 32
+        nppsFree(cuMem);
+#else
+        cudaSafeCall( cudaFree(cuMem) );
+#endif
    }
 }

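The minMax change collapses two 4-byte device allocations into one 8-byte block, with the per-channel minima at cuMem and the maxima at cuMem + 4 (one byte per channel of the 8u C4 image), falling back to plain cudaMalloc where nppsMalloc_8u is unavailable. A sketch of the layout, with host memory standing in for the device buffer:

    #include <cstdio>
    #include <cstring>

    int main()
    {
        // one 8-byte block instead of two 4-byte ones; the device allocation
        // (nppsMalloc_8u(8) or cudaMalloc) is replaced by a host array here
        unsigned char cuMem[8];

        unsigned char mins[4] = {0, 1, 2, 3};        // what nppiMinMax_8u_C4R would write
        unsigned char maxs[4] = {250, 251, 252, 253};
        std::memcpy(cuMem, mins, 4);                 // min block at cuMem
        std::memcpy(cuMem + 4, maxs, 4);             // max block at cuMem + 4

        std::printf("min[0]=%u max[0]=%u\n", (unsigned)cuMem[0], (unsigned)cuMem[4]);
        return 0;
    }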
@@ -591,6 +684,7 @@ void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst)

 void cv::gpu::exp(const GpuMat& src, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create(src.size(), src.type());

@@ -600,6 +694,9 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiExp_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
@@ -607,6 +704,7 @@ void cv::gpu::exp(const GpuMat& src, GpuMat& dst)

 void cv::gpu::log(const GpuMat& src, GpuMat& dst)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_32FC1);

     dst.create(src.size(), src.type());

@@ -616,11 +714,15 @@ void cv::gpu::log(const GpuMat& src, GpuMat& dst)
     sz.height = src.rows;

     nppSafeCall( nppiLn_32f_C1R(src.ptr<Npp32f>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
 // NPP magnitide

+#ifdef NPP_HAVE_COMPLEX_TYPE
 namespace
 {
     typedef NppStatus (*nppMagnitude_t)(const Npp32fc* pSrc, int nSrcStep, Npp32f* pDst, int nDstStep, NppiSize oSizeROI);
@@ -638,15 +740,24 @@ namespace
         nppSafeCall( func(src.ptr<Npp32fc>(), src.step, dst.ptr<Npp32f>(), dst.step, sz) );
     }
 }
+#endif

 void cv::gpu::magnitude(const GpuMat& src, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     ::npp_magnitude(src, dst, nppiMagnitude_32fc32f_C1R);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::magnitudeSqr(const GpuMat& src, GpuMat& dst)
 {
+#ifdef NPP_HAVE_COMPLEX_TYPE
     ::npp_magnitude(src, dst, nppiMagnitudeSqr_32fc32f_C1R);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////

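NPP_HAVE_COMPLEX_TYPE exists because the Npp32fc complex type (and the 32fc entry points built on it) was only available in the Windows NPP 3.2 packages at the time, per the guard defined at the top of the file, so everything touching it is compiled out elsewhere. A stand-in showing roughly what the guarded type looks like (an assumption for illustration, not NPP's actual header):

    #ifdef NPP_HAVE_COMPLEX_TYPE
    // rough stand-in for NPP's interleaved single-precision complex sample
    typedef struct
    {
        float re;  // real part
        float im;  // imaginary part
    } Npp32fc_sketch;
    #endif

    int main() { return 0; }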
@@ -48,8 +48,11 @@ void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Gpu

 #else /* !defined (HAVE_CUDA) */

+#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
+
 void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf)
 {
+#if NPP_VERSION >= 32
     CV_Assert(leftTransp.type() == CV_32S && rightTransp.type() == CV_32S);
     CV_Assert(terminals.type() == CV_32S && bottom.type() == CV_32S && top.type() == CV_32S);
     CV_Assert(terminals.size() == leftTransp.size());
@@ -71,6 +74,9 @@ void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTrans

     nppSafeCall( nppiGraphcut_32s8u(terminals.ptr<Npp32s>(), leftTransp.ptr<Npp32s>(), rightTransp.ptr<Npp32s>(), top.ptr<Npp32s>(), bottom.ptr<Npp32s>(),
         terminals.step, leftTransp.step, sznpp, labels.ptr<Npp8u>(), labels.step, buf.ptr<Npp8u>()) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }


@@ -71,6 +71,8 @@ void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*) { throw_nogpu();

 #else /* !defined (HAVE_CUDA) */

+#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
+
 namespace cv { namespace gpu { namespace imgproc
 {
     void remap_gpu_1c(const DevMem2D& src, const DevMem2Df& xmap, const DevMem2Df& ymap, DevMem2D dst);
@@ -577,6 +579,7 @@ void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, cons

 void cv::gpu::Canny(const GpuMat& image, GpuMat& edges, double threshold1, double threshold2, int apertureSize)
 {
+#if NPP_VERSION >= 32
     CV_Assert(!"disabled until fix crash");
     CV_Assert(image.type() == CV_8UC1);

@@ -600,6 +603,9 @@ void cv::gpu::Canny(const GpuMat& image, GpuMat& edges, double threshold1, doubl

     nppSafeCall( nppiCanny_32f8u_C1R(srcDx.ptr<Npp32f>(), srcDx.step, srcDy.ptr<Npp32f>(), srcDy.step,
         edges.ptr<Npp8u>(), edges.step, sz, (Npp32f)threshold1, (Npp32f)threshold2, buf.ptr<Npp8u>()) );
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 ////////////////////////////////////////////////////////////////////////
@@ -783,13 +789,18 @@ namespace

 void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
 {
+#if NPP_VERSION >= 32
     Mat host_levels(1, nLevels, CV_32SC1);
     nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
     levels.upload(host_levels);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );

     typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, int levels, int lowerLevel, int upperLevel);
@@ -802,10 +813,14 @@ void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerL
     };

     hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4])
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 );

     typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], int levels[4], int lowerLevel[4], int upperLevel[4]);
@@ -818,10 +833,14 @@ void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int l
     };

     hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);

     typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels);
@@ -836,10 +855,14 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels)
     };

     hist_callers[src.depth()](src, hist, levels);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4])
 {
+#if NPP_VERSION >= 32
     CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);

     typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4]);
@@ -854,6 +877,9 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4
     };

     hist_callers[src.depth()](src, hist, levels);
+#else
+    CV_Assert(!"This function doesn't supported");
+#endif
 }

 #endif /* !defined (HAVE_CUDA) */
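hist_callers is indexed by src.depth(), OpenCV's 0-based depth code (CV_8U=0, CV_8S=1, CV_16U=2, CV_16S=3, ...), with unsupported depths left as null slots that the type assert above makes unreachable. A minimal sketch of the lookup:

    #include <cstdio>

    typedef void (*hist_t)(int histSize);

    void hist8u(int n)  { std::printf("8u histogram, %d bins\n", n); }
    void hist16u(int n) { std::printf("16u histogram, %d bins\n", n); }
    void hist16s(int n) { std::printf("16s histogram, %d bins\n", n); }

    // indexed by depth code: CV_8U=0, CV_8S=1 (unsupported), CV_16U=2, CV_16S=3
    static const hist_t hist_callers[] = { hist8u, 0, hist16u, hist16s };

    int main()
    {
        int depth = 2;                // stands in for src.depth() == CV_16U
        hist_callers[depth](256);     // the type assert guarantees a non-null slot
        return 0;
    }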
@@ -77,6 +77,8 @@ namespace cv

 #else /* !defined (HAVE_CUDA) */

+#define NPP_VERSION (10 * NPP_VERSION_MAJOR + NPP_VERSION_MINOR)
+
 namespace cv
 {
     namespace gpu
@@ -232,7 +234,11 @@ void cv::gpu::GpuMat::convertTo( GpuMat& dst, int rtype, double alpha, double be
         {NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16U, nppiConvert_8u16u_C4R>::cvt},
         {NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,NppCvt<CV_8U, CV_16S, nppiConvert_8u16s_C4R>::cvt},
         {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
+#if NPP_VERSION >= 32
         {NppCvt<CV_8U, CV_32F, nppiConvert_8u32f_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
+#else
+        {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
+#endif
         {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
         {0,0,0,0}
     },
@@ -277,7 +283,11 @@ void cv::gpu::GpuMat::convertTo( GpuMat& dst, int rtype, double alpha, double be
         {0,0,0,0}
     },
     {
+#if NPP_VERSION >= 32
         {NppCvt<CV_32F, CV_8U, nppiConvert_32f8u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
+#else
+        {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
+#endif
         {convertToKernelCaller,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
         {NppCvt<CV_32F, CV_16U, nppiConvert_32f16u_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
         {NppCvt<CV_32F, CV_16S, nppiConvert_32f16s_C1R>::cvt,convertToKernelCaller,convertToKernelCaller,convertToKernelCaller},
@@ -421,10 +431,26 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
     {
         {NppSet<CV_8U, 1, nppiSet_8u_C1R>::set,kernelSet,kernelSet,NppSet<CV_8U, 4, nppiSet_8u_C4R>::set},
         {kernelSet,kernelSet,kernelSet,kernelSet},
+#if NPP_VERSION >= 32
         {NppSet<CV_16U, 1, nppiSet_16u_C1R>::set,kernelSet,kernelSet,NppSet<CV_16U, 4, nppiSet_16u_C4R>::set},
+#else
+        {kernelSet,kernelSet,kernelSet,kernelSet},
+#endif
+#if NPP_VERSION >= 32
         {NppSet<CV_16S, 1, nppiSet_16s_C1R>::set,kernelSet,kernelSet,NppSet<CV_16S, 4, nppiSet_16s_C4R>::set},
+#else
+        {kernelSet,kernelSet,kernelSet,kernelSet},
+#endif
+#if NPP_VERSION >= 32
         {NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,NppSet<CV_32S, 4, nppiSet_32s_C4R>::set},
+#else
+        {NppSet<CV_32S, 1, nppiSet_32s_C1R>::set,kernelSet,kernelSet,kernelSet},
+#endif
+#if NPP_VERSION >= 32
         {NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,NppSet<CV_32F, 4, nppiSet_32f_C4R>::set},
+#else
+        {NppSet<CV_32F, 1, nppiSet_32f_C1R>::set,kernelSet,kernelSet,kernelSet},
+#endif
         {kernelSet,kernelSet,kernelSet,kernelSet},
         {0,0,0,0}
     };
@@ -432,6 +458,7 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
     }
     else
     {
+#if NPP_VERSION >= 32
         typedef void (*set_caller_t)(GpuMat& src, const Scalar& s, const GpuMat& mask);
         static const set_caller_t set_callers[8][4] =
         {
@@ -445,6 +472,9 @@ GpuMat& GpuMat::setTo(const Scalar& s, const GpuMat& mask)
             {0,0,0,0}
         };
         set_callers[depth()][channels()-1](*this, s, mask);
+#else
+        kernelSetMask(*this, s, mask);
+#endif
     }

     return *this;
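In convertTo and setTo the dispatch is a [depth][channels] table, so the portability fix swaps whole rows: where an NPP setter or converter is missing on older releases, the row degrades to the kernel fallback while the table shape, and therefore the indexing, stays fixed. A reduced sketch of the row-substitution pattern (NPP_VERSION is defined locally here just so the snippet builds on its own):

    #include <cstdio>

    #ifndef NPP_VERSION
    #define NPP_VERSION 32   // stand-in so the sketch compiles by itself
    #endif

    typedef void (*set_caller_t)(void);

    void kernelSet(void) { std::printf("generic kernel fallback\n"); }
    void nppSet16u(void) { std::printf("NPP 16u setter\n"); }  // stands in for NppSet<CV_16U,...>::set

    static const set_caller_t set_callers[][4] =
    {
        { kernelSet, kernelSet, kernelSet, kernelSet },   // row for a depth with no NPP setter
    #if NPP_VERSION >= 32
        { nppSet16u, kernelSet, kernelSet, nppSet16u },   // 16U row: NPP for C1/C4
    #else
        { kernelSet, kernelSet, kernelSet, kernelSet },   // same shape, fallback row
    #endif
    };

    int main()
    {
        set_callers[1][0]();   // the [depth][channels-1] lookup is unchanged either way
        return 0;
    }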