fixed gpu::pyrUp (now it matches cpu analog)
fixed several warnings
parent 484fe1d598
commit 6397fa5b38
@@ -713,14 +713,12 @@ gpu::pyrDown
-------------------
Smoothes an image and downsamples it.

.. ocv:function:: void gpu::pyrDown(const GpuMat& src, GpuMat& dst, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Source image.

    :param dst: Destination image. Will have ``Size((src.cols+1)/2, (src.rows+1)/2)`` size and the same type as ``src`` .

    :param borderType: Pixel extrapolation method (see :ocv:func:`borderInterpolate` ). ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`pyrDown`
@@ -731,14 +729,12 @@ gpu::pyrUp
-------------------
Upsamples an image and then smoothes it.

.. ocv:function:: void gpu::pyrUp(const GpuMat& src, GpuMat& dst, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())

    :param src: Source image.

    :param dst: Destination image. Will have ``Size(src.cols*2, src.rows*2)`` size and the same type as ``src`` .

    :param borderType: Pixel extrapolation method (see :ocv:func:`borderInterpolate` ). ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.

    :param stream: Stream for the asynchronous version.

.. seealso:: :ocv:func:`pyrUp`
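For orientation, a minimal host-side sketch of the updated API (the input file name is hypothetical, and a CUDA-enabled build is assumed); with the ``borderType`` parameter gone, both functions behave like their CPU counterparts ``pyrDown``/``pyrUp``::

    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::Mat img = cv::imread("input.png");          // hypothetical input image
        cv::gpu::GpuMat d_img(img), d_half, d_twice;

        cv::gpu::pyrDown(d_img, d_half);                // Size((cols+1)/2, (rows+1)/2)
        cv::gpu::pyrUp(d_half, d_twice);                // Size(cols*2, rows*2)

        cv::Mat half, twice;
        d_half.download(half);
        d_twice.download(twice);
        return 0;
    }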
@@ -836,10 +836,10 @@ private:
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream& stream = Stream::Null());

//! smoothes the source image and downsamples it
CV_EXPORTS void pyrDown(const GpuMat& src, GpuMat& dst, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null());
CV_EXPORTS void pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());

//! upsamples the source image and then smoothes it
CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null());
CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());

//! performs linear blending of two images
//! to avoid accuracy errors the sum of weights shouldn't be very close to zero
@@ -124,7 +124,7 @@ namespace cv { namespace gpu { namespace device
            }
        }

        template <typename T, template <typename> class B> void pyrDown_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, cudaStream_t stream)
        template <typename T, template <typename> class B> void pyrDown_caller(DevMem2D_<T> src, DevMem2D_<T> dst, cudaStream_t stream)
        {
            const dim3 block(256);
            const dim3 grid(divUp(src.cols, block.x), dst.rows);
@@ -138,48 +138,39 @@ namespace cv { namespace gpu { namespace device
            cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <typename T, int cn> void pyrDown_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream)
        template <typename T> void pyrDown_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
        {
            typedef typename TypeVec<T, cn>::vec_type type;

            typedef void (*caller_t)(const DevMem2D_<type>& src, const DevMem2D_<type>& dst, cudaStream_t stream);

            static const caller_t callers[] =
            {
                pyrDown_caller<type, BrdReflect101>, pyrDown_caller<type, BrdReplicate>, pyrDown_caller<type, BrdConstant>, pyrDown_caller<type, BrdReflect>, pyrDown_caller<type, BrdWrap>
            };

            callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
            pyrDown_caller<T, BrdReflect101>(static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(dst), stream);
        }

        template void pyrDown_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<uchar>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<uchar2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrDown_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        //template void pyrDown_gpu<schar>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<char2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<char3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<char4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrDown_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<ushort>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<ushort2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrDown_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<short>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<short2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<short3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<short4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrDown_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        //template void pyrDown_gpu<int>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<int2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<int3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<int4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrDown_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrDown_gpu<float>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrDown_gpu<float2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrDown_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
    } // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
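The simplified pyrDown_gpu above always dispatches to BrdReflect101, i.e. the same BORDER_REFLECT_101 extrapolation that the CPU pyrDown applies by default. For illustration only (this helper is not part of the patch), that indexing rule can be sketched as:

    // hypothetical host-side sketch of BORDER_REFLECT_101 indexing: gfedcb|abcdefgh|gfedcba
    inline int reflect101(int p, int len)
    {
        if (len == 1)
            return 0;
        while (p < 0 || p >= len)
        {
            if (p < 0)
                p = -p;                   // mirror about 0; the edge pixel is not repeated
            else
                p = 2 * (len - 1) - p;    // mirror about len - 1; the edge pixel is not repeated
        }
        return p;
    }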
@@ -50,10 +50,9 @@ namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <class SrcPtr, typename D> __global__ void pyrUp(const SrcPtr src, DevMem2D_<D> dst)
        template <typename T> __global__ void pyrUp(const DevMem2D_<T> src, DevMem2D_<T> dst)
        {
            typedef typename SrcPtr::elem_type src_t;
            typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
            typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;

            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;
@@ -63,8 +62,14 @@ namespace cv { namespace gpu { namespace device

            if (threadIdx.x < 10 && threadIdx.y < 10)
            {
                const int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
                const int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;
                int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
                int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;

                srcx = ::abs(srcx);
                srcx = ::min(src.cols - 1, srcx);

                srcy = ::abs(srcy);
                srcy = ::min(src.rows - 1, srcy);

                s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx));
            }
@@ -134,66 +139,54 @@ namespace cv { namespace gpu { namespace device
            sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x];

            if (x < dst.cols && y < dst.rows)
                dst(y, x) = saturate_cast<D>(4.0f * sum);
                dst(y, x) = saturate_cast<T>(4.0f * sum);
        }

        template <typename T, template <typename> class B> void pyrUp_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, cudaStream_t stream)
        template <typename T> void pyrUp_caller(DevMem2D_<T> src, DevMem2D_<T> dst, cudaStream_t stream)
        {
            const dim3 block(16, 16);
            const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));

            B<T> b(src.rows, src.cols);
            BorderReader< PtrStep<T>, B<T> > srcReader(src, b);

            pyrUp<<<grid, block, 0, stream>>>(srcReader, dst);
            pyrUp<<<grid, block, 0, stream>>>(src, dst);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template <typename T, int cn> void pyrUp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream)
        template <typename T> void pyrUp_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
        {
            typedef typename TypeVec<T, cn>::vec_type type;

            typedef void (*caller_t)(const DevMem2D_<type>& src, const DevMem2D_<type>& dst, cudaStream_t stream);

            static const caller_t callers[] =
            {
                pyrUp_caller<type, BrdReflect101>, pyrUp_caller<type, BrdReplicate>, pyrUp_caller<type, BrdConstant>, pyrUp_caller<type, BrdReflect>, pyrUp_caller<type, BrdWrap>
            };

            callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
            pyrUp_caller<T>(static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(dst), stream);
        }

        template void pyrUp_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<uchar>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<uchar2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrUp_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        //template void pyrUp_gpu<schar>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<char2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<char3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<char4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrUp_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<ushort>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<ushort2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrUp_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<short>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<short2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<short3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<short4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrUp_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        //template void pyrUp_gpu<int>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<int2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<int3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<int4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

        template void pyrUp_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
        template void pyrUp_gpu<float>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        //template void pyrUp_gpu<float2>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template void pyrUp_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
    } // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
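For context rather than as part of the diff: the 0.0625f tap visible in the hunk above is the outer coefficient of the separable binomial kernel that the unchanged accumulation lines of this kernel apply, and the trailing 4.0f factor compensates for the zero rows and columns inserted when upsampling, keeping the overall gain at 1:

    k = \tfrac{1}{16}\begin{pmatrix}1 & 4 & 6 & 4 & 1\end{pmatrix}
      = \begin{pmatrix}0.0625 & 0.25 & 0.375 & 0.25 & 0.0625\end{pmatrix},
    \qquad \mathrm{dst} = 4\,(k^{\top} k) * \mathrm{upsampled\ src}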
@@ -87,8 +87,6 @@ void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::ConvolveBuf::create(Size, Size) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&, Stream&) { throw_nogpu(); }
void cv::gpu::pyrDown(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::pyrUp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, CannyBuf&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, GpuMat&, double, double, bool) { throw_nogpu(); }
@@ -96,8 +94,6 @@ void cv::gpu::Canny(const GpuMat&, const GpuMat&, CannyBuf&, GpuMat&, double, do
cv::gpu::CannyBuf::CannyBuf(const GpuMat&, const GpuMat&) { throw_nogpu(); }
void cv::gpu::CannyBuf::create(const Size&, int) { throw_nogpu(); }
void cv::gpu::CannyBuf::release() { throw_nogpu(); }
void cv::gpu::ImagePyramid::build(const GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::ImagePyramid::getLayer(GpuMat&, Size, Stream&) const { throw_nogpu(); }

#else /* !defined (HAVE_CUDA) */

@@ -541,8 +537,8 @@ void cv::gpu::integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, S
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );
    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
@@ -570,8 +566,8 @@ void cv::gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& s)
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );
    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    ncvSafeCall(nppiStSqrIntegralGetSize_8u64u(roiSize, &bufSize, prop));
@@ -651,8 +647,8 @@ namespace
    {
        typedef typename NppTypeTraits<SDEPTH>::npp_t src_t;

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s * pHist,
            int nLevels, Npp32s nLowerLevel, Npp32s nUpperLevel, Npp8u * pBuffer);
        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s * pHist,
            int nLevels, Npp32s nLowerLevel, Npp32s nUpperLevel, Npp8u * pBuffer);
    };
    template<int SDEPTH> struct NppHistogramEvenFuncC4
    {
@@ -1421,83 +1417,6 @@ void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
#endif
}

//////////////////////////////////////////////////////////////////////////////
// pyrDown

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T, int cn> void pyrDown_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
    }
}}}

void cv::gpu::pyrDown(const GpuMat& src, GpuMat& dst, int borderType, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

    typedef void (*func_t)(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrDown_gpu<uchar, 1>, pyrDown_gpu<uchar, 2>, pyrDown_gpu<uchar, 3>, pyrDown_gpu<uchar, 4>},
        {pyrDown_gpu<schar, 1>, pyrDown_gpu<schar, 2>, pyrDown_gpu<schar, 3>, pyrDown_gpu<schar, 4>},
        {pyrDown_gpu<ushort, 1>, pyrDown_gpu<ushort, 2>, pyrDown_gpu<ushort, 3>, pyrDown_gpu<ushort, 4>},
        {pyrDown_gpu<short, 1>, pyrDown_gpu<short, 2>, pyrDown_gpu<short, 3>, pyrDown_gpu<short, 4>},
        {pyrDown_gpu<int, 1>, pyrDown_gpu<int, 2>, pyrDown_gpu<int, 3>, pyrDown_gpu<int, 4>},
        {pyrDown_gpu<float, 1>, pyrDown_gpu<float, 2>, pyrDown_gpu<float, 3>, pyrDown_gpu<float, 4>},
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);
    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, gpuBorderType, StreamAccessor::getStream(stream));
}


//////////////////////////////////////////////////////////////////////////////
// pyrUp

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T, int cn> void pyrUp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
    }
}}}

void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, int borderType, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

    typedef void (*func_t)(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrUp_gpu<uchar, 1>, pyrUp_gpu<uchar, 2>, pyrUp_gpu<uchar, 3>, pyrUp_gpu<uchar, 4>},
        {pyrUp_gpu<schar, 1>, pyrUp_gpu<schar, 2>, pyrUp_gpu<schar, 3>, pyrUp_gpu<schar, 4>},
        {pyrUp_gpu<ushort, 1>, pyrUp_gpu<ushort, 2>, pyrUp_gpu<ushort, 3>, pyrUp_gpu<ushort, 4>},
        {pyrUp_gpu<short, 1>, pyrUp_gpu<short, 2>, pyrUp_gpu<short, 3>, pyrUp_gpu<short, 4>},
        {pyrUp_gpu<int, 1>, pyrUp_gpu<int, 2>, pyrUp_gpu<int, 3>, pyrUp_gpu<int, 4>},
        {pyrUp_gpu<float, 1>, pyrUp_gpu<float, 2>, pyrUp_gpu<float, 3>, pyrUp_gpu<float, 4>},
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);
    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    dst.create(src.rows*2, src.cols*2, src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, gpuBorderType, StreamAccessor::getStream(stream));
}


//////////////////////////////////////////////////////////////////////////////
// Canny
@@ -1646,129 +1565,6 @@ void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& d
    CannyCaller(buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}


//////////////////////////////////////////////////////////////////////////////
// ImagePyramid

namespace cv { namespace gpu { namespace device
{
    namespace pyramid
    {
        template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
        template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
    }
}}}

void cv::gpu::ImagePyramid::build(const GpuMat& img, int numLayers, Stream& stream)
{
    using namespace cv::gpu::device::pyramid;

    typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

    static const func_t funcs[7][4] =
    {
        {kernelDownsampleX2_gpu<uchar1>, /*kernelDownsampleX2_gpu<uchar2>*/ 0, kernelDownsampleX2_gpu<uchar3>, kernelDownsampleX2_gpu<uchar4>},
        {/*kernelDownsampleX2_gpu<char1>*/0, /*kernelDownsampleX2_gpu<char2>*/ 0, /*kernelDownsampleX2_gpu<char3>*/ 0, /*kernelDownsampleX2_gpu<char4>*/ 0},
        {kernelDownsampleX2_gpu<ushort1>, /*kernelDownsampleX2_gpu<ushort2>*/ 0, kernelDownsampleX2_gpu<ushort3>, kernelDownsampleX2_gpu<ushort4>},
        {/*kernelDownsampleX2_gpu<short1>*/ 0, /*kernelDownsampleX2_gpu<short2>*/ 0, /*kernelDownsampleX2_gpu<short3>*/ 0, /*kernelDownsampleX2_gpu<short4>*/ 0},
        {/*kernelDownsampleX2_gpu<int1>*/ 0, /*kernelDownsampleX2_gpu<int2>*/ 0, /*kernelDownsampleX2_gpu<int3>*/ 0, /*kernelDownsampleX2_gpu<int4>*/ 0},
        {kernelDownsampleX2_gpu<float1>, /*kernelDownsampleX2_gpu<float2>*/ 0, kernelDownsampleX2_gpu<float3>, kernelDownsampleX2_gpu<float4>},
        {/*kernelDownsampleX2_gpu<double1>*/ 0, /*kernelDownsampleX2_gpu<double2>*/ 0, /*kernelDownsampleX2_gpu<double3>*/ 0, /*kernelDownsampleX2_gpu<double4>*/ 0}
    };

    CV_Assert(img.channels() == 1 || img.channels() == 3 || img.channels() == 4);
    CV_Assert(img.depth() == CV_8U || img.depth() == CV_16U || img.depth() == CV_32F);

    layer0_ = img;
    Size szLastLayer = img.size();
    nLayers_ = 1;

    if (numLayers <= 0)
        numLayers = 255; //it will cut-off when any of the dimensions goes 1

    pyramid_.resize(numLayers);

    for (int i = 0; i < numLayers - 1; ++i)
    {
        Size szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);

        if (szCurLayer.width == 0 || szCurLayer.height == 0)
            break;

        ensureSizeIsEnough(szCurLayer, img.type(), pyramid_[i]);
        nLayers_++;

        const GpuMat& prevLayer = i == 0 ? layer0_ : pyramid_[i - 1];

        func_t func = funcs[img.depth()][img.channels() - 1];
        CV_Assert(func != 0);

        func(prevLayer, pyramid_[i], StreamAccessor::getStream(stream));

        szLastLayer = szCurLayer;
    }
}

void cv::gpu::ImagePyramid::getLayer(GpuMat& outImg, Size outRoi, Stream& stream) const
{
    using namespace cv::gpu::device::pyramid;

    typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);

    static const func_t funcs[7][4] =
    {
        {kernelInterpolateFrom1_gpu<uchar1>, /*kernelInterpolateFrom1_gpu<uchar2>*/ 0, kernelInterpolateFrom1_gpu<uchar3>, kernelInterpolateFrom1_gpu<uchar4>},
        {/*kernelInterpolateFrom1_gpu<char1>*/0, /*kernelInterpolateFrom1_gpu<char2>*/ 0, /*kernelInterpolateFrom1_gpu<char3>*/ 0, /*kernelInterpolateFrom1_gpu<char4>*/ 0},
        {kernelInterpolateFrom1_gpu<ushort1>, /*kernelInterpolateFrom1_gpu<ushort2>*/ 0, kernelInterpolateFrom1_gpu<ushort3>, kernelInterpolateFrom1_gpu<ushort4>},
        {/*kernelInterpolateFrom1_gpu<short1>*/ 0, /*kernelInterpolateFrom1_gpu<short2>*/ 0, /*kernelInterpolateFrom1_gpu<short3>*/ 0, /*kernelInterpolateFrom1_gpu<short4>*/ 0},
        {/*kernelInterpolateFrom1_gpu<int1>*/ 0, /*kernelInterpolateFrom1_gpu<int2>*/ 0, /*kernelInterpolateFrom1_gpu<int3>*/ 0, /*kernelInterpolateFrom1_gpu<int4>*/ 0},
        {kernelInterpolateFrom1_gpu<float1>, /*kernelInterpolateFrom1_gpu<float2>*/ 0, kernelInterpolateFrom1_gpu<float3>, kernelInterpolateFrom1_gpu<float4>},
        {/*kernelInterpolateFrom1_gpu<double1>*/ 0, /*kernelInterpolateFrom1_gpu<double2>*/ 0, /*kernelInterpolateFrom1_gpu<double3>*/ 0, /*kernelInterpolateFrom1_gpu<double4>*/ 0}
    };

    CV_Assert(outRoi.width <= layer0_.cols && outRoi.height <= layer0_.rows && outRoi.width > 0 && outRoi.height > 0);

    ensureSizeIsEnough(outRoi, layer0_.type(), outImg);

    if (outRoi.width == layer0_.cols && outRoi.height == layer0_.rows)
    {
        if (stream)
            stream.enqueueCopy(layer0_, outImg);
        else
            layer0_.copyTo(outImg);
    }

    float lastScale = 1.0f;
    float curScale;
    GpuMat lastLayer = layer0_;
    GpuMat curLayer;

    for (int i = 0; i < nLayers_ - 1; ++i)
    {
        curScale = lastScale * 0.5f;
        curLayer = pyramid_[i];

        if (outRoi.width == curLayer.cols && outRoi.height == curLayer.rows)
        {
            if (stream)
                stream.enqueueCopy(curLayer, outImg);
            else
                curLayer.copyTo(outImg);
        }

        if (outRoi.width >= curLayer.cols && outRoi.height >= curLayer.rows)
            break;

        lastScale = curScale;
        lastLayer = curLayer;
    }

    func_t func = funcs[outImg.depth()][outImg.channels() - 1];
    CV_Assert(func != 0);

    func(lastLayer, outImg, StreamAccessor::getStream(stream));
}

#endif /* !defined (HAVE_CUDA) */
modules/gpu/src/pyramids.cpp (new file, 249 lines)
@@ -0,0 +1,249 @@
/*M// standard OpenCV BSD-style license header (unchanged boilerplate) //M*/
#include "precomp.hpp"
|
||||
|
||||
#ifndef HAVE_CUDA
|
||||
|
||||
void cv::gpu::pyrDown(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
|
||||
void cv::gpu::pyrUp(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
|
||||
void cv::gpu::ImagePyramid::build(const GpuMat&, int, Stream&) { throw_nogpu(); }
|
||||
void cv::gpu::ImagePyramid::getLayer(GpuMat&, Size, Stream&) const { throw_nogpu(); }
|
||||
|
||||
#else // HAVE_CUDA
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// pyrDown
|
||||
|
||||
namespace cv { namespace gpu { namespace device
|
||||
{
|
||||
namespace imgproc
|
||||
{
|
||||
template <typename T> void pyrDown_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
void cv::gpu::pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::device::imgproc;
|
||||
|
||||
typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[6][4] =
|
||||
{
|
||||
{pyrDown_gpu<uchar> , 0 /*pyrDown_gpu<uchar2>*/ , pyrDown_gpu<uchar3> , pyrDown_gpu<uchar4> },
|
||||
{0 /*pyrDown_gpu<schar>*/, 0 /*pyrDown_gpu<schar2>*/ , 0 /*pyrDown_gpu<schar3>*/, 0 /*pyrDown_gpu<schar4>*/},
|
||||
{pyrDown_gpu<ushort> , 0 /*pyrDown_gpu<ushort2>*/, pyrDown_gpu<ushort3> , pyrDown_gpu<ushort4> },
|
||||
{pyrDown_gpu<short> , 0 /*pyrDown_gpu<short2>*/ , pyrDown_gpu<short3> , pyrDown_gpu<short4> },
|
||||
{0 /*pyrDown_gpu<int>*/ , 0 /*pyrDown_gpu<int2>*/ , 0 /*pyrDown_gpu<int3>*/ , 0 /*pyrDown_gpu<int4>*/ },
|
||||
{pyrDown_gpu<float> , 0 /*pyrDown_gpu<float2>*/ , pyrDown_gpu<float3> , pyrDown_gpu<float4> }
|
||||
};
|
||||
|
||||
CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
|
||||
|
||||
const func_t func = funcs[src.depth()][src.channels() - 1];
|
||||
CV_Assert(func != 0);
|
||||
|
||||
dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());
|
||||
|
||||
func(src, dst, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// pyrUp
|
||||
|
||||
namespace cv { namespace gpu { namespace device
|
||||
{
|
||||
namespace imgproc
|
||||
{
|
||||
template <typename T> void pyrUp_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::device::imgproc;
|
||||
|
||||
typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[6][4] =
|
||||
{
|
||||
{pyrUp_gpu<uchar> , 0 /*pyrUp_gpu<uchar2>*/ , pyrUp_gpu<uchar3> , pyrUp_gpu<uchar4> },
|
||||
{0 /*pyrUp_gpu<schar>*/, 0 /*pyrUp_gpu<schar2>*/ , 0 /*pyrUp_gpu<schar3>*/, 0 /*pyrUp_gpu<schar4>*/},
|
||||
{pyrUp_gpu<ushort> , 0 /*pyrUp_gpu<ushort2>*/, pyrUp_gpu<ushort3> , pyrUp_gpu<ushort4> },
|
||||
{pyrUp_gpu<short> , 0 /*pyrUp_gpu<short2>*/ , pyrUp_gpu<short3> , pyrUp_gpu<short4> },
|
||||
{0 /*pyrUp_gpu<int>*/ , 0 /*pyrUp_gpu<int2>*/ , 0 /*pyrUp_gpu<int3>*/ , 0 /*pyrUp_gpu<int4>*/ },
|
||||
{pyrUp_gpu<float> , 0 /*pyrUp_gpu<float2>*/ , pyrUp_gpu<float3> , pyrUp_gpu<float4> }
|
||||
};
|
||||
|
||||
CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
|
||||
|
||||
const func_t func = funcs[src.depth()][src.channels() - 1];
|
||||
CV_Assert(func != 0);
|
||||
|
||||
dst.create(src.rows * 2, src.cols * 2, src.type());
|
||||
|
||||
func(src, dst, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
// ImagePyramid
|
||||
|
||||
namespace cv { namespace gpu { namespace device
|
||||
{
|
||||
namespace pyramid
|
||||
{
|
||||
template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
}
|
||||
}}}
|
||||
|
||||
void cv::gpu::ImagePyramid::build(const GpuMat& img, int numLayers, Stream& stream)
|
||||
{
|
||||
using namespace cv::gpu::device::pyramid;
|
||||
|
||||
typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[6][4] =
|
||||
{
|
||||
{kernelDownsampleX2_gpu<uchar1> , 0 /*kernelDownsampleX2_gpu<uchar2>*/ , kernelDownsampleX2_gpu<uchar3> , kernelDownsampleX2_gpu<uchar4> },
|
||||
{0 /*kernelDownsampleX2_gpu<char1>*/ , 0 /*kernelDownsampleX2_gpu<char2>*/ , 0 /*kernelDownsampleX2_gpu<char3>*/ , 0 /*kernelDownsampleX2_gpu<char4>*/ },
|
||||
{kernelDownsampleX2_gpu<ushort1> , 0 /*kernelDownsampleX2_gpu<ushort2>*/, kernelDownsampleX2_gpu<ushort3> , kernelDownsampleX2_gpu<ushort4> },
|
||||
{0 /*kernelDownsampleX2_gpu<short1>*/ , 0 /*kernelDownsampleX2_gpu<short2>*/ , 0 /*kernelDownsampleX2_gpu<short3>*/, 0 /*kernelDownsampleX2_gpu<short4>*/},
|
||||
{0 /*kernelDownsampleX2_gpu<int1>*/ , 0 /*kernelDownsampleX2_gpu<int2>*/ , 0 /*kernelDownsampleX2_gpu<int3>*/ , 0 /*kernelDownsampleX2_gpu<int4>*/ },
|
||||
{kernelDownsampleX2_gpu<float1> , 0 /*kernelDownsampleX2_gpu<float2>*/ , kernelDownsampleX2_gpu<float3> , kernelDownsampleX2_gpu<float4> }
|
||||
};
|
||||
|
||||
CV_Assert(img.depth() <= CV_32F && img.channels() <= 4);
|
||||
|
||||
const func_t func = funcs[img.depth()][img.channels() - 1];
|
||||
CV_Assert(func != 0);
|
||||
|
||||
layer0_ = img;
|
||||
Size szLastLayer = img.size();
|
||||
nLayers_ = 1;
|
||||
|
||||
if (numLayers <= 0)
|
||||
numLayers = 255; //it will cut-off when any of the dimensions goes 1
|
||||
|
||||
pyramid_.resize(numLayers);
|
||||
|
||||
for (int i = 0; i < numLayers - 1; ++i)
|
||||
{
|
||||
Size szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);
|
||||
|
||||
if (szCurLayer.width == 0 || szCurLayer.height == 0)
|
||||
break;
|
||||
|
||||
ensureSizeIsEnough(szCurLayer, img.type(), pyramid_[i]);
|
||||
nLayers_++;
|
||||
|
||||
const GpuMat& prevLayer = i == 0 ? layer0_ : pyramid_[i - 1];
|
||||
|
||||
func(prevLayer, pyramid_[i], StreamAccessor::getStream(stream));
|
||||
|
||||
szLastLayer = szCurLayer;
|
||||
}
|
||||
}
|
||||
|
||||
void cv::gpu::ImagePyramid::getLayer(GpuMat& outImg, Size outRoi, Stream& stream) const
|
||||
{
|
||||
using namespace cv::gpu::device::pyramid;
|
||||
|
||||
typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
|
||||
|
||||
static const func_t funcs[6][4] =
|
||||
{
|
||||
{kernelInterpolateFrom1_gpu<uchar1> , 0 /*kernelInterpolateFrom1_gpu<uchar2>*/ , kernelInterpolateFrom1_gpu<uchar3> , kernelInterpolateFrom1_gpu<uchar4> },
|
||||
{0 /*kernelInterpolateFrom1_gpu<char1>*/ , 0 /*kernelInterpolateFrom1_gpu<char2>*/ , 0 /*kernelInterpolateFrom1_gpu<char3>*/ , 0 /*kernelInterpolateFrom1_gpu<char4>*/ },
|
||||
{kernelInterpolateFrom1_gpu<ushort1> , 0 /*kernelInterpolateFrom1_gpu<ushort2>*/, kernelInterpolateFrom1_gpu<ushort3> , kernelInterpolateFrom1_gpu<ushort4> },
|
||||
{0 /*kernelInterpolateFrom1_gpu<short1>*/, 0 /*kernelInterpolateFrom1_gpu<short2>*/ , 0 /*kernelInterpolateFrom1_gpu<short3>*/, 0 /*kernelInterpolateFrom1_gpu<short4>*/},
|
||||
{0 /*kernelInterpolateFrom1_gpu<int1>*/ , 0 /*kernelInterpolateFrom1_gpu<int2>*/ , 0 /*kernelInterpolateFrom1_gpu<int3>*/ , 0 /*kernelInterpolateFrom1_gpu<int4>*/ },
|
||||
{kernelInterpolateFrom1_gpu<float1> , 0 /*kernelInterpolateFrom1_gpu<float2>*/ , kernelInterpolateFrom1_gpu<float3> , kernelInterpolateFrom1_gpu<float4> }
|
||||
};
|
||||
|
||||
CV_Assert(outRoi.width <= layer0_.cols && outRoi.height <= layer0_.rows && outRoi.width > 0 && outRoi.height > 0);
|
||||
|
||||
ensureSizeIsEnough(outRoi, layer0_.type(), outImg);
|
||||
|
||||
const func_t func = funcs[outImg.depth()][outImg.channels() - 1];
|
||||
CV_Assert(func != 0);
|
||||
|
||||
if (outRoi.width == layer0_.cols && outRoi.height == layer0_.rows)
|
||||
{
|
||||
if (stream)
|
||||
stream.enqueueCopy(layer0_, outImg);
|
||||
else
|
||||
layer0_.copyTo(outImg);
|
||||
}
|
||||
|
||||
float lastScale = 1.0f;
|
||||
float curScale;
|
||||
GpuMat lastLayer = layer0_;
|
||||
GpuMat curLayer;
|
||||
|
||||
for (int i = 0; i < nLayers_ - 1; ++i)
|
||||
{
|
||||
curScale = lastScale * 0.5f;
|
||||
curLayer = pyramid_[i];
|
||||
|
||||
if (outRoi.width == curLayer.cols && outRoi.height == curLayer.rows)
|
||||
{
|
||||
if (stream)
|
||||
stream.enqueueCopy(curLayer, outImg);
|
||||
else
|
||||
curLayer.copyTo(outImg);
|
||||
}
|
||||
|
||||
if (outRoi.width >= curLayer.cols && outRoi.height >= curLayer.rows)
|
||||
break;
|
||||
|
||||
lastScale = curScale;
|
||||
lastLayer = curLayer;
|
||||
}
|
||||
|
||||
func(lastLayer, outImg, StreamAccessor::getStream(stream));
|
||||
}
|
||||
|
||||
#endif // HAVE_CUDA
|
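A hedged usage sketch for the ImagePyramid helper defined above (the input image is hypothetical; the default-constructed pyramid and the explicit Stream::Null() arguments are assumptions based on the declarations in gpu.hpp):

    cv::Mat host = cv::imread("frame.png");                 // hypothetical input image
    cv::gpu::GpuMat d_img(host);

    cv::gpu::ImagePyramid pyr;
    pyr.build(d_img, 4, cv::gpu::Stream::Null());            // each layer is half the size of the previous one

    cv::gpu::GpuMat layer;
    pyr.getLayer(layer, cv::Size(d_img.cols / 3, d_img.rows / 3), cv::gpu::Stream::Null());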
@@ -230,7 +230,7 @@ TEST_P(ProjectPoints, Accuracy)

    d_dst.download(dst);

    ASSERT_EQ(dst_gold.size(), dst.cols);
    ASSERT_EQ(dst_gold.size(), static_cast<size_t>(dst.cols));
    ASSERT_EQ(1, dst.rows);
    ASSERT_EQ(CV_32FC2, dst.type());
@@ -218,7 +218,7 @@ TEST_P(BruteForceMatcher, Match)

    matcher.match(loadMat(query), loadMat(train), matches);

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
@@ -259,7 +259,7 @@ TEST_P(BruteForceMatcher, MatchAdd)

    isMaskSupported = matcher.isMaskSupported();

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
@@ -292,7 +292,7 @@ TEST_P(BruteForceMatcher, KnnMatch2)
    cv::gpu::BruteForceMatcher_GPU_base matcher(distType);
    matcher.knnMatch(loadMat(query), loadMat(train), matches, knn);

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
@@ -324,7 +324,7 @@ TEST_P(BruteForceMatcher, KnnMatch3)
    cv::gpu::BruteForceMatcher_GPU_base matcher(distType);
    matcher.knnMatch(loadMat(query), loadMat(train), matches, knn);

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
@@ -375,7 +375,7 @@ TEST_P(BruteForceMatcher, KnnMatchAdd2)

    isMaskSupported = matcher.isMaskSupported();

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = isMaskSupported ? 1 : 0;
@@ -437,7 +437,7 @@ TEST_P(BruteForceMatcher, KnnMatchAdd3)

    isMaskSupported = matcher.isMaskSupported();

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = isMaskSupported ? 1 : 0;
@@ -485,7 +485,7 @@ TEST_P(BruteForceMatcher, RadiusMatch)

    matcher.radiusMatch(loadMat(query), loadMat(train), matches, radius);

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
@@ -536,7 +536,7 @@ TEST_P(BruteForceMatcher, RadiusMatchAdd)

    isMaskSupported = matcher.isMaskSupported();

    ASSERT_EQ(queryDescCount, matches.size());
    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = isMaskSupported ? 1 : 0;
@@ -598,7 +598,6 @@ struct FAST : TestWithParam<cv::gpu::DeviceInfo>
    image = readImage("features2d/aloe.png", CV_LOAD_IMAGE_GRAYSCALE);
    ASSERT_FALSE(image.empty());

    cv::RNG& rng = cvtest::TS::ptr()->get_rng();
    threshold = 30;

    cv::FAST(image, keypoints_gold, threshold);
@@ -2279,8 +2279,6 @@ PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, Border, int, int)

        cv::gpu::setDevice(devInfo.deviceID());

        cv::RNG& rng = TS::ptr()->get_rng();

        cv::Mat img = readImage("stereobm/aloe-L.png", CV_LOAD_IMAGE_GRAYSCALE);
        ASSERT_FALSE(img.empty());
@@ -3101,105 +3099,6 @@ INSTANTIATE_TEST_CASE_P(ImgProc, Blend, Combine(
    testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////
// pyrDown

PARAM_TEST_CASE(PyrDown, cv::gpu::DeviceInfo, MatType, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    int type;
    bool useRoi;

    cv::Mat src;

    cv::Mat dst_gold;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        type = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::gpu::setDevice(devInfo.deviceID());

        cv::RNG& rng = TS::ptr()->get_rng();

        cv::Size size(rng.uniform(100, 200), rng.uniform(100, 200));

        src = randomMat(rng, size, type, 0.0, 255.0, false);

        cv::pyrDown(src, dst_gold);
    }
};

TEST_P(PyrDown, Accuracy)
{
    cv::Mat dst;

    cv::gpu::GpuMat d_dst;

    cv::gpu::pyrDown(loadMat(src, useRoi), d_dst);

    d_dst.download(dst);

    EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-4 : 1.0);
}

INSTANTIATE_TEST_CASE_P(ImgProc, PyrDown, Combine(
    ALL_DEVICES,
    Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////
// pyrUp

PARAM_TEST_CASE(PyrUp, cv::gpu::DeviceInfo, MatType, UseRoi)
{
    cv::gpu::DeviceInfo devInfo;
    int type;
    bool useRoi;

    cv::Mat src;

    cv::Mat dst_gold;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        type = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::gpu::setDevice(devInfo.deviceID());

        cv::RNG& rng = TS::ptr()->get_rng();

        cv::Size size(rng.uniform(200, 400), rng.uniform(200, 400));

        src = randomMat(rng, size, type, 0.0, 255.0, false);

        cv::pyrUp(src, dst_gold);
    }
};

TEST_P(PyrUp, Accuracy)
{
    cv::Mat dst;

    cv::gpu::GpuMat d_dst;

    cv::gpu::pyrUp(loadMat(src, useRoi), d_dst, cv::BORDER_REFLECT);

    d_dst.download(dst);

    // results differs only on border left and top border due different border extrapolation type
    EXPECT_MAT_NEAR(dst_gold(cv::Range(1, dst_gold.rows), cv::Range(1, dst_gold.cols)), dst(cv::Range(1, dst_gold.rows), cv::Range(1, dst_gold.cols)), src.depth() == CV_32F ? 1e-4 : 1.0);
}

INSTANTIATE_TEST_CASE_P(ImgProc, PyrUp, Combine(
    ALL_DEVICES,
    Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////
// Canny
modules/gpu/test/test_pyramids.cpp (new file, 126 lines)
@@ -0,0 +1,126 @@
/*M// standard OpenCV (Intel) license header (unchanged boilerplate) //M*/
#include "precomp.hpp"
|
||||
|
||||
#ifdef HAVE_CUDA
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// pyrDown
|
||||
|
||||
PARAM_TEST_CASE(PyrDown, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
|
||||
{
|
||||
cv::gpu::DeviceInfo devInfo;
|
||||
cv::Size size;
|
||||
int type;
|
||||
bool useRoi;
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
devInfo = GET_PARAM(0);
|
||||
size = GET_PARAM(1);
|
||||
type = GET_PARAM(2);
|
||||
useRoi = GET_PARAM(3);
|
||||
|
||||
cv::gpu::setDevice(devInfo.deviceID());
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(PyrDown, Accuracy)
|
||||
{
|
||||
cv::Mat src = randomMat(size, type);
|
||||
|
||||
cv::gpu::GpuMat dst = createMat(cv::Size((size.width + 1) / 2, (size.height + 1) / 2), type, useRoi);
|
||||
cv::gpu::pyrDown(loadMat(src, useRoi), dst);
|
||||
|
||||
cv::Mat dst_gold;
|
||||
cv::pyrDown(src, dst_gold);
|
||||
|
||||
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-4 : 1.0);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, PyrDown, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
DIFFERENT_SIZES,
|
||||
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
|
||||
WHOLE_SUBMAT));
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// pyrUp
|
||||
|
||||
PARAM_TEST_CASE(PyrUp, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
|
||||
{
|
||||
cv::gpu::DeviceInfo devInfo;
|
||||
cv::Size size;
|
||||
int type;
|
||||
bool useRoi;
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
devInfo = GET_PARAM(0);
|
||||
size = GET_PARAM(1);
|
||||
type = GET_PARAM(2);
|
||||
useRoi = GET_PARAM(3);
|
||||
|
||||
cv::gpu::setDevice(devInfo.deviceID());
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(PyrUp, Accuracy)
|
||||
{
|
||||
cv::Mat src = randomMat(size, type);
|
||||
|
||||
cv::gpu::GpuMat dst = createMat(cv::Size(size.width * 2, size.height * 2), type, useRoi);
|
||||
cv::gpu::pyrUp(loadMat(src, useRoi), dst);
|
||||
|
||||
cv::Mat dst_gold;
|
||||
cv::pyrUp(src, dst_gold);
|
||||
|
||||
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-4 : 1.0);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, PyrUp, testing::Combine(
|
||||
ALL_DEVICES,
|
||||
DIFFERENT_SIZES,
|
||||
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_16UC1), MatType(CV_16UC3), MatType(CV_16UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
|
||||
WHOLE_SUBMAT));
|
||||
|
||||
#endif // HAVE_CUDA
|
@@ -167,6 +167,8 @@ CV_FLAGS(DftFlags, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX

#define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))

#define WHOLE testing::Values(UseRoi(false))
#define SUBMAT testing::Values(UseRoi(true))
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))

#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))