Removed PtrElemStep; marked DevMem2D as deprecated, PtrStepSz should be used instead
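Migration note, as a sketch (myGpuFunc is a hypothetical wrapper, not part of this commit): PtrStepSz carries the same fields as DevMem2D_ (data, step in bytes, cols, rows), so moving off the deprecated names is normally a pure rename:

    // before (now deprecated):
    void myGpuFunc(cv::gpu::DevMem2Db src, cv::gpu::DevMem2Df dst);
    // after:
    void myGpuFunc(cv::gpu::PtrStepSzb src, cv::gpu::PtrStepSzf dst);

Code that relied on the removed PtrElemStep, whose constructor divided the step by the element size, must switch to PtrStep/PtrStepSz, keep the step in bytes, and use ptr(y) or operator()(y, x) for row access.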
@@ -1,161 +1,160 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_CORE_DevMem2D_HPP__
-#define __OPENCV_CORE_DevMem2D_HPP__
-
-#ifdef __cplusplus
-
-#ifdef __CUDACC__
-    #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
-#else
-    #define __CV_GPU_HOST_DEVICE__
-#endif
-
-namespace cv
-{
-    namespace gpu
-    {
-        // Simple lightweight structures that encapsulate information about an image on the device.
-        // They are intended to be passed to nvcc-compiled code; GpuMat depends on headers that nvcc can't compile.
-
-        template <bool expr> struct StaticAssert;
-        template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
-
-        template<typename T> struct DevPtr
-        {
-            typedef T elem_type;
-            typedef int index_type;
-
-            enum { elem_size = sizeof(elem_type) };
-
-            T* data;
-
-            __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
-            __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
-
-            __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
-            __CV_GPU_HOST_DEVICE__ operator       T*()       { return data; }
-            __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
-        };
-
-        template<typename T> struct PtrSz : public DevPtr<T>
-        {
-            __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
-            __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
-
-            size_t size;
-        };
-
-        template<typename T> struct PtrStep : public DevPtr<T>
-        {
-            __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
-            __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
-
-            /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
-            size_t step;
-
-            __CV_GPU_HOST_DEVICE__       T* ptr(int y = 0)       { return (      T*)( (      char*)DevPtr<T>::data + y * step); }
-            __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
-
-            __CV_GPU_HOST_DEVICE__       T& operator ()(int y, int x)       { return ptr(y)[x]; }
-            __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
-        };
-
-        template <typename T> struct PtrStepSz : public PtrStep<T>
-        {
-            __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
-            __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
-                : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
-
-            int cols;
-            int rows;
-        };
-
-        template <typename T> struct DevMem2D_ : public PtrStepSz<T>
-        {
-            DevMem2D_() {}
-            DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
-
-            template <typename U>
-            explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
-        };
-
-        template<typename T> struct PtrElemStep_ : public PtrStep<T>
-        {
-            PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
-            {
-                StaticAssert<256 % sizeof(T) == 0>::check();
-
-                PtrStep<T>::step /= PtrStep<T>::elem_size;
-            }
-            __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
-            __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
-
-            __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
-            __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
-        };
-
-        template<typename T> struct PtrStep_ : public PtrStep<T>
-        {
-            PtrStep_() {}
-            PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
-        };
-
-        typedef DevMem2D_<unsigned char> DevMem2Db;
-        typedef DevMem2Db DevMem2D;
-        typedef DevMem2D_<float> DevMem2Df;
-        typedef DevMem2D_<int> DevMem2Di;
-
-        typedef PtrStep<unsigned char> PtrStepb;
-        typedef PtrStep<float> PtrStepf;
-        typedef PtrStep<int> PtrStepi;
-
-        typedef PtrElemStep_<unsigned char> PtrElemStep;
-        typedef PtrElemStep_<float> PtrElemStepf;
-        typedef PtrElemStep_<int> PtrElemStepi;
-    }
-}
-
-#endif // __cplusplus
-
-#endif /* __OPENCV_CORE_DevMem2D_HPP__ */
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_DEVPTRS_HPP__
+#define __OPENCV_CORE_DEVPTRS_HPP__
+
+#ifdef __cplusplus
+
+#ifdef __CUDACC__
+    #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#else
+    #define __CV_GPU_HOST_DEVICE__
+#endif
+
+namespace cv
+{
+    namespace gpu
+    {
+        // Simple lightweight structures that encapsulate information about an image on the device.
+        // They are intended to be passed to nvcc-compiled code; GpuMat depends on headers that nvcc can't compile.
+
+        template <bool expr> struct StaticAssert;
+        template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
+
+        template<typename T> struct DevPtr
+        {
+            typedef T elem_type;
+            typedef int index_type;
+
+            enum { elem_size = sizeof(elem_type) };
+
+            T* data;
+
+            __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
+            __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
+
+            __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
+            __CV_GPU_HOST_DEVICE__ operator       T*()       { return data; }
+            __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+        };
+
+        template<typename T> struct PtrSz : public DevPtr<T>
+        {
+            __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
+            __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
+
+            size_t size;
+        };
+
+        template<typename T> struct PtrStep : public DevPtr<T>
+        {
+            __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
+            __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
+
+            /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
+            size_t step;
+
+            __CV_GPU_HOST_DEVICE__       T* ptr(int y = 0)       { return (      T*)( (      char*)DevPtr<T>::data + y * step); }
+            __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
+
+            __CV_GPU_HOST_DEVICE__       T& operator ()(int y, int x)       { return ptr(y)[x]; }
+            __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+        };
+
+        template <typename T> struct PtrStepSz : public PtrStep<T>
+        {
+            __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
+            __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
+                : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
+
+            template <typename U>
+            explicit PtrStepSz(const PtrStepSz<U>& d) : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows) {}
+
+            int cols;
+            int rows;
+        };
+
+        typedef PtrStepSz<unsigned char> PtrStepSzb;
+        typedef PtrStepSz<float> PtrStepSzf;
+        typedef PtrStepSz<int> PtrStepSzi;
+
+        typedef PtrStep<unsigned char> PtrStepb;
+        typedef PtrStep<float> PtrStepf;
+        typedef PtrStep<int> PtrStepi;
+
+#if defined __GNUC__
+    #define __CV_GPU_DEPR_BEFORE__
+    #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated))
+#elif defined(_MSC_VER) //|| defined(__CUDACC__)
+    #pragma deprecated(DevMem2D_)
+    #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated)
+    #define __CV_GPU_DEPR_AFTER__
+#else
+    #define __CV_GPU_DEPR_BEFORE__
+    #define __CV_GPU_DEPR_AFTER__
+#endif
+
+        template <typename T> struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz<T>
+        {
+            DevMem2D_() {}
+            DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
+
+            template <typename U>
+            explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
+        } __CV_GPU_DEPR_AFTER__ ;
+
+        typedef DevMem2D_<unsigned char> DevMem2Db;
+        typedef DevMem2Db DevMem2D;
+        typedef DevMem2D_<float> DevMem2Df;
+        typedef DevMem2D_<int> DevMem2Di;
+
+        //#undef __CV_GPU_DEPR_BEFORE__
+        //#undef __CV_GPU_DEPR_AFTER__
+    }
+}
+
+#endif // __cplusplus
+
+#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */
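A usage sketch for the structures above (addConstant is a hypothetical kernel name, not part of this commit). PtrStepSz<T> is a POD view carrying data, step (always in bytes), cols and rows, so a kernel can index rows through the byte-based stride exactly as PtrStep<T>::ptr does:

    // Adds a constant to every pixel of a single-channel float image.
    __global__ void addConstant(cv::gpu::PtrStepSzf img, float v)
    {
        const int x = blockIdx.x * blockDim.x + threadIdx.x;
        const int y = blockIdx.y * blockDim.y + threadIdx.y;

        if (x < img.cols && y < img.rows)
            img(y, x) += v; // operator()(y, x) == ptr(y)[x]; the row offset is computed in bytes
    }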
@@ -46,7 +46,7 @@
 #ifdef __cplusplus

 #include "opencv2/core/core.hpp"
-#include "opencv2/core/devmem2d.hpp"
+#include "opencv2/core/cuda_devptrs.hpp"

 namespace cv { namespace gpu
 {
@@ -268,10 +268,14 @@ namespace cv { namespace gpu
        template<typename _Tp> _Tp* ptr(int y = 0);
        template<typename _Tp> const _Tp* ptr(int y = 0) const;

-       template <typename _Tp> operator DevMem2D_<_Tp>() const;
-       template <typename _Tp> operator PtrStep_<_Tp>() const;
+       template <typename _Tp> operator PtrStepSz<_Tp>() const;
+       template <typename _Tp> operator PtrStep<_Tp>() const;
+
+       // Deprecated function
+       __CV_GPU_DEPR_BEFORE__ template <typename _Tp> operator DevMem2D_<_Tp>() const __CV_GPU_DEPR_AFTER__;
+       #undef __CV_GPU_DEPR_BEFORE__
+       #undef __CV_GPU_DEPR_AFTER__

        /*! includes several bit-fields:
            - the magic signature
            - continuity flag
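A note on the __CV_GPU_DEPR_BEFORE__ / __CV_GPU_DEPR_AFTER__ sandwich used above: GCC's __attribute__((deprecated)) is placed after the declaration, while MSVC's __declspec(deprecated) must precede it, so cuda_devptrs.hpp defines one half per compiler and each deprecated declaration is wrapped on both sides. Under GCC, for instance, the conversion operator above expands to roughly:

    template <typename _Tp> operator DevMem2D_<_Tp>() const __attribute__ ((deprecated));

so existing callers keep compiling but get a deprecation warning; the #undef lines then release the macro names once the last deprecated declaration has been made.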
@@ -502,14 +506,9 @@ namespace cv { namespace gpu
        return *this;
    }

-   template <class T> inline GpuMat::operator DevMem2D_<T>() const
+   template <class T> inline GpuMat::operator PtrStepSz<T>() const
    {
-       return DevMem2D_<T>(rows, cols, (T*)data, step);
-   }
-
-   template <class T> inline GpuMat::operator PtrStep_<T>() const
-   {
-       return PtrStep_<T>(static_cast< DevMem2D_<T> >(*this));
+       return PtrStepSz<T>(rows, cols, (T*)data, step);
    }

    template <class T> inline GpuMat::operator PtrStep<T>() const
@@ -517,6 +516,11 @@ namespace cv { namespace gpu
        return PtrStep<T>((T*)data, step);
    }

+   template <class T> inline GpuMat::operator DevMem2D_<T>() const
+   {
+       return DevMem2D_<T>(rows, cols, (T*)data, step);
+   }
+
    inline GpuMat createContinuous(int rows, int cols, int type)
    {
        GpuMat m;
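These conversion operators are what let host code hand a GpuMat directly to nvcc-compiled functions. A host-side sketch (reusing the hypothetical addConstant kernel from the note above):

    void addConstantCaller(cv::gpu::GpuMat& img, float v, cudaStream_t stream)
    {
        CV_Assert(img.type() == CV_32FC1);

        const dim3 block(32, 8);
        const dim3 grid((img.cols + block.x - 1) / block.x,
                        (img.rows + block.y - 1) / block.y);

        // The cast invokes GpuMat::operator PtrStepSz<float>(), producing the
        // lightweight {data, step, cols, rows} view defined in cuda_devptrs.hpp.
        addConstant<<<grid, block, 0, stream>>>(static_cast<cv::gpu::PtrStepSzf>(img), v);
    }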
@@ -44,6 +44,18 @@
 #include "opencv2/gpu/device/transform.hpp"
 #include "opencv2/gpu/device/functional.hpp"

+namespace cv { namespace gpu { namespace device
+{
+    void writeScalar(const uchar*);
+    void writeScalar(const schar*);
+    void writeScalar(const ushort*);
+    void writeScalar(const short int*);
+    void writeScalar(const int*);
+    void writeScalar(const float*);
+    void writeScalar(const double*);
+    void convert_gpu(PtrStepSzb, int, PtrStepSzb, int, double, double, cudaStream_t);
+}}}
+
 namespace cv { namespace gpu { namespace device
 {
     template <typename T> struct shift_and_sizeof;
@@ -59,17 +71,17 @@ namespace cv { namespace gpu { namespace device
     ////////////////////////////////// CopyTo /////////////////////////////////
     ///////////////////////////////////////////////////////////////////////////

-    template <typename T> void copyToWithMask(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream)
+    template <typename T> void copyToWithMask(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream)
     {
         if (colorMask)
-            cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMask(mask), stream);
+            cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMask(mask), stream);
         else
-            cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream);
+            cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<T>)dst, identity<T>(), SingleMaskChannels(mask, cn), stream);
     }

-    void copyToWithMask_gpu(DevMem2Db src, DevMem2Db dst, size_t elemSize1, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream)
+    void copyToWithMask_gpu(PtrStepSzb src, PtrStepSzb dst, size_t elemSize1, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream)
     {
-        typedef void (*func_t)(DevMem2Db src, DevMem2Db dst, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream);
+        typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream);

         static func_t tab[] =
         {
@@ -164,7 +176,7 @@ namespace cv { namespace gpu { namespace device
         }
     }
     template <typename T>
-    void set_to_gpu(DevMem2Db mat, const T* scalar, DevMem2Db mask, int channels, cudaStream_t stream)
+    void set_to_gpu(PtrStepSzb mat, const T* scalar, PtrStepSzb mask, int channels, cudaStream_t stream)
     {
         writeScalar(scalar);

@@ -178,16 +190,16 @@ namespace cv { namespace gpu { namespace device
         cudaSafeCall ( cudaDeviceSynchronize() );
     }

-    template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<int   >(DevMem2Db mat, const int* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
-    template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<int   >(PtrStepSzb mat, const int* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);
+    template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);

     template <typename T>
-    void set_to_gpu(DevMem2Db mat, const T* scalar, int channels, cudaStream_t stream)
+    void set_to_gpu(PtrStepSzb mat, const T* scalar, int channels, cudaStream_t stream)
     {
         writeScalar(scalar);

@@ -201,13 +213,13 @@ namespace cv { namespace gpu { namespace device
         cudaSafeCall ( cudaDeviceSynchronize() );
     }

-    template void set_to_gpu<uchar >(DevMem2Db mat, const uchar* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<schar >(DevMem2Db mat, const schar* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<ushort>(DevMem2Db mat, const ushort* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<short >(DevMem2Db mat, const short* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<int   >(DevMem2Db mat, const int* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<float >(DevMem2Db mat, const float* scalar, int channels, cudaStream_t stream);
-    template void set_to_gpu<double>(DevMem2Db mat, const double* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<uchar >(PtrStepSzb mat, const uchar* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<schar >(PtrStepSzb mat, const schar* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<ushort>(PtrStepSzb mat, const ushort* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<short >(PtrStepSzb mat, const short* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<int   >(PtrStepSzb mat, const int* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<float >(PtrStepSzb mat, const float* scalar, int channels, cudaStream_t stream);
+    template void set_to_gpu<double>(PtrStepSzb mat, const double* scalar, int channels, cudaStream_t stream);

     ///////////////////////////////////////////////////////////////////////////
     //////////////////////////////// ConvertTo ////////////////////////////////
@@ -274,12 +286,12 @@ namespace cv { namespace gpu { namespace device
     };

     template<typename T, typename D>
-    void cvt_(DevMem2Db src, DevMem2Db dst, double alpha, double beta, cudaStream_t stream)
+    void cvt_(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, cudaStream_t stream)
     {
         cudaSafeCall( cudaSetDoubleForDevice(&alpha) );
         cudaSafeCall( cudaSetDoubleForDevice(&beta) );
         Convertor<T, D> op(alpha, beta);
-        cv::gpu::device::transform((DevMem2D_<T>)src, (DevMem2D_<D>)dst, op, WithOutMask(), stream);
+        cv::gpu::device::transform((PtrStepSz<T>)src, (PtrStepSz<D>)dst, op, WithOutMask(), stream);
     }

 #if defined __clang__
@@ -287,9 +299,9 @@ namespace cv { namespace gpu { namespace device
 #  pragma clang diagnostic ignored "-Wmissing-declarations"
 #endif

-    void convert_gpu(DevMem2Db src, int sdepth, DevMem2Db dst, int ddepth, double alpha, double beta, cudaStream_t stream)
+    void convert_gpu(PtrStepSzb src, int sdepth, PtrStepSzb dst, int ddepth, double alpha, double beta, cudaStream_t stream)
     {
-        typedef void (*caller_t)(DevMem2Db src, DevMem2Db dst, double alpha, double beta, cudaStream_t stream);
+        typedef void (*caller_t)(PtrStepSzb src, PtrStepSzb dst, double alpha, double beta, cudaStream_t stream);

         static const caller_t tab[8][8] =
         {
@@ -761,15 +761,15 @@ namespace

 namespace cv { namespace gpu { namespace device
 {
-    void copyToWithMask_gpu(DevMem2Db src, DevMem2Db dst, size_t elemSize1, int cn, DevMem2Db mask, bool colorMask, cudaStream_t stream);
+    void copyToWithMask_gpu(PtrStepSzb src, PtrStepSzb dst, size_t elemSize1, int cn, PtrStepSzb mask, bool colorMask, cudaStream_t stream);

     template <typename T>
-    void set_to_gpu(DevMem2Db mat, const T* scalar, int channels, cudaStream_t stream);
+    void set_to_gpu(PtrStepSzb mat, const T* scalar, int channels, cudaStream_t stream);

     template <typename T>
-    void set_to_gpu(DevMem2Db mat, const T* scalar, DevMem2Db mask, int channels, cudaStream_t stream);
+    void set_to_gpu(PtrStepSzb mat, const T* scalar, PtrStepSzb mask, int channels, cudaStream_t stream);

-    void convert_gpu(DevMem2Db src, int sdepth, DevMem2Db dst, int ddepth, double alpha, double beta, cudaStream_t stream);
+    void convert_gpu(PtrStepSzb src, int sdepth, PtrStepSzb dst, int ddepth, double alpha, double beta, cudaStream_t stream);
 }}}

 namespace
@@ -787,9 +787,22 @@ namespace
         }
     }

 namespace cv { namespace gpu
 {
-    CV_EXPORTS void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream = 0)
+    CV_EXPORTS void copyWithMask(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, const cv::gpu::GpuMat&, CUstream_st*);
+    CV_EXPORTS void convertTo(const cv::gpu::GpuMat&, cv::gpu::GpuMat&);
+    CV_EXPORTS void convertTo(const cv::gpu::GpuMat&, cv::gpu::GpuMat&, double, double, CUstream_st*);
+    CV_EXPORTS void setTo(cv::gpu::GpuMat&, cv::Scalar, CUstream_st*);
+    CV_EXPORTS void setTo(cv::gpu::GpuMat&, cv::Scalar, const cv::gpu::GpuMat&, CUstream_st*);
+    CV_EXPORTS void setTo(cv::gpu::GpuMat&, cv::Scalar);
+    CV_EXPORTS void setTo(cv::gpu::GpuMat&, cv::Scalar, const cv::gpu::GpuMat&);
+}}
+
+
+namespace cv { namespace gpu
+{
+    void copyWithMask(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream = 0)
     {
         CV_Assert(src.size() == dst.size() && src.type() == dst.type());
         CV_Assert(src.size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == src.channels()));
@@ -797,17 +810,17 @@ namespace cv { namespace gpu
         cv::gpu::device::copyToWithMask_gpu(src.reshape(1), dst.reshape(1), src.elemSize1(), src.channels(), mask.reshape(1), mask.channels() != 1, stream);
     }

-    CV_EXPORTS void convertTo(const GpuMat& src, GpuMat& dst)
+    void convertTo(const GpuMat& src, GpuMat& dst)
     {
         cv::gpu::device::convert_gpu(src.reshape(1), src.depth(), dst.reshape(1), dst.depth(), 1.0, 0.0, 0);
     }

-    CV_EXPORTS void convertTo(const GpuMat& src, GpuMat& dst, double alpha, double beta, cudaStream_t stream = 0)
+    void convertTo(const GpuMat& src, GpuMat& dst, double alpha, double beta, cudaStream_t stream = 0)
     {
         cv::gpu::device::convert_gpu(src.reshape(1), src.depth(), dst.reshape(1), dst.depth(), alpha, beta, stream);
     }

-    CV_EXPORTS void setTo(GpuMat& src, Scalar s, cudaStream_t stream)
+    void setTo(GpuMat& src, Scalar s, cudaStream_t stream)
     {
         typedef void (*caller_t)(GpuMat& src, Scalar s, cudaStream_t stream);

@@ -820,7 +833,7 @@ namespace cv { namespace gpu
         callers[src.depth()](src, s, stream);
     }

-    CV_EXPORTS void setTo(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream)
+    void setTo(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream)
     {
         typedef void (*caller_t)(GpuMat& src, Scalar s, const GpuMat& mask, cudaStream_t stream);

@@ -833,12 +846,12 @@ namespace cv { namespace gpu
         callers[src.depth()](src, s, mask, stream);
     }

-    CV_EXPORTS void setTo(GpuMat& src, Scalar s)
+    void setTo(GpuMat& src, Scalar s)
     {
         setTo(src, s, 0);
     }

-    CV_EXPORTS void setTo(GpuMat& src, Scalar s, const GpuMat& mask)
+    void setTo(GpuMat& src, Scalar s, const GpuMat& mask)
     {
         setTo(src, s, mask, 0);
     }