renamed gpu namespace -> cuda

Vladislav Vinogradov 2013-08-28 15:45:13 +04:00
parent e12496d150
commit e895b7455e
343 changed files with 3882 additions and 3882 deletions
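For code built against OpenCV's CUDA support this commit is a pure rename: every public symbol previously under cv::gpu now lives under cv::cuda, with signatures unchanged. Below is a minimal before/after sketch of the user-side impact (hypothetical application code; it uses only identifiers that appear in this diff and assumes the 3.0-dev header layout of this commit):

#include "opencv2/core.hpp"
#include "opencv2/core/gpu.hpp"   // after this commit declares cv::cuda::GpuMat and the device management API

int main()
{
    // was: cv::gpu::getCudaEnabledDeviceCount(), cv::gpu::setDevice(0)
    if (cv::cuda::getCudaEnabledDeviceCount() == 0)
        return 0;
    cv::cuda::setDevice(0);

    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));

    // was: cv::gpu::GpuMat; upload/download keep the same signatures
    cv::cuda::GpuMat device;
    device.upload(host);
    device.download(host);
    return 0;
}

In practice the 343 changed files below amount to a textual replace of cv::gpu:: with cv::cuda:: (and of namespace gpu with namespace cuda inside namespace cv); no behavior changes are introduced.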

View File

@ -490,7 +490,7 @@ namespace ogl
class CV_EXPORTS Arrays;
}
namespace gpu
namespace cuda
{
class CV_EXPORTS GpuMat;
class CV_EXPORTS CudaMem;

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_GPU_DEVICE_BLOCK_HPP__
#define __OPENCV_GPU_DEVICE_BLOCK_HPP__
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
struct Block
{

View File

@ -47,7 +47,7 @@
#include "vec_traits.hpp"
#include "vec_math.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
//////////////////////////////////////////////////////////////
// BrdConstant
@ -709,6 +709,6 @@ namespace cv { namespace gpu { namespace cudev
int width;
D val;
};
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__

View File

@ -45,7 +45,7 @@
#include "detail/color_detail.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
// All OPENCV_GPU_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
// template <typename T> class ColorSpace1_to_ColorSpace2_traits
@ -296,6 +296,6 @@ namespace cv { namespace gpu { namespace cudev
OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
#undef OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__

View File

@ -56,7 +56,7 @@
#endif
#endif
namespace cv { namespace gpu {
namespace cv { namespace cuda {
static inline void checkCudaError(cudaError_t err, const char* file, const int line, const char* func)
{
if (cudaSuccess != err)
@ -66,13 +66,13 @@ namespace cv { namespace gpu {
#ifndef cudaSafeCall
#if defined(__GNUC__)
#define cudaSafeCall(expr) cv::gpu::checkCudaError(expr, __FILE__, __LINE__, __func__)
#define cudaSafeCall(expr) cv::cuda::checkCudaError(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define cudaSafeCall(expr) cv::gpu::checkCudaError(expr, __FILE__, __LINE__, "")
#define cudaSafeCall(expr) cv::cuda::checkCudaError(expr, __FILE__, __LINE__, "")
#endif
#endif
namespace cv { namespace gpu
namespace cv { namespace cuda
{
template <typename T> static inline bool isAligned(const T* ptr, size_t size)
{
@ -85,7 +85,7 @@ namespace cv { namespace gpu
}
}}
namespace cv { namespace gpu
namespace cv { namespace cuda
{
namespace cudev
{

View File

@ -45,7 +45,7 @@
#include "common.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
@ -100,6 +100,6 @@ namespace cv { namespace gpu { namespace cudev
#undef OPENCV_GPU_ASM_PTR
#endif // __CUDA_ARCH__ >= 200
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__

View File

@ -49,7 +49,7 @@
#include "../limits.hpp"
#include "../functional.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
#ifndef CV_DESCALE
#define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))
@ -146,7 +146,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2RGB<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2RGB<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -219,7 +219,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2RGB5x5<scn, bidx, green_bits> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2RGB5x5<scn, bidx, green_bits> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -300,7 +300,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB5x52RGB<dcn, bidx, green_bits> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB5x52RGB<dcn, bidx, green_bits> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -346,7 +346,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::Gray2RGB<T, dcn> functor_type; \
typedef ::cv::cuda::cudev::color_detail::Gray2RGB<T, dcn> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -388,7 +388,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::Gray2RGB5x5<green_bits> functor_type; \
typedef ::cv::cuda::cudev::color_detail::Gray2RGB5x5<green_bits> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -430,7 +430,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \
struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB5x52Gray<green_bits> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB5x52Gray<green_bits> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -481,7 +481,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2Gray<T, scn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2Gray<T, scn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -532,7 +532,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2YUV<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2YUV<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -620,7 +620,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::YUV2RGB<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::YUV2RGB<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -699,7 +699,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2YCrCb<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2YCrCb<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -778,7 +778,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::YCrCb2RGB<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::YCrCb2RGB<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -854,7 +854,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2XYZ<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2XYZ<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -929,7 +929,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::XYZ2RGB<T, scn, dcn, bidx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::XYZ2RGB<T, scn, dcn, bidx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1067,7 +1067,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HSV<T, scn, dcn, bidx, 180> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HSV<T, scn, dcn, bidx, 180> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1075,7 +1075,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <typename T> struct name ## _full_traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HSV<T, scn, dcn, bidx, 256> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HSV<T, scn, dcn, bidx, 256> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1083,7 +1083,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1091,7 +1091,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _full_traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1207,7 +1207,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::HSV2RGB<T, scn, dcn, bidx, 180> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HSV2RGB<T, scn, dcn, bidx, 180> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1215,7 +1215,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <typename T> struct name ## _full_traits \
{ \
typedef ::cv::gpu::cudev::color_detail::HSV2RGB<T, scn, dcn, bidx, 255> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HSV2RGB<T, scn, dcn, bidx, 255> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1223,7 +1223,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1231,7 +1231,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _full_traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1340,7 +1340,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HLS<T, scn, dcn, bidx, 180> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HLS<T, scn, dcn, bidx, 180> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1348,7 +1348,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <typename T> struct name ## _full_traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HLS<T, scn, dcn, bidx, 256> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HLS<T, scn, dcn, bidx, 256> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1356,7 +1356,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1364,7 +1364,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _full_traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1480,7 +1480,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::HLS2RGB<T, scn, dcn, bidx, 180> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HLS2RGB<T, scn, dcn, bidx, 180> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1488,7 +1488,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <typename T> struct name ## _full_traits \
{ \
typedef ::cv::gpu::cudev::color_detail::HLS2RGB<T, scn, dcn, bidx, 255> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HLS2RGB<T, scn, dcn, bidx, 255> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1496,7 +1496,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1504,7 +1504,7 @@ namespace cv { namespace gpu { namespace cudev
}; \
template <> struct name ## _full_traits<float> \
{ \
typedef ::cv::gpu::cudev::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
typedef ::cv::cuda::cudev::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1649,7 +1649,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1762,7 +1762,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1861,7 +1861,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1962,7 +1962,7 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
template <typename T> struct name ## _traits \
{ \
typedef ::cv::gpu::cudev::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
typedef ::cv::cuda::cudev::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
static __host__ __device__ __forceinline__ functor_type create_functor() \
{ \
return functor_type(); \
@ -1971,6 +1971,6 @@ namespace cv { namespace gpu { namespace cudev
#undef CV_DESCALE
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__

View File

@ -47,7 +47,7 @@
#include "../warp.hpp"
#include "../warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace reduce_detail
{

View File

@ -47,7 +47,7 @@
#include "../warp.hpp"
#include "../warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace reduce_key_val_detail
{

View File

@ -47,7 +47,7 @@
#include "../vec_traits.hpp"
#include "../functional.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace transform_detail
{
@ -390,6 +390,6 @@ namespace cv { namespace gpu { namespace cudev
}
};
} // namespace transform_detail
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_TRANSFORM_DETAIL_HPP__

View File

@ -46,7 +46,7 @@
#include "../common.hpp"
#include "../vec_traits.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace type_traits_detail
{
@ -182,6 +182,6 @@ namespace cv { namespace gpu { namespace cudev
enum { value = 1 };
};
} // namespace type_traits_detail
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__

View File

@ -45,7 +45,7 @@
#include "../datamov_utils.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace vec_distance_detail
{
@ -112,6 +112,6 @@ namespace cv { namespace gpu { namespace cudev
}
};
} // namespace vec_distance_detail
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__
#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template<class T> struct DynamicSharedMem
{

View File

@ -46,7 +46,7 @@
#include "common.hpp"
#include "warp_reduce.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
struct Emulation
{
@ -256,6 +256,6 @@ namespace cv { namespace gpu { namespace cudev
}
};
}; //struct Emulation
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* OPENCV_GPU_EMULATION_HPP_ */

View File

@ -48,7 +48,7 @@
#include "vec_math.hpp"
#include "type_traits.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename Ptr2D> struct PointFilter
{
@ -273,6 +273,6 @@ namespace cv { namespace gpu { namespace cudev
float scale_x, scale_y;
int width, haight;
};
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_FILTERS_HPP__

View File

@ -45,7 +45,7 @@
#include <cstdio>
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template<class Func>
void printFuncAttrib(Func& func)
@ -66,6 +66,6 @@ namespace cv { namespace gpu { namespace cudev
printf("\n");
fflush(stdout);
}
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ */

View File

@ -49,7 +49,7 @@
#include "type_traits.hpp"
#include "device_functions.h"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
// Function Objects
template<typename Argument, typename Result> struct unary_function : public std::unary_function<Argument, Result> {};
@ -784,6 +784,6 @@ namespace cv { namespace gpu { namespace cudev
#define OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(type) \
template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_FUNCTIONAL_HPP__

View File

@ -47,7 +47,7 @@
#include <float.h>
#include "common.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <class T> struct numeric_limits;
@ -117,6 +117,6 @@ template <> struct numeric_limits<double>
static const bool is_signed = true;
};
}}} // namespace cv { namespace gpu { namespace cudev {
}}} // namespace cv { namespace cuda { namespace cudev {
#endif // __OPENCV_GPU_LIMITS_GPU_HPP__

View File

@ -47,7 +47,7 @@
#include "detail/reduce.hpp"
#include "detail/reduce_key_val.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <int N, typename T, class Op>
__device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)

View File

@ -45,7 +45,7 @@
#include "common.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }

View File

@ -48,7 +48,7 @@
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
@ -174,13 +174,13 @@ namespace cv { namespace gpu { namespace cudev
__device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
{
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::gpu::cudev::Warp::laneId();
const unsigned int laneId = cv::cuda::cudev::Warp::laneId();
// scan on shuffl functions
#pragma unroll
for (int i = 1; i <= (OPENCV_GPU_WARP_SIZE / 2); i *= 2)
{
const T n = cv::gpu::cudev::shfl_up(idata, i);
const T n = cv::cuda::cudev::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}

View File

@ -123,7 +123,7 @@
vmin4(a,b) per-byte unsigned minimum: min(a, b)
*/
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
// 2

View File

@ -47,7 +47,7 @@
#include "utility.hpp"
#include "detail/transform_detail.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T, typename D, typename UnOp, typename Mask>
static inline void transform(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, const Mask& mask, cudaStream_t stream)

View File

@ -45,7 +45,7 @@
#include "detail/type_traits_detail.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T> struct IsSimpleParameter
{

View File

@ -46,7 +46,7 @@
#include "saturate_cast.hpp"
#include "datamov_utils.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
#define OPENCV_GPU_LOG_WARP_SIZE (5)
#define OPENCV_GPU_WARP_SIZE (1 << OPENCV_GPU_LOG_WARP_SIZE)
@ -208,6 +208,6 @@ namespace cv { namespace gpu { namespace cudev
return false;
}
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_UTILITY_HPP__

View File

@ -47,7 +47,7 @@
#include "functional.hpp"
#include "detail/vec_distance_detail.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T> struct L1Dist
{
@ -219,6 +219,6 @@ namespace cv { namespace gpu { namespace cudev
U vec1Vals[MAX_LEN / THREAD_DIM];
};
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_VEC_DISTANCE_HPP__

View File

@ -46,7 +46,7 @@
#include "vec_traits.hpp"
#include "saturate_cast.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
// saturate_cast
@ -917,6 +917,6 @@ CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double)
#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC
}}} // namespace cv { namespace gpu { namespace device
}}} // namespace cv { namespace cuda { namespace device
#endif // __OPENCV_GPU_VECMATH_HPP__

View File

@ -45,7 +45,7 @@
#include "common.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template<typename T, int N> struct TypeVec;
@ -275,6 +275,6 @@ namespace cv { namespace gpu { namespace cudev
static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);}
static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);}
};
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif // __OPENCV_GPU_VEC_TRAITS_HPP__

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_GPU_DEVICE_WARP_HPP__
#define __OPENCV_GPU_DEVICE_WARP_HPP__
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
struct Warp
{
@ -126,6 +126,6 @@ namespace cv { namespace gpu { namespace cudev
*t = value;
}
};
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */

View File

@ -43,7 +43,7 @@
#ifndef OPENCV_GPU_WARP_REDUCE_HPP__
#define OPENCV_GPU_WARP_REDUCE_HPP__
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <class T>
__device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x)
@ -63,6 +63,6 @@ namespace cv { namespace gpu { namespace cudev
return ptr[tid - lane];
}
}}} // namespace cv { namespace gpu { namespace cudev {
}}} // namespace cv { namespace cuda { namespace cudev {
#endif /* OPENCV_GPU_WARP_REDUCE_HPP__ */

View File

@ -43,7 +43,7 @@
#ifndef __OPENCV_GPU_WARP_SHUFFLE_HPP__
#define __OPENCV_GPU_WARP_SHUFFLE_HPP__
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T>
__device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize)

View File

@ -51,7 +51,7 @@
#include "opencv2/core.hpp"
#include "opencv2/core/gpu_types.hpp"
namespace cv { namespace gpu {
namespace cv { namespace cuda {
//////////////////////////////// GpuMat ///////////////////////////////
@ -664,12 +664,12 @@ private:
CV_EXPORTS void printCudaDeviceInfo(int device);
CV_EXPORTS void printShortCudaDeviceInfo(int device);
}} // namespace cv { namespace gpu {
}} // namespace cv { namespace cuda {
namespace cv {
template <> CV_EXPORTS void Ptr<cv::gpu::Stream::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::gpu::Event::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::cuda::Stream::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::cuda::Event::Impl>::delete_obj();
}

View File

@ -46,7 +46,7 @@
#include "opencv2/core/gpu.hpp"
namespace cv { namespace gpu {
namespace cv { namespace cuda {
//////////////////////////////// GpuMat ///////////////////////////////
@ -587,14 +587,14 @@ bool DeviceInfo::supports(FeatureSet feature_set) const
return version >= feature_set;
}
}} // namespace cv { namespace gpu {
}} // namespace cv { namespace cuda {
//////////////////////////////// Mat ////////////////////////////////
namespace cv {
inline
Mat::Mat(const gpu::GpuMat& m)
Mat::Mat(const cuda::GpuMat& m)
: flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
{
m.download(*this);

View File

@ -57,7 +57,7 @@
namespace cv
{
namespace gpu
namespace cuda
{
class Stream;
class Event;

View File

@ -55,7 +55,7 @@
namespace cv
{
namespace gpu
namespace cuda
{
// Simple lightweight structures that encapsulates information about an image on device.
// It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile

View File

@ -93,14 +93,14 @@ public:
template<typename _Tp> _InputArray(const _Tp* vec, int n);
template<typename _Tp, int m, int n> _InputArray(const Matx<_Tp, m, n>& matx);
_InputArray(const double& val);
_InputArray(const gpu::GpuMat& d_mat);
_InputArray(const cuda::GpuMat& d_mat);
_InputArray(const ogl::Buffer& buf);
_InputArray(const gpu::CudaMem& cuda_mem);
_InputArray(const cuda::CudaMem& cuda_mem);
template<typename _Tp> _InputArray(const cudev::GpuMat_<_Tp>& m);
virtual Mat getMat(int i=-1) const;
virtual void getMatVector(std::vector<Mat>& mv) const;
virtual gpu::GpuMat getGpuMat() const;
virtual cuda::GpuMat getGpuMat() const;
virtual ogl::Buffer getOGlBuffer() const;
virtual int kind() const;
@ -142,9 +142,9 @@ public:
_OutputArray();
_OutputArray(Mat& m);
_OutputArray(std::vector<Mat>& vec);
_OutputArray(gpu::GpuMat& d_mat);
_OutputArray(cuda::GpuMat& d_mat);
_OutputArray(ogl::Buffer& buf);
_OutputArray(gpu::CudaMem& cuda_mem);
_OutputArray(cuda::CudaMem& cuda_mem);
template<typename _Tp> _OutputArray(cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(std::vector<std::vector<_Tp> >& vec);
@ -155,9 +155,9 @@ public:
_OutputArray(const Mat& m);
_OutputArray(const std::vector<Mat>& vec);
_OutputArray(const gpu::GpuMat& d_mat);
_OutputArray(const cuda::GpuMat& d_mat);
_OutputArray(const ogl::Buffer& buf);
_OutputArray(const gpu::CudaMem& cuda_mem);
_OutputArray(const cuda::CudaMem& cuda_mem);
template<typename _Tp> _OutputArray(const cudev::GpuMat_<_Tp>& m);
template<typename _Tp> _OutputArray(const std::vector<_Tp>& vec);
template<typename _Tp> _OutputArray(const std::vector<std::vector<_Tp> >& vec);
@ -170,9 +170,9 @@ public:
virtual bool fixedType() const;
virtual bool needed() const;
virtual Mat& getMatRef(int i=-1) const;
virtual gpu::GpuMat& getGpuMatRef() const;
virtual cuda::GpuMat& getGpuMatRef() const;
virtual ogl::Buffer& getOGlBufferRef() const;
virtual gpu::CudaMem& getCudaMemRef() const;
virtual cuda::CudaMem& getCudaMemRef() const;
virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
@ -506,7 +506,7 @@ public:
//Mat(const void* img, bool copyData=false);
//! download data from GpuMat
explicit Mat(const gpu::GpuMat& m);
explicit Mat(const cuda::GpuMat& m);
//! destructor - calls release()
~Mat();

View File

@ -99,12 +99,12 @@ public:
//! copy from host/device memory (blocking)
void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! copy from device memory (non blocking)
void copyFrom(InputArray arr, gpu::Stream& stream, Target target = ARRAY_BUFFER, bool autoRelease = false);
void copyFrom(InputArray arr, cuda::Stream& stream, Target target = ARRAY_BUFFER, bool autoRelease = false);
//! copy to host/device memory (blocking)
void copyTo(OutputArray arr) const;
//! copy to device memory (non blocking)
void copyTo(OutputArray arr, gpu::Stream& stream) const;
void copyTo(OutputArray arr, cuda::Stream& stream) const;
//! create copy of current buffer
Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;
@ -120,12 +120,12 @@ public:
void unmapHost();
//! map to device memory (blocking)
gpu::GpuMat mapDevice();
cuda::GpuMat mapDevice();
void unmapDevice();
//! map to device memory (non blocking)
gpu::GpuMat mapDevice(gpu::Stream& stream);
void unmapDevice(gpu::Stream& stream);
cuda::GpuMat mapDevice(cuda::Stream& stream);
void unmapDevice(cuda::Stream& stream);
int rows() const;
int cols() const;
@ -276,7 +276,7 @@ CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS,
}} // namespace cv::ogl
namespace cv { namespace gpu {
namespace cv { namespace cuda {
//! set a CUDA device to use OpenGL interoperability
CV_EXPORTS void setGlDevice(int device = 0);

View File

@ -75,7 +75,7 @@
# endif
#endif
namespace cv { namespace gpu {
namespace cv { namespace cuda {
CV_EXPORTS cv::String getNppErrorMessage(int code);
CV_EXPORTS cv::String getCudaDriverApiErrorMessage(int code);
}}
@ -88,7 +88,7 @@ static inline void throw_no_cuda() { CV_Error(cv::Error::GpuNotSupported, "The l
static inline void throw_no_cuda() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform"); }
namespace cv { namespace gpu
namespace cv { namespace cuda
{
static inline void checkNppError(int code, const char* file, const int line, const char* func)
{
@ -131,11 +131,11 @@ namespace cv { namespace gpu
}}
#if defined(__GNUC__)
#define nppSafeCall(expr) cv::gpu::checkNppError(expr, __FILE__, __LINE__, __func__)
#define cuSafeCall(expr) cv::gpu::checkCudaDriverApiError(expr, __FILE__, __LINE__, __func__)
#define nppSafeCall(expr) cv::cuda::checkNppError(expr, __FILE__, __LINE__, __func__)
#define cuSafeCall(expr) cv::cuda::checkCudaDriverApiError(expr, __FILE__, __LINE__, __func__)
#else /* defined(__CUDACC__) || defined(__MSVC__) */
#define nppSafeCall(expr) cv::gpu::checkNppError(expr, __FILE__, __LINE__, "")
#define cuSafeCall(expr) cv::gpu::checkCudaDriverApiError(expr, __FILE__, __LINE__, "")
#define nppSafeCall(expr) cv::cuda::checkNppError(expr, __FILE__, __LINE__, "")
#define cuSafeCall(expr) cv::cuda::checkCudaDriverApiError(expr, __FILE__, __LINE__, "")
#endif
#endif // HAVE_CUDA

View File

@ -52,13 +52,13 @@
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
/////////////////////////////////////////////////////
/// create
void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
CV_DbgAssert( _rows >= 0 && _cols >= 0 );
@ -108,7 +108,7 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
/////////////////////////////////////////////////////
/// release
void cv::gpu::GpuMat::release()
void cv::cuda::GpuMat::release()
{
if (refcount && CV_XADD(refcount, -1) == 1)
{
@ -124,7 +124,7 @@ void cv::gpu::GpuMat::release()
/////////////////////////////////////////////////////
/// upload
void cv::gpu::GpuMat::upload(InputArray arr)
void cv::cuda::GpuMat::upload(InputArray arr)
{
Mat mat = arr.getMat();
@ -135,7 +135,7 @@ void cv::gpu::GpuMat::upload(InputArray arr)
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}
void cv::gpu::GpuMat::upload(InputArray arr, Stream& _stream)
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
Mat mat = arr.getMat();
@ -150,7 +150,7 @@ void cv::gpu::GpuMat::upload(InputArray arr, Stream& _stream)
/////////////////////////////////////////////////////
/// download
void cv::gpu::GpuMat::download(OutputArray _dst) const
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
@ -160,7 +160,7 @@ void cv::gpu::GpuMat::download(OutputArray _dst) const
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}
void cv::gpu::GpuMat::download(OutputArray _dst, Stream& _stream) const
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
@ -174,7 +174,7 @@ void cv::gpu::GpuMat::download(OutputArray _dst, Stream& _stream) const
/////////////////////////////////////////////////////
/// copyTo
void cv::gpu::GpuMat::copyTo(OutputArray _dst) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
CV_DbgAssert( !empty() );
@ -184,7 +184,7 @@ void cv::gpu::GpuMat::copyTo(OutputArray _dst) const
CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
}
void cv::gpu::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
CV_DbgAssert( !empty() );
@ -220,7 +220,7 @@ namespace
}
}
void cv::gpu::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
@ -279,7 +279,7 @@ namespace
}
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, Stream& stream)
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
@ -333,7 +333,7 @@ GpuMat& cv::gpu::GpuMat::setTo(Scalar value, Stream& stream)
return *this;
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
GpuMat& cv::cuda::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
@ -412,7 +412,7 @@ namespace
}
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
if (rtype < 0)
rtype = type();
@ -453,7 +453,7 @@ void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) con
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
if (rtype < 0)
rtype = type();

View File

@ -44,7 +44,7 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
namespace
{
@ -57,7 +57,7 @@ namespace
}
}
void cv::gpu::CudaMem::create(int rows_, int cols_, int type_)
void cv::cuda::CudaMem::create(int rows_, int cols_, int type_)
{
#ifndef HAVE_CUDA
(void) rows_;
@ -121,7 +121,7 @@ void cv::gpu::CudaMem::create(int rows_, int cols_, int type_)
#endif
}
CudaMem cv::gpu::CudaMem::reshape(int new_cn, int new_rows) const
CudaMem cv::cuda::CudaMem::reshape(int new_cn, int new_rows) const
{
CudaMem hdr = *this;
@ -164,7 +164,7 @@ CudaMem cv::gpu::CudaMem::reshape(int new_cn, int new_rows) const
return hdr;
}
void cv::gpu::CudaMem::release()
void cv::cuda::CudaMem::release()
{
#ifdef HAVE_CUDA
if (refcount && CV_XADD(refcount, -1) == 1)
@ -179,7 +179,7 @@ void cv::gpu::CudaMem::release()
#endif
}
GpuMat cv::gpu::CudaMem::createGpuMatHeader() const
GpuMat cv::cuda::CudaMem::createGpuMatHeader() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -194,7 +194,7 @@ GpuMat cv::gpu::CudaMem::createGpuMatHeader() const
#endif
}
void cv::gpu::registerPageLocked(Mat& m)
void cv::cuda::registerPageLocked(Mat& m)
{
#ifndef HAVE_CUDA
(void) m;
@ -205,7 +205,7 @@ void cv::gpu::registerPageLocked(Mat& m)
#endif
}
void cv::gpu::unregisterPageLocked(Mat& m)
void cv::cuda::unregisterPageLocked(Mat& m)
{
#ifndef HAVE_CUDA
(void) m;

View File

@ -43,9 +43,9 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
int cv::gpu::getCudaEnabledDeviceCount()
int cv::cuda::getCudaEnabledDeviceCount()
{
#ifndef HAVE_CUDA
return 0;
@ -64,7 +64,7 @@ int cv::gpu::getCudaEnabledDeviceCount()
#endif
}
void cv::gpu::setDevice(int device)
void cv::cuda::setDevice(int device)
{
#ifndef HAVE_CUDA
(void) device;
@ -74,7 +74,7 @@ void cv::gpu::setDevice(int device)
#endif
}
int cv::gpu::getDevice()
int cv::cuda::getDevice()
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -86,7 +86,7 @@ int cv::gpu::getDevice()
#endif
}
void cv::gpu::resetDevice()
void cv::cuda::resetDevice()
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -95,7 +95,7 @@ void cv::gpu::resetDevice()
#endif
}
bool cv::gpu::deviceSupports(FeatureSet feature_set)
bool cv::cuda::deviceSupports(FeatureSet feature_set)
{
#ifndef HAVE_CUDA
(void) feature_set;
@ -225,7 +225,7 @@ namespace
#endif
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
bool cv::cuda::TargetArchs::builtWith(cv::cuda::FeatureSet feature_set)
{
#ifndef HAVE_CUDA
(void) feature_set;
@ -236,7 +236,7 @@ bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
#endif
}
bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
bool cv::cuda::TargetArchs::hasPtx(int major, int minor)
{
#ifndef HAVE_CUDA
(void) major;
@ -248,7 +248,7 @@ bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
#endif
}
bool cv::gpu::TargetArchs::hasBin(int major, int minor)
bool cv::cuda::TargetArchs::hasBin(int major, int minor)
{
#ifndef HAVE_CUDA
(void) major;
@ -260,7 +260,7 @@ bool cv::gpu::TargetArchs::hasBin(int major, int minor)
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
bool cv::cuda::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{
#ifndef HAVE_CUDA
(void) major;
@ -272,7 +272,7 @@ bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
bool cv::cuda::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{
#ifndef HAVE_CUDA
(void) major;
@ -284,7 +284,7 @@ bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
#endif
}
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
bool cv::cuda::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{
#ifndef HAVE_CUDA
(void) major;
@ -345,7 +345,7 @@ namespace
#endif
const char* cv::gpu::DeviceInfo::name() const
const char* cv::cuda::DeviceInfo::name() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -355,7 +355,7 @@ const char* cv::gpu::DeviceInfo::name() const
#endif
}
size_t cv::gpu::DeviceInfo::totalGlobalMem() const
size_t cv::cuda::DeviceInfo::totalGlobalMem() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -365,7 +365,7 @@ size_t cv::gpu::DeviceInfo::totalGlobalMem() const
#endif
}
size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const
size_t cv::cuda::DeviceInfo::sharedMemPerBlock() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -375,7 +375,7 @@ size_t cv::gpu::DeviceInfo::sharedMemPerBlock() const
#endif
}
int cv::gpu::DeviceInfo::regsPerBlock() const
int cv::cuda::DeviceInfo::regsPerBlock() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -385,7 +385,7 @@ int cv::gpu::DeviceInfo::regsPerBlock() const
#endif
}
int cv::gpu::DeviceInfo::warpSize() const
int cv::cuda::DeviceInfo::warpSize() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -395,7 +395,7 @@ int cv::gpu::DeviceInfo::warpSize() const
#endif
}
size_t cv::gpu::DeviceInfo::memPitch() const
size_t cv::cuda::DeviceInfo::memPitch() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -405,7 +405,7 @@ size_t cv::gpu::DeviceInfo::memPitch() const
#endif
}
int cv::gpu::DeviceInfo::maxThreadsPerBlock() const
int cv::cuda::DeviceInfo::maxThreadsPerBlock() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -415,7 +415,7 @@ int cv::gpu::DeviceInfo::maxThreadsPerBlock() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxThreadsDim() const
Vec3i cv::cuda::DeviceInfo::maxThreadsDim() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -425,7 +425,7 @@ Vec3i cv::gpu::DeviceInfo::maxThreadsDim() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxGridSize() const
Vec3i cv::cuda::DeviceInfo::maxGridSize() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -435,7 +435,7 @@ Vec3i cv::gpu::DeviceInfo::maxGridSize() const
#endif
}
int cv::gpu::DeviceInfo::clockRate() const
int cv::cuda::DeviceInfo::clockRate() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -445,7 +445,7 @@ int cv::gpu::DeviceInfo::clockRate() const
#endif
}
size_t cv::gpu::DeviceInfo::totalConstMem() const
size_t cv::cuda::DeviceInfo::totalConstMem() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -455,7 +455,7 @@ size_t cv::gpu::DeviceInfo::totalConstMem() const
#endif
}
int cv::gpu::DeviceInfo::majorVersion() const
int cv::cuda::DeviceInfo::majorVersion() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -465,7 +465,7 @@ int cv::gpu::DeviceInfo::majorVersion() const
#endif
}
int cv::gpu::DeviceInfo::minorVersion() const
int cv::cuda::DeviceInfo::minorVersion() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -475,7 +475,7 @@ int cv::gpu::DeviceInfo::minorVersion() const
#endif
}
size_t cv::gpu::DeviceInfo::textureAlignment() const
size_t cv::cuda::DeviceInfo::textureAlignment() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -485,7 +485,7 @@ size_t cv::gpu::DeviceInfo::textureAlignment() const
#endif
}
size_t cv::gpu::DeviceInfo::texturePitchAlignment() const
size_t cv::cuda::DeviceInfo::texturePitchAlignment() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -495,7 +495,7 @@ size_t cv::gpu::DeviceInfo::texturePitchAlignment() const
#endif
}
int cv::gpu::DeviceInfo::multiProcessorCount() const
int cv::cuda::DeviceInfo::multiProcessorCount() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -505,7 +505,7 @@ int cv::gpu::DeviceInfo::multiProcessorCount() const
#endif
}
bool cv::gpu::DeviceInfo::kernelExecTimeoutEnabled() const
bool cv::cuda::DeviceInfo::kernelExecTimeoutEnabled() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -515,7 +515,7 @@ bool cv::gpu::DeviceInfo::kernelExecTimeoutEnabled() const
#endif
}
bool cv::gpu::DeviceInfo::integrated() const
bool cv::cuda::DeviceInfo::integrated() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -525,7 +525,7 @@ bool cv::gpu::DeviceInfo::integrated() const
#endif
}
bool cv::gpu::DeviceInfo::canMapHostMemory() const
bool cv::cuda::DeviceInfo::canMapHostMemory() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -535,7 +535,7 @@ bool cv::gpu::DeviceInfo::canMapHostMemory() const
#endif
}
DeviceInfo::ComputeMode cv::gpu::DeviceInfo::computeMode() const
DeviceInfo::ComputeMode cv::cuda::DeviceInfo::computeMode() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -553,7 +553,7 @@ DeviceInfo::ComputeMode cv::gpu::DeviceInfo::computeMode() const
#endif
}
int cv::gpu::DeviceInfo::maxTexture1D() const
int cv::cuda::DeviceInfo::maxTexture1D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -563,7 +563,7 @@ int cv::gpu::DeviceInfo::maxTexture1D() const
#endif
}
int cv::gpu::DeviceInfo::maxTexture1DMipmap() const
int cv::cuda::DeviceInfo::maxTexture1DMipmap() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -578,7 +578,7 @@ int cv::gpu::DeviceInfo::maxTexture1DMipmap() const
#endif
}
int cv::gpu::DeviceInfo::maxTexture1DLinear() const
int cv::cuda::DeviceInfo::maxTexture1DLinear() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -588,7 +588,7 @@ int cv::gpu::DeviceInfo::maxTexture1DLinear() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxTexture2D() const
Vec2i cv::cuda::DeviceInfo::maxTexture2D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -598,7 +598,7 @@ Vec2i cv::gpu::DeviceInfo::maxTexture2D() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxTexture2DMipmap() const
Vec2i cv::cuda::DeviceInfo::maxTexture2DMipmap() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -613,7 +613,7 @@ Vec2i cv::gpu::DeviceInfo::maxTexture2DMipmap() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxTexture2DLinear() const
Vec3i cv::cuda::DeviceInfo::maxTexture2DLinear() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -623,7 +623,7 @@ Vec3i cv::gpu::DeviceInfo::maxTexture2DLinear() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxTexture2DGather() const
Vec2i cv::cuda::DeviceInfo::maxTexture2DGather() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -633,7 +633,7 @@ Vec2i cv::gpu::DeviceInfo::maxTexture2DGather() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxTexture3D() const
Vec3i cv::cuda::DeviceInfo::maxTexture3D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -643,7 +643,7 @@ Vec3i cv::gpu::DeviceInfo::maxTexture3D() const
#endif
}
int cv::gpu::DeviceInfo::maxTextureCubemap() const
int cv::cuda::DeviceInfo::maxTextureCubemap() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -653,7 +653,7 @@ int cv::gpu::DeviceInfo::maxTextureCubemap() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxTexture1DLayered() const
Vec2i cv::cuda::DeviceInfo::maxTexture1DLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -663,7 +663,7 @@ Vec2i cv::gpu::DeviceInfo::maxTexture1DLayered() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxTexture2DLayered() const
Vec3i cv::cuda::DeviceInfo::maxTexture2DLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -673,7 +673,7 @@ Vec3i cv::gpu::DeviceInfo::maxTexture2DLayered() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxTextureCubemapLayered() const
Vec2i cv::cuda::DeviceInfo::maxTextureCubemapLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -683,7 +683,7 @@ Vec2i cv::gpu::DeviceInfo::maxTextureCubemapLayered() const
#endif
}
int cv::gpu::DeviceInfo::maxSurface1D() const
int cv::cuda::DeviceInfo::maxSurface1D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -693,7 +693,7 @@ int cv::gpu::DeviceInfo::maxSurface1D() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxSurface2D() const
Vec2i cv::cuda::DeviceInfo::maxSurface2D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -703,7 +703,7 @@ Vec2i cv::gpu::DeviceInfo::maxSurface2D() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxSurface3D() const
Vec3i cv::cuda::DeviceInfo::maxSurface3D() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -713,7 +713,7 @@ Vec3i cv::gpu::DeviceInfo::maxSurface3D() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxSurface1DLayered() const
Vec2i cv::cuda::DeviceInfo::maxSurface1DLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -723,7 +723,7 @@ Vec2i cv::gpu::DeviceInfo::maxSurface1DLayered() const
#endif
}
Vec3i cv::gpu::DeviceInfo::maxSurface2DLayered() const
Vec3i cv::cuda::DeviceInfo::maxSurface2DLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -733,7 +733,7 @@ Vec3i cv::gpu::DeviceInfo::maxSurface2DLayered() const
#endif
}
int cv::gpu::DeviceInfo::maxSurfaceCubemap() const
int cv::cuda::DeviceInfo::maxSurfaceCubemap() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -743,7 +743,7 @@ int cv::gpu::DeviceInfo::maxSurfaceCubemap() const
#endif
}
Vec2i cv::gpu::DeviceInfo::maxSurfaceCubemapLayered() const
Vec2i cv::cuda::DeviceInfo::maxSurfaceCubemapLayered() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -753,7 +753,7 @@ Vec2i cv::gpu::DeviceInfo::maxSurfaceCubemapLayered() const
#endif
}
size_t cv::gpu::DeviceInfo::surfaceAlignment() const
size_t cv::cuda::DeviceInfo::surfaceAlignment() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -763,7 +763,7 @@ size_t cv::gpu::DeviceInfo::surfaceAlignment() const
#endif
}
bool cv::gpu::DeviceInfo::concurrentKernels() const
bool cv::cuda::DeviceInfo::concurrentKernels() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -773,7 +773,7 @@ bool cv::gpu::DeviceInfo::concurrentKernels() const
#endif
}
bool cv::gpu::DeviceInfo::ECCEnabled() const
bool cv::cuda::DeviceInfo::ECCEnabled() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -783,7 +783,7 @@ bool cv::gpu::DeviceInfo::ECCEnabled() const
#endif
}
int cv::gpu::DeviceInfo::pciBusID() const
int cv::cuda::DeviceInfo::pciBusID() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -793,7 +793,7 @@ int cv::gpu::DeviceInfo::pciBusID() const
#endif
}
int cv::gpu::DeviceInfo::pciDeviceID() const
int cv::cuda::DeviceInfo::pciDeviceID() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -803,7 +803,7 @@ int cv::gpu::DeviceInfo::pciDeviceID() const
#endif
}
int cv::gpu::DeviceInfo::pciDomainID() const
int cv::cuda::DeviceInfo::pciDomainID() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -813,7 +813,7 @@ int cv::gpu::DeviceInfo::pciDomainID() const
#endif
}
bool cv::gpu::DeviceInfo::tccDriver() const
bool cv::cuda::DeviceInfo::tccDriver() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -823,7 +823,7 @@ bool cv::gpu::DeviceInfo::tccDriver() const
#endif
}
int cv::gpu::DeviceInfo::asyncEngineCount() const
int cv::cuda::DeviceInfo::asyncEngineCount() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -833,7 +833,7 @@ int cv::gpu::DeviceInfo::asyncEngineCount() const
#endif
}
bool cv::gpu::DeviceInfo::unifiedAddressing() const
bool cv::cuda::DeviceInfo::unifiedAddressing() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -843,7 +843,7 @@ bool cv::gpu::DeviceInfo::unifiedAddressing() const
#endif
}
int cv::gpu::DeviceInfo::memoryClockRate() const
int cv::cuda::DeviceInfo::memoryClockRate() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -853,7 +853,7 @@ int cv::gpu::DeviceInfo::memoryClockRate() const
#endif
}
int cv::gpu::DeviceInfo::memoryBusWidth() const
int cv::cuda::DeviceInfo::memoryBusWidth() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -863,7 +863,7 @@ int cv::gpu::DeviceInfo::memoryBusWidth() const
#endif
}
int cv::gpu::DeviceInfo::l2CacheSize() const
int cv::cuda::DeviceInfo::l2CacheSize() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -873,7 +873,7 @@ int cv::gpu::DeviceInfo::l2CacheSize() const
#endif
}
int cv::gpu::DeviceInfo::maxThreadsPerMultiProcessor() const
int cv::cuda::DeviceInfo::maxThreadsPerMultiProcessor() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -883,7 +883,7 @@ int cv::gpu::DeviceInfo::maxThreadsPerMultiProcessor() const
#endif
}
void cv::gpu::DeviceInfo::queryMemory(size_t& _totalMemory, size_t& _freeMemory) const
void cv::cuda::DeviceInfo::queryMemory(size_t& _totalMemory, size_t& _freeMemory) const
{
#ifndef HAVE_CUDA
(void) _totalMemory;
@ -901,7 +901,7 @@ void cv::gpu::DeviceInfo::queryMemory(size_t& _totalMemory, size_t& _freeMemory)
#endif
}
bool cv::gpu::DeviceInfo::isCompatible() const
bool cv::cuda::DeviceInfo::isCompatible() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -951,7 +951,7 @@ namespace
#endif
void cv::gpu::printCudaDeviceInfo(int device)
void cv::cuda::printCudaDeviceInfo(int device)
{
#ifndef HAVE_CUDA
(void) device;
@ -1037,7 +1037,7 @@ void cv::gpu::printCudaDeviceInfo(int device)
#endif
}
void cv::gpu::printShortCudaDeviceInfo(int device)
void cv::cuda::printShortCudaDeviceInfo(int device)
{
#ifndef HAVE_CUDA
(void) device;
@ -1251,7 +1251,7 @@ namespace
#endif
String cv::gpu::getNppErrorMessage(int code)
String cv::cuda::getNppErrorMessage(int code)
{
#ifndef HAVE_CUDA
(void) code;
@ -1261,7 +1261,7 @@ String cv::gpu::getNppErrorMessage(int code)
#endif
}
String cv::gpu::getCudaDriverApiErrorMessage(int code)
String cv::cuda::getCudaDriverApiErrorMessage(int code)
{
#ifndef HAVE_CUDA
(void) code;

View File

@ -44,9 +44,9 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
cv::gpu::GpuMat::GpuMat(int rows_, int cols_, int type_, void* data_, size_t step_) :
cv::cuda::GpuMat::GpuMat(int rows_, int cols_, int type_, void* data_, size_t step_) :
flags(Mat::MAGIC_VAL + (type_ & Mat::TYPE_MASK)), rows(rows_), cols(cols_),
step(step_), data((uchar*)data_), refcount(0),
datastart((uchar*)data_), dataend((uchar*)data_)
@ -71,7 +71,7 @@ cv::gpu::GpuMat::GpuMat(int rows_, int cols_, int type_, void* data_, size_t ste
dataend += step * (rows - 1) + minstep;
}
cv::gpu::GpuMat::GpuMat(Size size_, int type_, void* data_, size_t step_) :
cv::cuda::GpuMat::GpuMat(Size size_, int type_, void* data_, size_t step_) :
flags(Mat::MAGIC_VAL + (type_ & Mat::TYPE_MASK)), rows(size_.height), cols(size_.width),
step(step_), data((uchar*)data_), refcount(0),
datastart((uchar*)data_), dataend((uchar*)data_)
@ -95,7 +95,7 @@ cv::gpu::GpuMat::GpuMat(Size size_, int type_, void* data_, size_t step_) :
dataend += step * (rows - 1) + minstep;
}
cv::gpu::GpuMat::GpuMat(const GpuMat& m, Range rowRange_, Range colRange_)
cv::cuda::GpuMat::GpuMat(const GpuMat& m, Range rowRange_, Range colRange_)
{
flags = m.flags;
step = m.step; refcount = m.refcount;
@ -136,7 +136,7 @@ cv::gpu::GpuMat::GpuMat(const GpuMat& m, Range rowRange_, Range colRange_)
rows = cols = 0;
}
cv::gpu::GpuMat::GpuMat(const GpuMat& m, Rect roi) :
cv::cuda::GpuMat::GpuMat(const GpuMat& m, Rect roi) :
flags(m.flags), rows(roi.height), cols(roi.width),
step(m.step), data(m.data + roi.y*step), refcount(m.refcount),
datastart(m.datastart), dataend(m.dataend)
@ -153,7 +153,7 @@ cv::gpu::GpuMat::GpuMat(const GpuMat& m, Rect roi) :
rows = cols = 0;
}
GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
GpuMat cv::cuda::GpuMat::reshape(int new_cn, int new_rows) const
{
GpuMat hdr = *this;
@ -196,7 +196,7 @@ GpuMat cv::gpu::GpuMat::reshape(int new_cn, int new_rows) const
return hdr;
}
void cv::gpu::GpuMat::locateROI(Size& wholeSize, Point& ofs) const
void cv::cuda::GpuMat::locateROI(Size& wholeSize, Point& ofs) const
{
CV_DbgAssert( step > 0 );
@ -222,7 +222,7 @@ void cv::gpu::GpuMat::locateROI(Size& wholeSize, Point& ofs) const
wholeSize.width = std::max(static_cast<int>((delta2 - step * (wholeSize.height - 1)) / esz), ofs.x + cols);
}
GpuMat& cv::gpu::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright)
GpuMat& cv::cuda::GpuMat::adjustROI(int dtop, int dbottom, int dleft, int dright)
{
Size wholeSize;
Point ofs;
@ -262,7 +262,7 @@ namespace
}
}
void cv::gpu::createContinuous(int rows, int cols, int type, OutputArray arr)
void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
{
switch (arr.kind())
{
@ -316,7 +316,7 @@ namespace
}
}
void cv::gpu::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
{
switch (arr.kind())
{
@ -337,7 +337,7 @@ void cv::gpu::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
}
}
GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat& mat)
GpuMat cv::cuda::allocMatFromBuf(int rows, int cols, int type, GpuMat& mat)
{
if (!mat.empty() && mat.type() == type && mat.rows >= rows && mat.cols >= cols)
return mat(Rect(0, 0, cols, rows));
@ -347,7 +347,7 @@ GpuMat cv::gpu::allocMatFromBuf(int rows, int cols, int type, GpuMat& mat)
#ifndef HAVE_CUDA
void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
void cv::cuda::GpuMat::create(int _rows, int _cols, int _type)
{
(void) _rows;
(void) _cols;
@ -355,50 +355,50 @@ void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
throw_no_cuda();
}
void cv::gpu::GpuMat::release()
void cv::cuda::GpuMat::release()
{
}
void cv::gpu::GpuMat::upload(InputArray arr)
void cv::cuda::GpuMat::upload(InputArray arr)
{
(void) arr;
throw_no_cuda();
}
void cv::gpu::GpuMat::upload(InputArray arr, Stream& _stream)
void cv::cuda::GpuMat::upload(InputArray arr, Stream& _stream)
{
(void) arr;
(void) _stream;
throw_no_cuda();
}
void cv::gpu::GpuMat::download(OutputArray _dst) const
void cv::cuda::GpuMat::download(OutputArray _dst) const
{
(void) _dst;
throw_no_cuda();
}
void cv::gpu::GpuMat::download(OutputArray _dst, Stream& _stream) const
void cv::cuda::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
(void) _dst;
(void) _stream;
throw_no_cuda();
}
void cv::gpu::GpuMat::copyTo(OutputArray _dst) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst) const
{
(void) _dst;
throw_no_cuda();
}
void cv::gpu::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
(void) _dst;
(void) _stream;
throw_no_cuda();
}
void cv::gpu::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& _stream) const
void cv::cuda::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& _stream) const
{
(void) _dst;
(void) _mask;
@ -406,7 +406,7 @@ void cv::gpu::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& _stream
throw_no_cuda();
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar s, Stream& _stream)
GpuMat& cv::cuda::GpuMat::setTo(Scalar s, Stream& _stream)
{
(void) s;
(void) _stream;
@ -414,7 +414,7 @@ GpuMat& cv::gpu::GpuMat::setTo(Scalar s, Stream& _stream)
return *this;
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar s, InputArray _mask, Stream& _stream)
GpuMat& cv::cuda::GpuMat::setTo(Scalar s, InputArray _mask, Stream& _stream)
{
(void) s;
(void) _mask;
@ -423,7 +423,7 @@ GpuMat& cv::gpu::GpuMat::setTo(Scalar s, InputArray _mask, Stream& _stream)
return *this;
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& _stream) const
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& _stream) const
{
(void) _dst;
(void) rtype;
@ -431,7 +431,7 @@ void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& _stream) co
throw_no_cuda();
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& _stream) const
void cv::cuda::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& _stream) const
{
(void) _dst;
(void) rtype;

View File

@ -43,14 +43,14 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
////////////////////////////////////////////////////////////////
// Stream
#ifndef HAVE_CUDA
class cv::gpu::Stream::Impl
class cv::cuda::Stream::Impl
{
public:
Impl(void* ptr = 0)
@ -62,7 +62,7 @@ public:
#else
class cv::gpu::Stream::Impl
class cv::cuda::Stream::Impl
{
public:
cudaStream_t stream;
@ -73,29 +73,29 @@ public:
~Impl();
};
cv::gpu::Stream::Impl::Impl() : stream(0)
cv::cuda::Stream::Impl::Impl() : stream(0)
{
cudaSafeCall( cudaStreamCreate(&stream) );
}
cv::gpu::Stream::Impl::Impl(cudaStream_t stream_) : stream(stream_)
cv::cuda::Stream::Impl::Impl(cudaStream_t stream_) : stream(stream_)
{
}
cv::gpu::Stream::Impl::~Impl()
cv::cuda::Stream::Impl::~Impl()
{
if (stream)
cudaStreamDestroy(stream);
}
cudaStream_t cv::gpu::StreamAccessor::getStream(const Stream& stream)
cudaStream_t cv::cuda::StreamAccessor::getStream(const Stream& stream)
{
return stream.impl_->stream;
}
#endif
cv::gpu::Stream::Stream()
cv::cuda::Stream::Stream()
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -104,7 +104,7 @@ cv::gpu::Stream::Stream()
#endif
}
bool cv::gpu::Stream::queryIfComplete() const
bool cv::cuda::Stream::queryIfComplete() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -120,7 +120,7 @@ bool cv::gpu::Stream::queryIfComplete() const
#endif
}
void cv::gpu::Stream::waitForCompletion()
void cv::cuda::Stream::waitForCompletion()
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -129,7 +129,7 @@ void cv::gpu::Stream::waitForCompletion()
#endif
}
void cv::gpu::Stream::waitEvent(const Event& event)
void cv::cuda::Stream::waitEvent(const Event& event)
{
#ifndef HAVE_CUDA
(void) event;
@ -161,7 +161,7 @@ namespace
#endif
void cv::gpu::Stream::enqueueHostCallback(StreamCallback callback, void* userData)
void cv::cuda::Stream::enqueueHostCallback(StreamCallback callback, void* userData)
{
#ifndef HAVE_CUDA
(void) callback;
@ -180,13 +180,13 @@ void cv::gpu::Stream::enqueueHostCallback(StreamCallback callback, void* userDat
#endif
}
Stream& cv::gpu::Stream::Null()
Stream& cv::cuda::Stream::Null()
{
static Stream s(new Impl(0));
return s;
}
cv::gpu::Stream::operator bool_type() const
cv::cuda::Stream::operator bool_type() const
{
#ifndef HAVE_CUDA
return 0;
@ -205,7 +205,7 @@ template <> void cv::Ptr<Stream::Impl>::delete_obj()
#ifndef HAVE_CUDA
class cv::gpu::Event::Impl
class cv::cuda::Event::Impl
{
public:
Impl(unsigned int)
@ -216,7 +216,7 @@ public:
#else
class cv::gpu::Event::Impl
class cv::cuda::Event::Impl
{
public:
cudaEvent_t event;
@ -225,25 +225,25 @@ public:
~Impl();
};
cv::gpu::Event::Impl::Impl(unsigned int flags) : event(0)
cv::cuda::Event::Impl::Impl(unsigned int flags) : event(0)
{
cudaSafeCall( cudaEventCreateWithFlags(&event, flags) );
}
cv::gpu::Event::Impl::~Impl()
cv::cuda::Event::Impl::~Impl()
{
if (event)
cudaEventDestroy(event);
}
cudaEvent_t cv::gpu::EventAccessor::getEvent(const Event& event)
cudaEvent_t cv::cuda::EventAccessor::getEvent(const Event& event)
{
return event.impl_->event;
}
#endif
cv::gpu::Event::Event(CreateFlags flags)
cv::cuda::Event::Event(CreateFlags flags)
{
#ifndef HAVE_CUDA
(void) flags;
@ -253,7 +253,7 @@ cv::gpu::Event::Event(CreateFlags flags)
#endif
}
void cv::gpu::Event::record(Stream& stream)
void cv::cuda::Event::record(Stream& stream)
{
#ifndef HAVE_CUDA
(void) stream;
@ -263,7 +263,7 @@ void cv::gpu::Event::record(Stream& stream)
#endif
}
bool cv::gpu::Event::queryIfComplete() const
bool cv::cuda::Event::queryIfComplete() const
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -279,7 +279,7 @@ bool cv::gpu::Event::queryIfComplete() const
#endif
}
void cv::gpu::Event::waitForCompletion()
void cv::cuda::Event::waitForCompletion()
{
#ifndef HAVE_CUDA
throw_no_cuda();
@ -288,7 +288,7 @@ void cv::gpu::Event::waitForCompletion()
#endif
}
float cv::gpu::Event::elapsedTime(const Event& start, const Event& end)
float cv::cuda::Event::elapsedTime(const Event& start, const Event& end)
{
#ifndef HAVE_CUDA
(void) start;
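
A fragment showing the renamed asynchronous primitives together, here timing work enqueued on a stream (default construction of Event, i.e. the DEFAULT flag, is assumed; the enqueued work itself is left out):

cv::cuda::Stream stream;
cv::cuda::Event start, stop;

start.record(stream);
// ... enqueue uploads / kernels on 'stream' here ...
stop.record(stream);

stop.waitForCompletion();                                   // block until 'stop' has fired
float ms = cv::cuda::Event::elapsedTime(start, stop);       // milliseconds between the two events
(void) ms;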

View File

@ -943,9 +943,9 @@ _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
_InputArray::_InputArray(const std::vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
_InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
_InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
_InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
_InputArray::_InputArray(const cuda::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
_InputArray::_InputArray(const ogl::Buffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const gpu::CudaMem& cuda_mem) : flags(CUDA_MEM), obj((void*)&cuda_mem) {}
_InputArray::_InputArray(const cuda::CudaMem& cuda_mem) : flags(CUDA_MEM), obj((void*)&cuda_mem) {}
_InputArray::~_InputArray() {}
@ -1018,7 +1018,7 @@ Mat _InputArray::getMat(int i) const
if( k == GPU_MAT )
{
CV_Assert( i < 0 );
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for gpu::GpuMat object");
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
return Mat();
}
@ -1027,7 +1027,7 @@ Mat _InputArray::getMat(int i) const
{
CV_Assert( i < 0 );
const gpu::CudaMem* cuda_mem = (const gpu::CudaMem*)obj;
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
return cuda_mem->createMatHeader();
}
@ -1120,33 +1120,33 @@ void _InputArray::getMatVector(std::vector<Mat>& mv) const
}
}
gpu::GpuMat _InputArray::getGpuMat() const
cuda::GpuMat _InputArray::getGpuMat() const
{
int k = kind();
if (k == GPU_MAT)
{
const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return *d_mat;
}
if (k == CUDA_MEM)
{
const gpu::CudaMem* cuda_mem = (const gpu::CudaMem*)obj;
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
return cuda_mem->createGpuMatHeader();
}
if (k == OPENGL_BUFFER)
{
CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapDevice/unmapDevice methods for ogl::Buffer object");
return gpu::GpuMat();
return cuda::GpuMat();
}
if (k == NONE)
return gpu::GpuMat();
return cuda::GpuMat();
CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for gpu::GpuMat and gpu::CudaMem");
return gpu::GpuMat();
CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::CudaMem");
return cuda::GpuMat();
}
ogl::Buffer _InputArray::getOGlBuffer() const
@ -1230,7 +1230,7 @@ Size _InputArray::size(int i) const
if( k == GPU_MAT )
{
CV_Assert( i < 0 );
const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
return d_mat->size();
}
@ -1243,7 +1243,7 @@ Size _InputArray::size(int i) const
//if( k == CUDA_MEM )
{
CV_Assert( i < 0 );
const gpu::CudaMem* cuda_mem = (const gpu::CudaMem*)obj;
const cuda::CudaMem* cuda_mem = (const cuda::CudaMem*)obj;
return cuda_mem->size();
}
}
@ -1299,11 +1299,11 @@ int _InputArray::type(int i) const
return ((const ogl::Buffer*)obj)->type();
if( k == GPU_MAT )
return ((const gpu::GpuMat*)obj)->type();
return ((const cuda::GpuMat*)obj)->type();
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
return ((const gpu::CudaMem*)obj)->type();
return ((const cuda::CudaMem*)obj)->type();
}
int _InputArray::depth(int i) const
@ -1359,26 +1359,26 @@ bool _InputArray::empty() const
}
if( k == GPU_MAT )
return ((const gpu::GpuMat*)obj)->empty();
return ((const cuda::GpuMat*)obj)->empty();
CV_Assert( k == CUDA_MEM );
//if( k == CUDA_MEM )
return ((const gpu::CudaMem*)obj)->empty();
return ((const cuda::CudaMem*)obj)->empty();
}
_OutputArray::_OutputArray() {}
_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
_OutputArray::_OutputArray(std::vector<Mat>& vec) : _InputArray(vec) {}
_OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {}
_OutputArray::_OutputArray(cuda::GpuMat& d_mat) : _InputArray(d_mat) {}
_OutputArray::_OutputArray(ogl::Buffer& buf) : _InputArray(buf) {}
_OutputArray::_OutputArray(gpu::CudaMem& cuda_mem) : _InputArray(cuda_mem) {}
_OutputArray::_OutputArray(cuda::CudaMem& cuda_mem) : _InputArray(cuda_mem) {}
_OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const std::vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
_OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const cuda::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const ogl::Buffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const gpu::CudaMem& cuda_mem) : _InputArray(cuda_mem) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const cuda::CudaMem& cuda_mem) : _InputArray(cuda_mem) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::~_OutputArray() {}
@ -1404,9 +1404,9 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
((gpu::GpuMat*)obj)->create(_sz, mtype);
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
((cuda::GpuMat*)obj)->create(_sz, mtype);
return;
}
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
@ -1418,9 +1418,9 @@ void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int
}
if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((gpu::CudaMem*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((gpu::CudaMem*)obj)->type() == mtype);
((gpu::CudaMem*)obj)->create(_sz, mtype);
CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == _sz);
CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
((cuda::CudaMem*)obj)->create(_sz, mtype);
return;
}
int sizes[] = {_sz.height, _sz.width};
@ -1439,9 +1439,9 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
}
if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
((gpu::GpuMat*)obj)->create(rows, cols, mtype);
CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
((cuda::GpuMat*)obj)->create(rows, cols, mtype);
return;
}
if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
@ -1453,9 +1453,9 @@ void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransp
}
if( k == CUDA_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((gpu::CudaMem*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((gpu::CudaMem*)obj)->type() == mtype);
((gpu::CudaMem*)obj)->create(rows, cols, mtype);
CV_Assert(!fixedSize() || ((cuda::CudaMem*)obj)->size() == Size(cols, rows));
CV_Assert(!fixedType() || ((cuda::CudaMem*)obj)->type() == mtype);
((cuda::CudaMem*)obj)->create(rows, cols, mtype);
return;
}
int sizes[] = {rows, cols};
@ -1678,13 +1678,13 @@ void _OutputArray::release() const
if( k == GPU_MAT )
{
((gpu::GpuMat*)obj)->release();
((cuda::GpuMat*)obj)->release();
return;
}
if( k == CUDA_MEM )
{
((gpu::CudaMem*)obj)->release();
((cuda::CudaMem*)obj)->release();
return;
}
@ -1757,11 +1757,11 @@ Mat& _OutputArray::getMatRef(int i) const
}
}
gpu::GpuMat& _OutputArray::getGpuMatRef() const
cuda::GpuMat& _OutputArray::getGpuMatRef() const
{
int k = kind();
CV_Assert( k == GPU_MAT );
return *(gpu::GpuMat*)obj;
return *(cuda::GpuMat*)obj;
}
ogl::Buffer& _OutputArray::getOGlBufferRef() const
@ -1771,11 +1771,11 @@ ogl::Buffer& _OutputArray::getOGlBufferRef() const
return *(ogl::Buffer*)obj;
}
gpu::CudaMem& _OutputArray::getCudaMemRef() const
cuda::CudaMem& _OutputArray::getCudaMemRef() const
{
int k = kind();
CV_Assert( k == CUDA_MEM );
return *(gpu::CudaMem*)obj;
return *(cuda::CudaMem*)obj;
}
static _OutputArray _none;
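
The wrapper changes above keep the calling pattern intact apart from the spelling; a hypothetical helper (the name copyOnDevice is made up) illustrating how a cuda::GpuMat flows through InputArray / OutputArray without touching host memory:

static void copyOnDevice(cv::InputArray src, cv::OutputArray dst)
{
    CV_Assert( src.kind() == cv::_InputArray::GPU_MAT );

    const cv::cuda::GpuMat d_src = src.getGpuMat();   // header only, no download
    dst.create(d_src.size(), d_src.type());           // allocates a GpuMat when dst wraps one
    cv::cuda::GpuMat& d_dst = dst.getGpuMatRef();
    d_src.copyTo(d_dst);                              // device-to-device copy
}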

View File

@ -50,7 +50,7 @@
#endif
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
namespace
{
@ -122,7 +122,7 @@ namespace
////////////////////////////////////////////////////////////////////////
// setGlDevice
void cv::gpu::setGlDevice(int device)
void cv::cuda::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
(void) device;
@ -627,7 +627,7 @@ void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
#endif
}
void cv::ogl::Buffer::copyFrom(InputArray arr, gpu::Stream& stream, Target target, bool autoRelease)
void cv::ogl::Buffer::copyFrom(InputArray arr, cuda::Stream& stream, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
(void) arr;
@ -647,7 +647,7 @@ void cv::ogl::Buffer::copyFrom(InputArray arr, gpu::Stream& stream, Target targe
create(dmat.size(), dmat.type(), target, autoRelease);
impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, gpu::StreamAccessor::getStream(stream));
impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}
@ -692,7 +692,7 @@ void cv::ogl::Buffer::copyTo(OutputArray arr) const
#endif
}
void cv::ogl::Buffer::copyTo(OutputArray arr, gpu::Stream& stream) const
void cv::ogl::Buffer::copyTo(OutputArray arr, cuda::Stream& stream) const
{
#ifndef HAVE_OPENGL
(void) arr;
@ -706,7 +706,7 @@ void cv::ogl::Buffer::copyTo(OutputArray arr, gpu::Stream& stream) const
#else
arr.create(rows_, cols_, type_);
GpuMat dmat = arr.getGpuMat();
impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, gpu::StreamAccessor::getStream(stream));
impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
#endif
#endif
}
@ -794,7 +794,7 @@ void cv::ogl::Buffer::unmapDevice()
#endif
}
gpu::GpuMat cv::ogl::Buffer::mapDevice(gpu::Stream& stream)
cuda::GpuMat cv::ogl::Buffer::mapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
(void) stream;
@ -806,12 +806,12 @@ gpu::GpuMat cv::ogl::Buffer::mapDevice(gpu::Stream& stream)
throw_no_cuda();
return GpuMat();
#else
return GpuMat(rows_, cols_, type_, impl_->mapDevice(gpu::StreamAccessor::getStream(stream)));
return GpuMat(rows_, cols_, type_, impl_->mapDevice(cuda::StreamAccessor::getStream(stream)));
#endif
#endif
}
void cv::ogl::Buffer::unmapDevice(gpu::Stream& stream)
void cv::ogl::Buffer::unmapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
(void) stream;
@ -821,7 +821,7 @@ void cv::ogl::Buffer::unmapDevice(gpu::Stream& stream)
(void) stream;
throw_no_cuda();
#else
impl_->unmapDevice(gpu::StreamAccessor::getStream(stream));
impl_->unmapDevice(cuda::StreamAccessor::getStream(stream));
#endif
#endif
}
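
A fragment of the CUDA / OpenGL interop path touched above; it assumes an OpenGL context is already current on the calling thread and that OpenCV was built with both HAVE_OPENGL and HAVE_CUDA:

cv::cuda::setGlDevice(0);                       // bind CUDA device 0 to the current GL context

cv::cuda::GpuMat d_frame(480, 640, CV_8UC4);
cv::cuda::Stream stream;

cv::ogl::Buffer buf;
buf.copyFrom(d_frame, stream, cv::ogl::Buffer::ARRAY_BUFFER, false);   // async device -> buffer copy

cv::cuda::GpuMat d_mapped = buf.mapDevice(stream);
// ... write into d_mapped on 'stream' ...
buf.unmapDevice(stream);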

View File

@ -52,7 +52,7 @@
namespace cv { namespace cudev {
using namespace cv::gpu;
using namespace cv::cuda;
// CV_CUDEV_ARCH

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -44,7 +44,7 @@
#include "test_precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
using namespace cv::cudev;
using namespace cvtest;

View File

@ -89,7 +89,7 @@
#endif
#endif
namespace cv { namespace gpu {
namespace cv { namespace cuda {
//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
@ -255,6 +255,6 @@ CV_EXPORTS void calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy);
}} // namespace cv { namespace gpu {
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_GPU_HPP__ */

View File

@ -65,10 +65,10 @@ PERF_TEST_P(Count, Calib3D_ProjectPoints,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::projectPoints(d_src, rvec, tvec, camera_mat, cv::Mat(), dst);
TEST_CYCLE() cv::cuda::projectPoints(d_src, rvec, tvec, camera_mat, cv::Mat(), dst);
GPU_SANITY_CHECK(dst);
}
@ -120,7 +120,7 @@ PERF_TEST_P(Count, Calib3D_SolvePnPRansac,
if (PERF_RUN_GPU())
{
TEST_CYCLE() cv::gpu::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec);
TEST_CYCLE() cv::cuda::solvePnPRansac(object, image, camera_mat, dist_coef, rvec, tvec);
GPU_SANITY_CHECK(rvec, 1e-3);
GPU_SANITY_CHECK(tvec, 1e-3);

View File

@ -151,10 +151,10 @@ PERF_TEST_P(Image, DISABLED_Labeling_ConnectivityMask,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat mask;
cv::cuda::GpuMat d_image(image);
cv::cuda::GpuMat mask;
TEST_CYCLE() cv::gpu::connectivityMask(d_image, mask, cv::Scalar::all(0), cv::Scalar::all(2));
TEST_CYCLE() cv::cuda::connectivityMask(d_image, mask, cv::Scalar::all(0), cv::Scalar::all(2));
GPU_SANITY_CHECK(mask);
}
@ -174,12 +174,12 @@ PERF_TEST_P(Image, DISABLED_Labeling_ConnectedComponents,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_mask;
cv::gpu::connectivityMask(cv::gpu::GpuMat(image), d_mask, cv::Scalar::all(0), cv::Scalar::all(2));
cv::cuda::GpuMat d_mask;
cv::cuda::connectivityMask(cv::cuda::GpuMat(image), d_mask, cv::Scalar::all(0), cv::Scalar::all(2));
cv::gpu::GpuMat components;
cv::cuda::GpuMat components;
TEST_CYCLE() cv::gpu::labelComponents(d_mask, components);
TEST_CYCLE() cv::cuda::labelComponents(d_mask, components);
GPU_SANITY_CHECK(components);
}

View File

@ -64,7 +64,7 @@ PERF_TEST_P(Sz_Depth_Cn, MatOp_SetTo,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat dst(size, type);
cv::cuda::GpuMat dst(size, type);
TEST_CYCLE() dst.setTo(val);
@ -102,8 +102,8 @@ PERF_TEST_P(Sz_Depth_Cn, MatOp_SetToMasked,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat dst(src);
const cv::gpu::GpuMat d_mask(mask);
cv::cuda::GpuMat dst(src);
const cv::cuda::GpuMat d_mask(mask);
TEST_CYCLE() dst.setTo(val, d_mask);
@ -139,9 +139,9 @@ PERF_TEST_P(Sz_Depth_Cn, MatOp_CopyToMasked,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
const cv::gpu::GpuMat d_mask(mask);
cv::gpu::GpuMat dst(d_src.size(), d_src.type(), cv::Scalar::all(0));
const cv::cuda::GpuMat d_src(src);
const cv::cuda::GpuMat d_mask(mask);
cv::cuda::GpuMat dst(d_src.size(), d_src.type(), cv::Scalar::all(0));
TEST_CYCLE() d_src.copyTo(dst, d_mask);
@ -179,8 +179,8 @@ PERF_TEST_P(Sz_2Depth, MatOp_ConvertTo,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() d_src.convertTo(dst, depth2, a, b);

View File

@ -68,11 +68,11 @@ PERF_TEST_P(Image, ObjDetect_HOG,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_img(img);
const cv::cuda::GpuMat d_img(img);
std::vector<cv::Rect> gpu_found_locations;
cv::gpu::HOGDescriptor d_hog;
d_hog.setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
cv::cuda::HOGDescriptor d_hog;
d_hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
TEST_CYCLE() d_hog.detectMultiScale(d_img, gpu_found_locations);
@ -83,7 +83,7 @@ PERF_TEST_P(Image, ObjDetect_HOG,
std::vector<cv::Rect> cpu_found_locations;
cv::HOGDescriptor hog;
hog.setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
TEST_CYCLE() hog.detectMultiScale(img, cpu_found_locations);
@ -105,11 +105,11 @@ PERF_TEST_P(ImageAndCascade, ObjDetect_HaarClassifier,
if (PERF_RUN_GPU())
{
cv::gpu::CascadeClassifier_GPU d_cascade;
cv::cuda::CascadeClassifier_GPU d_cascade;
ASSERT_TRUE(d_cascade.load(perf::TestBase::getDataPath(GetParam().second)));
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat objects_buffer;
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat objects_buffer;
int detections_num = 0;
TEST_CYCLE() detections_num = d_cascade.detectMultiScale(d_img, objects_buffer);
@ -144,11 +144,11 @@ PERF_TEST_P(ImageAndCascade, ObjDetect_LBPClassifier,
if (PERF_RUN_GPU())
{
cv::gpu::CascadeClassifier_GPU d_cascade;
cv::cuda::CascadeClassifier_GPU d_cascade;
ASSERT_TRUE(d_cascade.load(perf::TestBase::getDataPath(GetParam().second)));
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat objects_buffer;
const cv::cuda::GpuMat d_img(img);
cv::cuda::GpuMat objects_buffer;
int detections_num = 0;
TEST_CYCLE() detections_num = d_cascade.detectMultiScale(d_img, objects_buffer);
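
Outside the perf harness the renamed CUDA HOG is driven the same way; a fragment (headers and image I/O omitted; 'img' is assumed to be CV_8UC1 or CV_8UC4, the only inputs the CUDA HOG accepts):

cv::cuda::HOGDescriptor d_hog;
d_hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());

cv::cuda::GpuMat d_img(img);                  // upload the frame
std::vector<cv::Rect> found;                  // detections come back on the host
d_hog.detectMultiScale(d_img, found);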

View File

@ -80,10 +80,10 @@ PERF_TEST_P(Image, HoughLinesP, testing::Values(std::string("im1_1280x800.jpg"))
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat d_lines;
cv::cuda::GpuMat d_image(image);
cv::cuda::GpuMat d_lines;
cv::Ptr<cv::gpu::HoughSegmentDetector> hough = cv::gpu::createHoughSegmentDetector(rho, theta, minLineLenght, maxLineGap);
cv::Ptr<cv::cuda::HoughSegmentDetector> hough = cv::cuda::createHoughSegmentDetector(rho, theta, minLineLenght, maxLineGap);
hough->detect(d_image, d_lines);
@ -144,11 +144,11 @@ PERF_TEST_P(Image_Depth, GoodFeaturesToTrack,
if (PERF_RUN_GPU())
{
cv::Ptr<cv::gpu::CornersDetector> detector = cv::gpu::createGoodFeaturesToTrackDetector(src.type(), maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k);
cv::Ptr<cv::cuda::CornersDetector> detector = cv::cuda::createGoodFeaturesToTrackDetector(src.type(), maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k);
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_mask(mask);
cv::gpu::GpuMat d_pts;
cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_mask(mask);
cv::cuda::GpuMat d_pts;
detector->detect(d_src, d_pts, d_mask);
@ -233,13 +233,13 @@ PERF_TEST_P(ImagePair_Depth_GraySource, OpticalFlowPyrLKSparse,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src1(src1);
cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat d_pts(pts.reshape(2, 1));
cv::gpu::GpuMat d_nextPts;
cv::gpu::GpuMat d_status;
cv::cuda::GpuMat d_src1(src1);
cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_pts(pts.reshape(2, 1));
cv::cuda::GpuMat d_nextPts;
cv::cuda::GpuMat d_status;
cv::gpu::PyrLKOpticalFlow d_pyrLK;
cv::cuda::PyrLKOpticalFlow d_pyrLK;
d_pyrLK.winSize = winSize;
d_pyrLK.maxLevel = maxLevel;
d_pyrLK.iters = criteria.maxCount;
@ -311,12 +311,12 @@ PERF_TEST_P(ImagePair_Depth, OpticalFlowFarneback,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src1(src1);
cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat d_u(src1.size(), CV_32FC1, cv::Scalar::all(0));
cv::gpu::GpuMat d_v(src1.size(), CV_32FC1, cv::Scalar::all(0));
cv::cuda::GpuMat d_src1(src1);
cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_u(src1.size(), CV_32FC1, cv::Scalar::all(0));
cv::cuda::GpuMat d_v(src1.size(), CV_32FC1, cv::Scalar::all(0));
cv::gpu::FarnebackOpticalFlow d_farneback;
cv::cuda::FarnebackOpticalFlow d_farneback;
d_farneback.pyrScale = pyrScale;
d_farneback.numLevels = numLevels;
d_farneback.winSize = winSize;
@ -398,15 +398,15 @@ PERF_TEST_P(ImagePair_BlockSize_ShiftSize_MaxRange, OpticalFlowBM,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src1(src1);
cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat d_velx, d_vely, buf;
cv::cuda::GpuMat d_src1(src1);
cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_velx, d_vely, buf;
cv::gpu::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
cv::cuda::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
TEST_CYCLE_N(10)
{
cv::gpu::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
cv::cuda::calcOpticalFlowBM(d_src1, d_src2, block_size, shift_size, max_range, false, d_velx, d_vely, buf);
}
}
else
@ -449,11 +449,11 @@ PERF_TEST_P(ImagePair_BlockSize_ShiftSize_MaxRange, FastOpticalFlowBM,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_src1(src1);
cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat d_velx, d_vely;
cv::cuda::GpuMat d_src1(src1);
cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_velx, d_vely;
cv::gpu::FastOpticalFlowBM fastBM;
cv::cuda::FastOpticalFlowBM fastBM;
fastBM(d_src1, d_src2, d_velx, d_vely, max_range.width, block_size.width);
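
A fragment of the sparse pyramidal LK path under the new namespace ('prev' and 'next' are 8-bit grayscale cv::Mat frames, 'pts' a 1 x N CV_32FC2 cv::Mat of points, the same layout the perf test builds with reshape(2, 1); the sparse() call follows the 2.4-era gpu API and is not shown in this diff):

cv::cuda::GpuMat d_prev(prev), d_next(next);
cv::cuda::GpuMat d_prevPts(pts);
cv::cuda::GpuMat d_nextPts, d_status;

cv::cuda::PyrLKOpticalFlow pyrLK;
pyrLK.winSize  = cv::Size(21, 21);
pyrLK.maxLevel = 3;
pyrLK.sparse(d_prev, d_next, d_prevPts, d_nextPts, d_status);

cv::Mat nextPts, status;
d_nextPts.download(nextPts);                  // tracked positions
d_status.download(status);                    // non-zero entries mark successfully tracked points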

View File

@ -43,19 +43,19 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
void cv::gpu::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::projectPoints(const GpuMat&, const Mat&, const Mat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::projectPoints(const GpuMat&, const Mat&, const Mat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::solvePnPRansac(const Mat&, const Mat&, const Mat&, const Mat&, Mat&, Mat&, bool, int, float, int, std::vector<int>*) { throw_no_cuda(); }
void cv::cuda::solvePnPRansac(const Mat&, const Mat&, const Mat&, const Mat&, Mat&, Mat&, bool, int, float, int, std::vector<int>*) { throw_no_cuda(); }
#else
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace transform_points
{
@ -78,7 +78,7 @@ namespace cv { namespace gpu { namespace cudev
}
}}}
using namespace ::cv::gpu::cudev;
using namespace ::cv::cuda::cudev;
namespace
{
@ -97,7 +97,7 @@ namespace
}
}
void cv::gpu::transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, Stream& stream)
void cv::cuda::transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, Stream& stream)
{
transformPointsCaller(src, rvec, tvec, dst, StreamAccessor::getStream(stream));
}
@ -121,7 +121,7 @@ namespace
}
}
void cv::gpu::projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, Stream& stream)
void cv::cuda::projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, Stream& stream)
{
projectPointsCaller(src, rvec, tvec, camera_mat, dist_coef, dst, StreamAccessor::getStream(stream));
}
@ -208,7 +208,7 @@ namespace
};
}
void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
void cv::cuda::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess,
int num_iters, float max_dist, int min_inlier_count,
std::vector<int>* inliers)
@ -252,7 +252,7 @@ void cv::gpu::solvePnPRansac(const Mat& object, const Mat& image, const Mat& cam
// Find the best hypothesis index
Point best_idx;
double best_score;
gpu::minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
cuda::minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
int num_inliers = static_cast<int>(best_score);
// Extract the best hypothesis data

View File

@ -44,23 +44,23 @@
#include "opencv2/objdetect/objdetect_c.h"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_no_cuda(); }
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const String&) { throw_no_cuda(); }
cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_no_cuda(); }
bool cv::gpu::CascadeClassifier_GPU::empty() const { throw_no_cuda(); return true; }
bool cv::gpu::CascadeClassifier_GPU::load(const String&) { throw_no_cuda(); return true; }
Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const { throw_no_cuda(); return Size();}
void cv::gpu::CascadeClassifier_GPU::release() { throw_no_cuda(); }
int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_no_cuda(); return -1;}
int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_no_cuda(); return -1;}
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU() { throw_no_cuda(); }
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String&) { throw_no_cuda(); }
cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { throw_no_cuda(); }
bool cv::cuda::CascadeClassifier_GPU::empty() const { throw_no_cuda(); return true; }
bool cv::cuda::CascadeClassifier_GPU::load(const String&) { throw_no_cuda(); return true; }
Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const { throw_no_cuda(); return Size();}
void cv::cuda::CascadeClassifier_GPU::release() { throw_no_cuda(); }
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, double, int, Size) {throw_no_cuda(); return -1;}
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat&, GpuMat&, Size, Size, double, int) {throw_no_cuda(); return -1;}
#else
struct cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
struct cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
CascadeClassifierImpl(){}
@ -75,7 +75,7 @@ public:
#ifndef HAVE_OPENCV_GPULEGACY
struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
HaarCascade()
@ -104,7 +104,7 @@ public:
#else
struct cv::gpu::CascadeClassifier_GPU::HaarCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
struct cv::cuda::CascadeClassifier_GPU::HaarCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
HaarCascade() : lastAllocatedFrameSize(-1, -1)
@ -203,7 +203,7 @@ private:
NCVStatus load(const String& classifierFile)
{
int devId = cv::gpu::getDevice();
int devId = cv::cuda::getDevice();
ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
// Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
@ -372,7 +372,7 @@ struct PyrLavel
cv::Size sWindow;
};
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace lbp
{
@ -398,7 +398,7 @@ namespace cv { namespace gpu { namespace cudev
}
}}}
struct cv::gpu::CascadeClassifier_GPU::LbpCascade : cv::gpu::CascadeClassifier_GPU::CascadeClassifierImpl
struct cv::cuda::CascadeClassifier_GPU::LbpCascade : cv::cuda::CascadeClassifier_GPU::CascadeClassifierImpl
{
public:
struct Stage
@ -457,8 +457,8 @@ public:
GpuMat buff = integralBuffer;
// generate integral for scale
gpu::resize(image, src, level.sFrame, 0, 0, cv::INTER_LINEAR);
gpu::integral(src, sint, buff);
cuda::resize(image, src, level.sFrame, 0, 0, cv::INTER_LINEAR);
cuda::integral(src, sint, buff);
// calculate job
int totalWidth = level.workArea.width / step;
@ -515,7 +515,7 @@ private:
roiSize.height = frame.height;
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );
cudaSafeCall( cudaGetDeviceProperties(&prop, cv::cuda::getDevice()) );
Ncv32u bufSize;
ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
@ -694,36 +694,36 @@ private:
static const int integralFactor = 4;
};
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU()
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU()
: findLargestObject(false), visualizeInPlace(false), impl(0) {}
cv::gpu::CascadeClassifier_GPU::CascadeClassifier_GPU(const String& filename)
cv::cuda::CascadeClassifier_GPU::CascadeClassifier_GPU(const String& filename)
: findLargestObject(false), visualizeInPlace(false), impl(0) { load(filename); }
cv::gpu::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
cv::cuda::CascadeClassifier_GPU::~CascadeClassifier_GPU() { release(); }
void cv::gpu::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }
void cv::cuda::CascadeClassifier_GPU::release() { if (impl) { delete impl; impl = 0; } }
bool cv::gpu::CascadeClassifier_GPU::empty() const { return impl == 0; }
bool cv::cuda::CascadeClassifier_GPU::empty() const { return impl == 0; }
Size cv::gpu::CascadeClassifier_GPU::getClassifierSize() const
Size cv::cuda::CascadeClassifier_GPU::getClassifierSize() const
{
return this->empty() ? Size() : impl->getClassifierCvSize();
}
int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
int cv::cuda::CascadeClassifier_GPU::detectMultiScale( const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
{
CV_Assert( !this->empty());
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, cv::Size());
}
int cv::gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
int cv::cuda::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize, double scaleFactor, int minNeighbors)
{
CV_Assert( !this->empty());
return impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, minSize, maxObjectSize);
}
bool cv::gpu::CascadeClassifier_GPU::load(const String& filename)
bool cv::cuda::CascadeClassifier_GPU::load(const String& filename)
{
release();
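
A fragment showing the renamed cascade with the detections read back from the device buffer; the read-back follows the pattern used in the gpu samples ('gray' is an 8-bit grayscale cv::Mat, the cascade file name is illustrative):

cv::cuda::CascadeClassifier_GPU cascade("haarcascade_frontalface_alt.xml");

cv::cuda::GpuMat d_gray(gray), d_objects;
int n = cascade.detectMultiScale(d_gray, d_objects);     // detections stay in d_objects

if (n > 0)
{
    cv::Mat objects_host;
    d_objects.colRange(0, n).download(objects_host);     // one cv::Rect per detection
    const cv::Rect* faces = objects_host.ptr<cv::Rect>();
    // faces[0] .. faces[n - 1] are the detected rectangles
}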

View File

@ -47,7 +47,7 @@
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
#define SOLVE_PNP_RANSAC_MAX_NUM_ITERS 200
@ -79,7 +79,7 @@ namespace cv { namespace gpu { namespace cudev
cudaSafeCall(cudaMemcpyToSymbol(crot1, rot + 3, sizeof(float) * 3));
cudaSafeCall(cudaMemcpyToSymbol(crot2, rot + 6, sizeof(float) * 3));
cudaSafeCall(cudaMemcpyToSymbol(ctransl, transl, sizeof(float) * 3));
cv::gpu::cudev::transform(src, dst, TransformOp(), WithOutMask(), stream);
cv::cuda::cudev::transform(src, dst, TransformOp(), WithOutMask(), stream);
}
} // namespace transform_points
@ -120,7 +120,7 @@ namespace cv { namespace gpu { namespace cudev
cudaSafeCall(cudaMemcpyToSymbol(ctransl, transl, sizeof(float) * 3));
cudaSafeCall(cudaMemcpyToSymbol(cproj0, proj, sizeof(float) * 3));
cudaSafeCall(cudaMemcpyToSymbol(cproj1, proj + 3, sizeof(float) * 3));
cv::gpu::cudev::transform(src, dst, ProjectOp(), WithOutMask(), stream);
cv::cuda::cudev::transform(src, dst, ProjectOp(), WithOutMask(), stream);
}
} // namespace project_points
@ -187,7 +187,7 @@ namespace cv { namespace gpu { namespace cudev
cudaSafeCall( cudaDeviceSynchronize() );
}
} // namespace solvepnp_ransac
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */

View File

@ -50,7 +50,7 @@
#include <iostream>
#include <stdio.h>
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace ccl
{

View File

@ -47,7 +47,7 @@
#include <thrust/functional.h>
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev { namespace globmotion {
namespace cv { namespace cuda { namespace cudev { namespace globmotion {
__constant__ float cml[9];
__constant__ float cmr[9];

View File

@ -47,7 +47,7 @@
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
// Other values are not supported
#define CELL_WIDTH 8
@ -808,7 +808,7 @@ namespace cv { namespace gpu { namespace cudev
void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }
void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }
} // namespace hog
}}} // namespace cv { namespace gpu { namespace cudev
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */

View File

@ -46,7 +46,7 @@
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace lbp
{

View File

@ -46,7 +46,7 @@
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
namespace cv { namespace gpu { namespace cudev {
namespace cv { namespace cuda { namespace cudev {
namespace lbp {

View File

@ -43,17 +43,17 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
void cv::gpu::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
void cv::gpu::calcWobbleSuppressionMaps(
void cv::cuda::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
void cv::cuda::calcWobbleSuppressionMaps(
int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
#else
namespace cv { namespace gpu { namespace cudev { namespace globmotion {
namespace cv { namespace cuda { namespace cudev { namespace globmotion {
int compactPoints(int N, float *points0, float *points1, const uchar *mask);
@ -63,14 +63,14 @@ namespace cv { namespace gpu { namespace cudev { namespace globmotion {
}}}}
void cv::gpu::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
void cv::cuda::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
{
CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);
int npoints = points0.cols;
int remaining = cv::gpu::cudev::globmotion::compactPoints(
int remaining = cv::cuda::cudev::globmotion::compactPoints(
npoints, (float*)points0.data, (float*)points1.data, mask.data);
points0 = points0.colRange(0, remaining);
@ -78,7 +78,7 @@ void cv::gpu::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask
}
void cv::gpu::calcWobbleSuppressionMaps(
void cv::cuda::calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy)
{
@ -88,7 +88,7 @@ void cv::gpu::calcWobbleSuppressionMaps(
mapx.create(size, CV_32F);
mapy.create(size, CV_32F);
cv::gpu::cudev::globmotion::calcWobbleSuppressionMaps(
cv::cuda::cudev::globmotion::calcWobbleSuppressionMaps(
left, idx, right, size.width, size.height,
ml.ptr<float>(), mr.ptr<float>(), mapx, mapy);
}

View File

@ -44,15 +44,15 @@
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::cuda::graphcut(GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::connectivityMask(const GpuMat&, GpuMat&, const cv::Scalar&, const cv::Scalar&, Stream&) { throw_no_cuda(); }
void cv::gpu::labelComponents(const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
void cv::cuda::connectivityMask(const GpuMat&, GpuMat&, const cv::Scalar&, const cv::Scalar&, Stream&) { throw_no_cuda(); }
void cv::cuda::labelComponents(const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace ccl
{
@ -68,7 +68,7 @@ static float4 scalarToCudaType(const cv::Scalar& in)
return make_float4((float)in[0], (float)in[1], (float)in[2], (float)in[3]);
}
void cv::gpu::connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& s)
void cv::cuda::connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& s)
{
CV_Assert(!image.empty());
@ -102,7 +102,7 @@ void cv::gpu::connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scal
f(image, mask, culo, cuhi, stream);
}
void cv::gpu::labelComponents(const GpuMat& mask, GpuMat& components, int flags, Stream& s)
void cv::cuda::labelComponents(const GpuMat& mask, GpuMat& components, int flags, Stream& s)
{
CV_Assert(!mask.empty() && mask.type() == CV_8U);
@ -142,7 +142,7 @@ namespace
};
}
void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf, Stream& s)
void cv::cuda::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf, Stream& s)
{
#if (CUDA_VERSION < 5000)
CV_Assert(terminals.type() == CV_32S);
@ -201,7 +201,7 @@ void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTrans
cudaSafeCall( cudaDeviceSynchronize() );
}
void cv::gpu::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
void cv::cuda::graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight, GpuMat& labels, GpuMat& buf, Stream& s)
{
#if (CUDA_VERSION < 5000)

View File

@ -44,25 +44,25 @@
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
cv::gpu::HOGDescriptor::HOGDescriptor(Size, Size, Size, Size, int, double, double, bool, int) { throw_no_cuda(); }
size_t cv::gpu::HOGDescriptor::getDescriptorSize() const { throw_no_cuda(); return 0; }
size_t cv::gpu::HOGDescriptor::getBlockHistogramSize() const { throw_no_cuda(); return 0; }
double cv::gpu::HOGDescriptor::getWinSigma() const { throw_no_cuda(); return 0; }
bool cv::gpu::HOGDescriptor::checkDetectorSize() const { throw_no_cuda(); return false; }
void cv::gpu::HOGDescriptor::setSVMDetector(const std::vector<float>&) { throw_no_cuda(); }
void cv::gpu::HOGDescriptor::detect(const GpuMat&, std::vector<Point>&, double, Size, Size) { throw_no_cuda(); }
void cv::gpu::HOGDescriptor::detectMultiScale(const GpuMat&, std::vector<Rect>&, double, Size, Size, double, int) { throw_no_cuda(); }
void cv::gpu::HOGDescriptor::computeBlockHistograms(const GpuMat&) { throw_no_cuda(); }
void cv::gpu::HOGDescriptor::getDescriptors(const GpuMat&, Size, GpuMat&, int) { throw_no_cuda(); }
std::vector<float> cv::gpu::HOGDescriptor::getDefaultPeopleDetector() { throw_no_cuda(); return std::vector<float>(); }
std::vector<float> cv::gpu::HOGDescriptor::getPeopleDetector48x96() { throw_no_cuda(); return std::vector<float>(); }
std::vector<float> cv::gpu::HOGDescriptor::getPeopleDetector64x128() { throw_no_cuda(); return std::vector<float>(); }
void cv::gpu::HOGDescriptor::computeConfidence(const GpuMat&, std::vector<Point>&, double, Size, Size, std::vector<Point>&, std::vector<double>&) { throw_no_cuda(); }
void cv::gpu::HOGDescriptor::computeConfidenceMultiScale(const GpuMat&, std::vector<Rect>&, double, Size, Size, std::vector<HOGConfidence>&, int) { throw_no_cuda(); }
cv::cuda::HOGDescriptor::HOGDescriptor(Size, Size, Size, Size, int, double, double, bool, int) { throw_no_cuda(); }
size_t cv::cuda::HOGDescriptor::getDescriptorSize() const { throw_no_cuda(); return 0; }
size_t cv::cuda::HOGDescriptor::getBlockHistogramSize() const { throw_no_cuda(); return 0; }
double cv::cuda::HOGDescriptor::getWinSigma() const { throw_no_cuda(); return 0; }
bool cv::cuda::HOGDescriptor::checkDetectorSize() const { throw_no_cuda(); return false; }
void cv::cuda::HOGDescriptor::setSVMDetector(const std::vector<float>&) { throw_no_cuda(); }
void cv::cuda::HOGDescriptor::detect(const GpuMat&, std::vector<Point>&, double, Size, Size) { throw_no_cuda(); }
void cv::cuda::HOGDescriptor::detectMultiScale(const GpuMat&, std::vector<Rect>&, double, Size, Size, double, int) { throw_no_cuda(); }
void cv::cuda::HOGDescriptor::computeBlockHistograms(const GpuMat&) { throw_no_cuda(); }
void cv::cuda::HOGDescriptor::getDescriptors(const GpuMat&, Size, GpuMat&, int) { throw_no_cuda(); }
std::vector<float> cv::cuda::HOGDescriptor::getDefaultPeopleDetector() { throw_no_cuda(); return std::vector<float>(); }
std::vector<float> cv::cuda::HOGDescriptor::getPeopleDetector48x96() { throw_no_cuda(); return std::vector<float>(); }
std::vector<float> cv::cuda::HOGDescriptor::getPeopleDetector64x128() { throw_no_cuda(); return std::vector<float>(); }
void cv::cuda::HOGDescriptor::computeConfidence(const GpuMat&, std::vector<Point>&, double, Size, Size, std::vector<Point>&, std::vector<double>&) { throw_no_cuda(); }
void cv::cuda::HOGDescriptor::computeConfidenceMultiScale(const GpuMat&, std::vector<Rect>&, double, Size, Size, std::vector<HOGConfidence>&, int) { throw_no_cuda(); }
#else
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace hog
{
@ -70,8 +70,8 @@ namespace cv { namespace gpu { namespace cudev
int nblocks_win_x, int nblocks_win_y);
void compute_hists(int nbins, int block_stride_x, int blovck_stride_y,
int height, int width, const cv::gpu::PtrStepSzf& grad,
const cv::gpu::PtrStepSzb& qangle, float sigma, float* block_hists);
int height, int width, const cv::cuda::PtrStepSzf& grad,
const cv::cuda::PtrStepSzb& qangle, float sigma, float* block_hists);
void normalize_hists(int nbins, int block_stride_x, int block_stride_y,
int height, int width, float* block_hists, float threshold);
@ -87,24 +87,24 @@ namespace cv { namespace gpu { namespace cudev
void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
cv::gpu::PtrStepSzf descriptors);
cv::cuda::PtrStepSzf descriptors);
void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,
int win_stride_y, int win_stride_x, int height, int width, float* block_hists,
cv::gpu::PtrStepSzf descriptors);
cv::cuda::PtrStepSzf descriptors);
void compute_gradients_8UC1(int nbins, int height, int width, const cv::gpu::PtrStepSzb& img,
float angle_scale, cv::gpu::PtrStepSzf grad, cv::gpu::PtrStepSzb qangle, bool correct_gamma);
void compute_gradients_8UC4(int nbins, int height, int width, const cv::gpu::PtrStepSzb& img,
float angle_scale, cv::gpu::PtrStepSzf grad, cv::gpu::PtrStepSzb qangle, bool correct_gamma);
void compute_gradients_8UC1(int nbins, int height, int width, const cv::cuda::PtrStepSzb& img,
float angle_scale, cv::cuda::PtrStepSzf grad, cv::cuda::PtrStepSzb qangle, bool correct_gamma);
void compute_gradients_8UC4(int nbins, int height, int width, const cv::cuda::PtrStepSzb& img,
float angle_scale, cv::cuda::PtrStepSzf grad, cv::cuda::PtrStepSzb qangle, bool correct_gamma);
void resize_8UC1(const cv::gpu::PtrStepSzb& src, cv::gpu::PtrStepSzb dst);
void resize_8UC4(const cv::gpu::PtrStepSzb& src, cv::gpu::PtrStepSzb dst);
void resize_8UC1(const cv::cuda::PtrStepSzb& src, cv::cuda::PtrStepSzb dst);
void resize_8UC4(const cv::cuda::PtrStepSzb& src, cv::cuda::PtrStepSzb dst);
}
}}}
using namespace ::cv::gpu::cudev;
using namespace ::cv::cuda::cudev;
cv::gpu::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size block_stride_, Size cell_size_,
cv::cuda::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size block_stride_, Size cell_size_,
int nbins_, double win_sigma_, double threshold_L2hys_, bool gamma_correction_, int nlevels_)
: win_size(win_size_),
block_size(block_size_),
@ -132,30 +132,30 @@ cv::gpu::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size blo
hog::set_up_constants(nbins, block_stride.width, block_stride.height, blocks_per_win.width, blocks_per_win.height);
}
size_t cv::gpu::HOGDescriptor::getDescriptorSize() const
size_t cv::cuda::HOGDescriptor::getDescriptorSize() const
{
return numPartsWithin(win_size, block_size, block_stride).area() * getBlockHistogramSize();
}
size_t cv::gpu::HOGDescriptor::getBlockHistogramSize() const
size_t cv::cuda::HOGDescriptor::getBlockHistogramSize() const
{
Size cells_per_block = Size(block_size.width / cell_size.width, block_size.height / cell_size.height);
return (size_t)(nbins * cells_per_block.area());
}
double cv::gpu::HOGDescriptor::getWinSigma() const
double cv::cuda::HOGDescriptor::getWinSigma() const
{
return win_sigma >= 0 ? win_sigma : (block_size.width + block_size.height) / 8.0;
}
bool cv::gpu::HOGDescriptor::checkDetectorSize() const
bool cv::cuda::HOGDescriptor::checkDetectorSize() const
{
size_t detector_size = detector.rows * detector.cols;
size_t descriptor_size = getDescriptorSize();
return detector_size == 0 || detector_size == descriptor_size || detector_size == descriptor_size + 1;
}
void cv::gpu::HOGDescriptor::setSVMDetector(const std::vector<float>& _detector)
void cv::cuda::HOGDescriptor::setSVMDetector(const std::vector<float>& _detector)
{
std::vector<float> detector_reordered(_detector.size());
@ -179,7 +179,7 @@ void cv::gpu::HOGDescriptor::setSVMDetector(const std::vector<float>& _detector)
CV_Assert(checkDetectorSize());
}
cv::gpu::GpuMat cv::gpu::HOGDescriptor::getBuffer(const Size& sz, int type, GpuMat& buf)
cv::cuda::GpuMat cv::cuda::HOGDescriptor::getBuffer(const Size& sz, int type, GpuMat& buf)
{
if (buf.empty() || buf.type() != type)
buf.create(sz, type);
@ -190,13 +190,13 @@ cv::gpu::GpuMat cv::gpu::HOGDescriptor::getBuffer(const Size& sz, int type, GpuM
return buf(Rect(Point(0,0), sz));
}
cv::gpu::GpuMat cv::gpu::HOGDescriptor::getBuffer(int rows, int cols, int type, GpuMat& buf)
cv::cuda::GpuMat cv::cuda::HOGDescriptor::getBuffer(int rows, int cols, int type, GpuMat& buf)
{
return getBuffer(Size(cols, rows), type, buf);
}
void cv::gpu::HOGDescriptor::computeGradient(const GpuMat& img, GpuMat& _grad, GpuMat& _qangle)
void cv::cuda::HOGDescriptor::computeGradient(const GpuMat& img, GpuMat& _grad, GpuMat& _qangle)
{
CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4);
@ -219,7 +219,7 @@ void cv::gpu::HOGDescriptor::computeGradient(const GpuMat& img, GpuMat& _grad, G
}
void cv::gpu::HOGDescriptor::computeBlockHistograms(const GpuMat& img)
void cv::cuda::HOGDescriptor::computeBlockHistograms(const GpuMat& img)
{
computeGradient(img, grad, qangle);
@ -237,7 +237,7 @@ void cv::gpu::HOGDescriptor::computeBlockHistograms(const GpuMat& img)
}
void cv::gpu::HOGDescriptor::getDescriptors(const GpuMat& img, Size win_stride, GpuMat& descriptors, int descr_format)
void cv::cuda::HOGDescriptor::getDescriptors(const GpuMat& img, Size win_stride, GpuMat& descriptors, int descr_format)
{
CV_Assert(win_stride.width % block_stride.width == 0 && win_stride.height % block_stride.height == 0);
@ -264,7 +264,7 @@ void cv::gpu::HOGDescriptor::getDescriptors(const GpuMat& img, Size win_stride,
}
}
void cv::gpu::HOGDescriptor::computeConfidence(const GpuMat& img, std::vector<Point>& hits, double hit_threshold,
void cv::cuda::HOGDescriptor::computeConfidence(const GpuMat& img, std::vector<Point>& hits, double hit_threshold,
Size win_stride, Size padding, std::vector<Point>& locations, std::vector<double>& confidences)
{
CV_Assert(padding == Size(0, 0));
@ -307,7 +307,7 @@ void cv::gpu::HOGDescriptor::computeConfidence(const GpuMat& img, std::vector<Po
}
}
void cv::gpu::HOGDescriptor::computeConfidenceMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
void cv::cuda::HOGDescriptor::computeConfidenceMultiScale(const GpuMat& img, std::vector<Rect>& found_locations,
double hit_threshold, Size win_stride, Size padding,
std::vector<HOGConfidence> &conf_out, int group_threshold)
{
@ -359,7 +359,7 @@ void cv::gpu::HOGDescriptor::computeConfidenceMultiScale(const GpuMat& img, std:
}
void cv::gpu::HOGDescriptor::detect(const GpuMat& img, std::vector<Point>& hits, double hit_threshold, Size win_stride, Size padding)
void cv::cuda::HOGDescriptor::detect(const GpuMat& img, std::vector<Point>& hits, double hit_threshold, Size win_stride, Size padding)
{
CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4);
CV_Assert(padding == Size(0, 0));
@ -396,7 +396,7 @@ void cv::gpu::HOGDescriptor::detect(const GpuMat& img, std::vector<Point>& hits,
void cv::gpu::HOGDescriptor::detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations, double hit_threshold,
void cv::cuda::HOGDescriptor::detectMultiScale(const GpuMat& img, std::vector<Rect>& found_locations, double hit_threshold,
Size win_stride, Size padding, double scale0, int group_threshold)
{
@ -450,22 +450,22 @@ void cv::gpu::HOGDescriptor::detectMultiScale(const GpuMat& img, std::vector<Rec
groupRectangles(found_locations, group_threshold, 0.2/*magic number copied from CPU version*/);
}
int cv::gpu::HOGDescriptor::numPartsWithin(int size, int part_size, int stride)
int cv::cuda::HOGDescriptor::numPartsWithin(int size, int part_size, int stride)
{
return (size - part_size + stride) / stride;
}
cv::Size cv::gpu::HOGDescriptor::numPartsWithin(cv::Size size, cv::Size part_size, cv::Size stride)
cv::Size cv::cuda::HOGDescriptor::numPartsWithin(cv::Size size, cv::Size part_size, cv::Size stride)
{
return Size(numPartsWithin(size.width, part_size.width, stride.width), numPartsWithin(size.height, part_size.height, stride.height));
}
std::vector<float> cv::gpu::HOGDescriptor::getDefaultPeopleDetector()
std::vector<float> cv::cuda::HOGDescriptor::getDefaultPeopleDetector()
{
return getPeopleDetector64x128();
}
std::vector<float> cv::gpu::HOGDescriptor::getPeopleDetector48x96()
std::vector<float> cv::cuda::HOGDescriptor::getPeopleDetector48x96()
{
static const float detector[] = {
0.294350f, -0.098796f, -0.129522f, 0.078753f, 0.387527f, 0.261529f,
@ -805,7 +805,7 @@ std::vector<float> cv::gpu::HOGDescriptor::getPeopleDetector48x96()
std::vector<float> cv::gpu::HOGDescriptor::getPeopleDetector64x128()
std::vector<float> cv::cuda::HOGDescriptor::getPeopleDetector64x128()
{
static const float detector[] = {
0.05359386f, -0.14721455f, -0.05532170f, 0.05077307f,
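The people-detector weights above are untouched by this commit; only the cv::gpu:: qualifiers around them change. For orientation, a minimal caller-side sketch of the renamed class follows. It is not part of the commit: the opencv2/gpu.hpp include path and the synthetic input frame are assumptions, and a real application would load an actual CV_8UC1 or CV_8UC4 image.

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/gpu.hpp>   // assumed module header at the time of this commit

    int main()
    {
        // Placeholder input: the HOG implementation accepts CV_8UC1 or CV_8UC4 frames.
        cv::Mat frame(480, 640, CV_8UC1, cv::Scalar(0));
        cv::cuda::GpuMat d_frame(frame);                      // upload to the device

        cv::cuda::HOGDescriptor hog;                          // formerly cv::gpu::HOGDescriptor
        hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());

        std::vector<cv::Rect> found;
        hog.detectMultiScale(d_frame, found);                 // multi-scale people detection
        return static_cast<int>(found.size());
    }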

View File

@ -49,15 +49,15 @@ using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// transformPoints
struct TransformPoints : testing::TestWithParam<cv::gpu::DeviceInfo>
struct TransformPoints : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -67,8 +67,8 @@ GPU_TEST_P(TransformPoints, Accuracy)
cv::Mat rvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
cv::Mat tvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
cv::gpu::GpuMat dst;
cv::gpu::transformPoints(loadMat(src), rvec, tvec, dst);
cv::cuda::GpuMat dst;
cv::cuda::transformPoints(loadMat(src), rvec, tvec, dst);
ASSERT_EQ(src.size(), dst.size());
ASSERT_EQ(src.type(), dst.type());
@ -97,15 +97,15 @@ INSTANTIATE_TEST_CASE_P(GPU_Calib3D, TransformPoints, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// ProjectPoints
struct ProjectPoints : testing::TestWithParam<cv::gpu::DeviceInfo>
struct ProjectPoints : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -120,8 +120,8 @@ GPU_TEST_P(ProjectPoints, Accuracy)
camera_mat.at<float>(2, 0) = 0.f;
camera_mat.at<float>(2, 1) = 0.f;
cv::gpu::GpuMat dst;
cv::gpu::projectPoints(loadMat(src), rvec, tvec, camera_mat, cv::Mat(), dst);
cv::cuda::GpuMat dst;
cv::cuda::projectPoints(loadMat(src), rvec, tvec, camera_mat, cv::Mat(), dst);
ASSERT_EQ(1, dst.rows);
ASSERT_EQ(MatType(CV_32FC2), MatType(dst.type()));
@ -147,15 +147,15 @@ INSTANTIATE_TEST_CASE_P(GPU_Calib3D, ProjectPoints, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// SolvePnPRansac
struct SolvePnPRansac : testing::TestWithParam<cv::gpu::DeviceInfo>
struct SolvePnPRansac : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -177,7 +177,7 @@ GPU_TEST_P(SolvePnPRansac, Accuracy)
cv::Mat rvec, tvec;
std::vector<int> inliers;
cv::gpu::solvePnPRansac(object, cv::Mat(1, (int)image_vec.size(), CV_32FC2, &image_vec[0]),
cv::cuda::solvePnPRansac(object, cv::Mat(1, (int)image_vec.size(), CV_32FC2, &image_vec[0]),
camera_mat, cv::Mat(1, 8, CV_32F, cv::Scalar::all(0)),
rvec, tvec, false, 200, 2.f, 100, &inliers);
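The calib3d wrappers keep their signatures; only the namespace changes. A caller-side sketch of the renamed projectPoints is shown below purely as an illustration; the opencv2/gpu.hpp include, the identity camera matrix, and the point values are assumptions, not something this commit introduces.

    #include <opencv2/core.hpp>
    #include <opencv2/gpu.hpp>   // assumed header for the CUDA calib3d wrappers

    int main()
    {
        cv::Mat src(1, 100, CV_32FC3);                    // one row of 3D points, as in the test
        cv::randu(src, cv::Scalar::all(1), cv::Scalar::all(10));

        cv::Mat rvec = cv::Mat::zeros(1, 3, CV_32F);      // no rotation
        cv::Mat tvec = cv::Mat::zeros(1, 3, CV_32F);      // no translation
        cv::Mat camera = cv::Mat::eye(3, 3, CV_32F);      // placeholder intrinsics

        cv::cuda::GpuMat dst;
        cv::cuda::projectPoints(cv::cuda::GpuMat(src), rvec, tvec, camera, cv::Mat(), dst);
        return dst.cols;                                  // dst holds the projected CV_32FC2 points
    }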

View File

@ -47,9 +47,9 @@
using namespace std;
using namespace cv;
struct CompactPoints : testing::TestWithParam<gpu::DeviceInfo>
struct CompactPoints : testing::TestWithParam<cuda::DeviceInfo>
{
virtual void SetUp() { gpu::setDevice(GetParam().deviceID()); }
virtual void SetUp() { cuda::setDevice(GetParam().deviceID()); }
};
GPU_TEST_P(CompactPoints, CanCompactizeSmallInput)
@ -69,8 +69,8 @@ GPU_TEST_P(CompactPoints, CanCompactizeSmallInput)
mask.at<uchar>(0,1) = 0;
mask.at<uchar>(0,2) = 1;
gpu::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask);
gpu::compactPoints(dsrc0, dsrc1, dmask);
cuda::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask);
cuda::compactPoints(dsrc0, dsrc1, dmask);
dsrc0.download(src0);
dsrc1.download(src1);

View File

@ -49,9 +49,9 @@ using namespace cvtest;
////////////////////////////////////////////////////////////////////////////////
// SetTo
PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
PARAM_TEST_CASE(SetTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
@ -63,7 +63,7 @@ PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -71,7 +71,7 @@ GPU_TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(zero);
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
@ -81,11 +81,11 @@ GPU_TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
@ -95,7 +95,7 @@ GPU_TEST_P(SetTo, SameVal)
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
@ -106,11 +106,11 @@ GPU_TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
}
catch (const cv::Exception& e)
@ -120,7 +120,7 @@ GPU_TEST_P(SetTo, DifferentVal)
}
else
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val);
EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
@ -133,11 +133,11 @@ GPU_TEST_P(SetTo, Masked)
cv::Mat mat_gold = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat mat = createMat(size, type, useRoi);
cv::cuda::GpuMat mat = createMat(size, type, useRoi);
mat.setTo(val, loadMat(mask));
}
catch (const cv::Exception& e)
@ -147,7 +147,7 @@ GPU_TEST_P(SetTo, Masked)
}
else
{
cv::gpu::GpuMat mat = loadMat(mat_gold, useRoi);
cv::cuda::GpuMat mat = loadMat(mat_gold, useRoi);
mat.setTo(val, loadMat(mask, useRoi));
mat_gold.setTo(val, mask);
@ -165,9 +165,9 @@ INSTANTIATE_TEST_CASE_P(GPU_GpuMat, SetTo, testing::Combine(
////////////////////////////////////////////////////////////////////////////////
// CopyTo
PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
PARAM_TEST_CASE(CopyTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
@ -180,7 +180,7 @@ PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -188,8 +188,8 @@ GPU_TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, type, useRoi);
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, type, useRoi);
d_src.copyTo(dst);
EXPECT_MAT_NEAR(src, dst, 0.0);
@ -200,12 +200,12 @@ GPU_TEST_P(CopyTo, Masked)
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.copyTo(dst, loadMat(mask, useRoi));
}
catch (const cv::Exception& e)
@ -215,8 +215,8 @@ GPU_TEST_P(CopyTo, Masked)
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
d_src.copyTo(dst, loadMat(mask, useRoi));
cv::Mat dst_gold = cv::Mat::zeros(size, type);
@ -235,9 +235,9 @@ INSTANTIATE_TEST_CASE_P(GPU_GpuMat, CopyTo, testing::Combine(
////////////////////////////////////////////////////////////////////////////////
// ConvertTo
PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
PARAM_TEST_CASE(ConvertTo, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
cv::Size size;
int depth1;
int depth2;
@ -251,7 +251,7 @@ PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, Us
depth2 = GET_PARAM(3);
useRoi = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -259,12 +259,12 @@ GPU_TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.convertTo(dst, depth2);
}
catch (const cv::Exception& e)
@ -274,8 +274,8 @@ GPU_TEST_P(ConvertTo, WithOutScaling)
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2);
cv::Mat dst_gold;
@ -291,12 +291,12 @@ GPU_TEST_P(ConvertTo, WithScaling)
double a = randomDouble(0.0, 1.0);
double b = randomDouble(-10.0, 10.0);
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::gpu::NATIVE_DOUBLE))
if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
{
try
{
cv::gpu::GpuMat d_src = loadMat(src);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat d_src = loadMat(src);
cv::cuda::GpuMat dst;
d_src.convertTo(dst, depth2, a, b);
}
catch (const cv::Exception& e)
@ -306,8 +306,8 @@ GPU_TEST_P(ConvertTo, WithScaling)
}
else
{
cv::gpu::GpuMat d_src = loadMat(src, useRoi);
cv::gpu::GpuMat dst = createMat(size, depth2, useRoi);
cv::cuda::GpuMat d_src = loadMat(src, useRoi);
cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
d_src.convertTo(dst, depth2, a, b);
cv::Mat dst_gold;
@ -327,29 +327,29 @@ INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
////////////////////////////////////////////////////////////////////////////////
// ensureSizeIsEnough
struct EnsureSizeIsEnough : testing::TestWithParam<cv::gpu::DeviceInfo>
struct EnsureSizeIsEnough : testing::TestWithParam<cv::cuda::DeviceInfo>
{
virtual void SetUp()
{
cv::gpu::DeviceInfo devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(EnsureSizeIsEnough, BufferReuse)
{
cv::gpu::GpuMat buffer(100, 100, CV_8U);
cv::gpu::GpuMat old = buffer;
cv::cuda::GpuMat buffer(100, 100, CV_8U);
cv::cuda::GpuMat old = buffer;
// don't reallocate memory
cv::gpu::ensureSizeIsEnough(10, 20, CV_8U, buffer);
cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
EXPECT_EQ(10, buffer.rows);
EXPECT_EQ(20, buffer.cols);
EXPECT_EQ(CV_8UC1, buffer.type());
EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
// don't reallocate memory
cv::gpu::ensureSizeIsEnough(20, 30, CV_8U, buffer);
cv::cuda::ensureSizeIsEnough(20, 30, CV_8U, buffer);
EXPECT_EQ(20, buffer.rows);
EXPECT_EQ(30, buffer.cols);
EXPECT_EQ(CV_8UC1, buffer.type());
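Since these tests only swap the namespace, the underlying device-matrix workflow is unchanged. A condensed sketch of the same calls under the new spelling (device 0 and the matrix sizes are arbitrary choices, not part of the commit):

    #include <opencv2/core.hpp>
    #include <opencv2/core/gpu.hpp>   // GpuMat, setDevice, ensureSizeIsEnough

    int main()
    {
        cv::cuda::setDevice(0);                            // select the first CUDA device

        cv::cuda::GpuMat mat(64, 64, CV_8UC3);
        mat.setTo(cv::Scalar::all(127));                   // fill on the device

        cv::cuda::GpuMat converted;
        mat.convertTo(converted, CV_32F, 1.0 / 255.0);     // per-element conversion with scaling

        cv::Mat host;
        converted.download(host);                          // copy the result back to the CPU

        // Reuse an existing allocation when the requested size already fits.
        cv::cuda::GpuMat buffer(100, 100, CV_8U);
        cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
        return host.rows;
    }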

View File

@ -151,14 +151,14 @@ namespace
};
}
struct Labeling : testing::TestWithParam<cv::gpu::DeviceInfo>
struct Labeling : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
cv::Mat loat_image()
@ -179,15 +179,15 @@ GPU_TEST_P(Labeling, DISABLED_ConnectedComponents)
GreedyLabeling host(image);
host(host._labels);
cv::gpu::GpuMat mask;
cv::cuda::GpuMat mask;
mask.create(image.rows, image.cols, CV_8UC1);
cv::gpu::GpuMat components;
cv::cuda::GpuMat components;
components.create(image.rows, image.cols, CV_32SC1);
cv::gpu::connectivityMask(cv::gpu::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2));
cv::cuda::connectivityMask(cv::cuda::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2));
cv::gpu::labelComponents(mask, components);
cv::cuda::labelComponents(mask, components);
host.checkCorrectness(cv::Mat(components));
}
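connectivityMask and labelComponents are likewise only re-qualified. The sketch below shows the renamed calls on a synthetic binary image; the opencv2/gpu.hpp include, the image content, and the lo/hi threshold scalars are assumptions carried over from the test above, not new API.

    #include <opencv2/core.hpp>
    #include <opencv2/gpu.hpp>   // assumed header for connectivityMask/labelComponents

    int main()
    {
        cv::Mat image(128, 128, CV_8UC1, cv::Scalar(0));
        image(cv::Rect(32, 32, 64, 64)).setTo(cv::Scalar(255));   // one square blob

        cv::cuda::GpuMat d_image(image);
        cv::cuda::GpuMat mask(image.rows, image.cols, CV_8UC1);
        cv::cuda::GpuMat components(image.rows, image.cols, CV_32SC1);

        // Build the connectivity mask with the lo/hi thresholds used in the test,
        // then run the label propagation itself.
        cv::cuda::connectivityMask(d_image, mask, cv::Scalar::all(0), cv::Scalar::all(2));
        cv::cuda::labelComponents(mask, components);
        return components.type();
    }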

View File

@ -48,9 +48,9 @@ using namespace cvtest;
//#define DUMP
struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
struct HOG : testing::TestWithParam<cv::cuda::DeviceInfo>, cv::cuda::HOGDescriptor
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
#ifdef DUMP
std::ofstream f;
@ -68,7 +68,7 @@ struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
#ifdef DUMP
@ -127,7 +127,7 @@ struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
void testDetect(const cv::Mat& img)
{
gamma_correction = false;
setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
std::vector<cv::Point> locations;
@ -212,10 +212,10 @@ GPU_TEST_P(HOG, GetDescriptors)
cv::Mat img;
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
cv::gpu::GpuMat d_img(img);
cv::cuda::GpuMat d_img(img);
// Convert train images into feature vectors (train table)
cv::gpu::GpuMat descriptors, descriptors_by_cols;
cv::cuda::GpuMat descriptors, descriptors_by_cols;
getDescriptors(d_img, win_size, descriptors, DESCR_FORMAT_ROW_BY_ROW);
getDescriptors(d_img, win_size, descriptors_by_cols, DESCR_FORMAT_COL_BY_COL);
@ -251,38 +251,38 @@ GPU_TEST_P(HOG, GetDescriptors)
img_rgb = readImage("hog/positive1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
// Everything is fine with interpolation for left top subimage
ASSERT_EQ(0.0, cv::norm((cv::Mat)block_hists, (cv::Mat)descriptors.rowRange(0, 1)));
img_rgb = readImage("hog/positive2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(1, 2)));
img_rgb = readImage("hog/negative1.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(2, 3)));
img_rgb = readImage("hog/negative2.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(3, 4)));
img_rgb = readImage("hog/positive3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(4, 5)));
img_rgb = readImage("hog/negative3.png");
ASSERT_TRUE(!img_rgb.empty());
cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA);
computeBlockHistograms(cv::gpu::GpuMat(img));
computeBlockHistograms(cv::cuda::GpuMat(img));
compare_inner_parts(cv::Mat(block_hists), cv::Mat(descriptors.rowRange(5, 6)));
}
@ -290,15 +290,15 @@ INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, HOG, ALL_DEVICES);
//============== caltech hog tests =====================//
struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::gpu::DeviceInfo, std::string> >
struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::cuda::DeviceInfo, std::string> >
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
cv::Mat img;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
img = readImage(GET_PARAM(1), cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
@ -307,11 +307,11 @@ struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::gpu::Device
GPU_TEST_P(CalTech, HOG)
{
cv::gpu::GpuMat d_img(img);
cv::cuda::GpuMat d_img(img);
cv::Mat markedImage(img.clone());
cv::gpu::HOGDescriptor d_hog;
d_hog.setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector());
cv::cuda::HOGDescriptor d_hog;
d_hog.setSVMDetector(cv::cuda::HOGDescriptor::getDefaultPeopleDetector());
d_hog.nlevels = d_hog.nlevels + 32;
std::vector<cv::Rect> found_locations;
@ -341,20 +341,20 @@ INSTANTIATE_TEST_CASE_P(detect, CalTech, testing::Combine(ALL_DEVICES,
//////////////////////////////////////////////////////////////////////////////////////////
/// LBP classifier
PARAM_TEST_CASE(LBP_Read_classifier, cv::gpu::DeviceInfo, int)
PARAM_TEST_CASE(LBP_Read_classifier, cv::cuda::DeviceInfo, int)
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(LBP_Read_classifier, Accuracy)
{
cv::gpu::CascadeClassifier_GPU classifier;
cv::cuda::CascadeClassifier_GPU classifier;
std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
ASSERT_TRUE(classifier.load(classifierXmlPath));
}
@ -363,14 +363,14 @@ INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, LBP_Read_classifier,
testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
PARAM_TEST_CASE(LBP_classify, cv::gpu::DeviceInfo, int)
PARAM_TEST_CASE(LBP_classify, cv::cuda::DeviceInfo, int)
{
cv::gpu::DeviceInfo devInfo;
cv::cuda::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::setDevice(devInfo.deviceID());
}
};
@ -396,11 +396,11 @@ GPU_TEST_P(LBP_classify, Accuracy)
for (; it != rects.end(); ++it)
cv::rectangle(markedImage, *it, cv::Scalar(255, 0, 0));
cv::gpu::CascadeClassifier_GPU gpuClassifier;
cv::cuda::CascadeClassifier_GPU gpuClassifier;
ASSERT_TRUE(gpuClassifier.load(classifierXmlPath));
cv::gpu::GpuMat gpu_rects;
cv::gpu::GpuMat tested(grey);
cv::cuda::GpuMat gpu_rects;
cv::cuda::GpuMat tested(grey);
int count = gpuClassifier.detectMultiScale(tested, gpu_rects);
#if defined (LOG_CASCADE_STATISTIC)
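The LBP cascade tests show the renamed classifier end to end. A trimmed-down sketch of the same calls; the cascade path and the blank input frame are placeholders, and the opencv2/gpu.hpp include is an assumption:

    #include <opencv2/core.hpp>
    #include <opencv2/gpu.hpp>   // assumed header for CascadeClassifier_GPU

    int main()
    {
        cv::cuda::CascadeClassifier_GPU classifier;          // formerly cv::gpu::CascadeClassifier_GPU
        if (!classifier.load("lbpcascade_frontalface.xml"))  // placeholder path
            return 1;

        cv::Mat grey(480, 640, CV_8UC1, cv::Scalar(0));      // placeholder grayscale frame
        cv::cuda::GpuMat d_grey(grey);

        cv::cuda::GpuMat objects;                            // detections are packed on the device
        int count = classifier.detectMultiScale(d_grey, objects);
        return count;
    }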

View File

@ -104,7 +104,7 @@ GPU_TEST_P(Buffer, ConstructorFromMat)
GPU_TEST_P(Buffer, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::gpu::GpuMat d_gold(gold);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Buffer buf(d_gold, cv::ogl::Buffer::ARRAY_BUFFER);
@ -152,7 +152,7 @@ GPU_TEST_P(Buffer, CopyFromMat)
GPU_TEST_P(Buffer, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type);
cv::gpu::GpuMat d_gold(gold);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Buffer buf;
buf.copyFrom(d_gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
@ -185,7 +185,7 @@ GPU_TEST_P(Buffer, CopyToGpuMat)
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat dst;
buf.copyTo(dst);
EXPECT_MAT_NEAR(gold, dst, 0);
@ -261,7 +261,7 @@ GPU_TEST_P(Buffer, MapDevice)
cv::ogl::Buffer buf(gold, cv::ogl::Buffer::ARRAY_BUFFER, true);
cv::gpu::GpuMat dst = buf.mapDevice();
cv::cuda::GpuMat dst = buf.mapDevice();
EXPECT_MAT_NEAR(gold, dst, 0);
@ -335,7 +335,7 @@ GPU_TEST_P(Texture2D, ConstructorFromMat)
GPU_TEST_P(Texture2D, ConstructorFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::gpu::GpuMat d_gold(gold);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Texture2D tex(d_gold, true);
@ -395,7 +395,7 @@ GPU_TEST_P(Texture2D, CopyFromMat)
GPU_TEST_P(Texture2D, CopyFromGpuMat)
{
cv::Mat gold = randomMat(size, type, 0, depth == CV_8U ? 255 : 1);
cv::gpu::GpuMat d_gold(gold);
cv::cuda::GpuMat d_gold(gold);
cv::ogl::Texture2D tex;
tex.copyFrom(d_gold, true);
@ -426,7 +426,7 @@ GPU_TEST_P(Texture2D, CopyToGpuMat)
cv::ogl::Texture2D tex(gold, true);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat dst;
tex.copyTo(dst, depth);
EXPECT_MAT_NEAR(gold, dst, 1e-2);

View File

@ -50,20 +50,20 @@
using namespace cvtest;
struct Async : testing::TestWithParam<cv::gpu::DeviceInfo>
struct Async : testing::TestWithParam<cv::cuda::DeviceInfo>
{
cv::gpu::CudaMem src;
cv::gpu::GpuMat d_src;
cv::cuda::CudaMem src;
cv::cuda::GpuMat d_src;
cv::gpu::CudaMem dst;
cv::gpu::GpuMat d_dst;
cv::cuda::CudaMem dst;
cv::cuda::GpuMat d_dst;
virtual void SetUp()
{
cv::gpu::DeviceInfo devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
cv::cuda::DeviceInfo devInfo = GetParam();
cv::cuda::setDevice(devInfo.deviceID());
src = cv::gpu::CudaMem(cv::gpu::CudaMem::PAGE_LOCKED);
src = cv::cuda::CudaMem(cv::cuda::CudaMem::PAGE_LOCKED);
cv::Mat m = randomMat(cv::Size(128, 128), CV_8UC1);
m.copyTo(src);
@ -76,8 +76,8 @@ void checkMemSet(int status, void* userData)
Async* test = reinterpret_cast<Async*>(userData);
cv::gpu::CudaMem src = test->src;
cv::gpu::CudaMem dst = test->dst;
cv::cuda::CudaMem src = test->src;
cv::cuda::CudaMem dst = test->dst;
cv::Mat dst_gold = cv::Mat::zeros(src.size(), src.type());
@ -86,7 +86,7 @@ void checkMemSet(int status, void* userData)
GPU_TEST_P(Async, MemSet)
{
cv::gpu::Stream stream;
cv::cuda::Stream stream;
d_dst.upload(src);
@ -105,8 +105,8 @@ void checkConvert(int status, void* userData)
Async* test = reinterpret_cast<Async*>(userData);
cv::gpu::CudaMem src = test->src;
cv::gpu::CudaMem dst = test->dst;
cv::cuda::CudaMem src = test->src;
cv::cuda::CudaMem dst = test->dst;
cv::Mat dst_gold;
src.createMatHeader().convertTo(dst_gold, CV_32S);
@ -116,7 +116,7 @@ void checkConvert(int status, void* userData)
GPU_TEST_P(Async, Convert)
{
cv::gpu::Stream stream;
cv::cuda::Stream stream;
d_src.upload(src, stream);
d_src.convertTo(d_dst, CV_32S, stream);
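The asynchronous tests pair page-locked CudaMem buffers with a cv::cuda::Stream. A compact sketch of the same pattern, synchronizing with waitForCompletion instead of a host callback (the buffer size and fill value are arbitrary):

    #include <opencv2/core.hpp>
    #include <opencv2/core/gpu.hpp>   // GpuMat, CudaMem, Stream

    int main()
    {
        // Page-locked host memory enables genuinely asynchronous transfers.
        cv::cuda::CudaMem src(cv::cuda::CudaMem::PAGE_LOCKED);
        cv::Mat m(128, 128, CV_8UC1, cv::Scalar(7));
        m.copyTo(src);

        cv::cuda::GpuMat d_src, d_dst;
        cv::cuda::Stream stream;

        d_src.upload(src, stream);                 // enqueue the host-to-device copy
        d_src.convertTo(d_dst, CV_32S, stream);    // enqueue the conversion on the same stream

        stream.waitForCompletion();                // block until all queued work has finished
        return d_dst.type() == CV_32SC1 ? 0 : 1;
    }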

View File

@ -49,7 +49,7 @@
#include "opencv2/core/gpu.hpp"
namespace cv { namespace gpu {
namespace cv { namespace cuda {
//! adds one matrix to another (dst = src1 + src2)
CV_EXPORTS void add(InputArray src1, InputArray src2, OutputArray dst, InputArray mask = noArray(), int dtype = -1, Stream& stream = Stream::Null());
@ -369,6 +369,6 @@ public:
CV_EXPORTS Ptr<Convolution> createConvolution(Size user_block_size = Size());
}} // namespace cv { namespace gpu {
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_GPUARITHM_HPP__ */
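Only the enclosing namespace of this header changes; the declarations themselves are untouched. For instance, the per-element add declared above is now called as follows (sizes and values are arbitrary):

    #include <opencv2/core.hpp>
    #include <opencv2/gpuarithm.hpp>   // the header shown above

    int main()
    {
        cv::cuda::GpuMat a(32, 32, CV_32FC1, cv::Scalar(1));
        cv::cuda::GpuMat b(32, 32, CV_32FC1, cv::Scalar(2));
        cv::cuda::GpuMat sum, masked;

        cv::cuda::add(a, b, sum);                               // dst = a + b

        cv::cuda::GpuMat mask(32, 32, CV_8UC1, cv::Scalar(1));  // update every pixel
        cv::cuda::add(a, b, masked, mask, CV_32F);              // masked add with an explicit dtype
        return sum.type();
    }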

View File

@ -77,12 +77,12 @@ PERF_TEST_P(Sz_Type_Flags, GEMM,
{
declare.time(5.0);
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
const cv::gpu::GpuMat d_src3(src3);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
const cv::cuda::GpuMat d_src3(src3);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags);
TEST_CYCLE() cv::cuda::gemm(d_src1, d_src2, 1.0, d_src3, 1.0, dst, flags);
GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
@ -118,11 +118,11 @@ PERF_TEST_P(Sz_Flags, MulSpectrums,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_a(a);
const cv::gpu::GpuMat d_b(b);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_a(a);
const cv::cuda::GpuMat d_b(b);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::mulSpectrums(d_a, d_b, dst, flag);
TEST_CYCLE() cv::cuda::mulSpectrums(d_a, d_b, dst, flag);
GPU_SANITY_CHECK(dst);
}
@ -152,11 +152,11 @@ PERF_TEST_P(Sz, MulAndScaleSpectrums,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::mulAndScaleSpectrums(d_src1, d_src2, dst, cv::DFT_ROWS, scale, false);
TEST_CYCLE() cv::cuda::mulAndScaleSpectrums(d_src1, d_src2, dst, cv::DFT_ROWS, scale, false);
GPU_SANITY_CHECK(dst);
}
@ -183,10 +183,10 @@ PERF_TEST_P(Sz_Flags, Dft,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::dft(d_src, dst, size, flag);
TEST_CYCLE() cv::cuda::dft(d_src, dst, size, flag);
GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
@ -222,15 +222,15 @@ PERF_TEST_P(Sz_KernelSz_Ccorr, Convolve,
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat d_image = cv::gpu::createContinuous(size, CV_32FC1);
cv::cuda::GpuMat d_image = cv::cuda::createContinuous(size, CV_32FC1);
d_image.upload(image);
cv::gpu::GpuMat d_templ = cv::gpu::createContinuous(templ_size, templ_size, CV_32FC1);
cv::cuda::GpuMat d_templ = cv::cuda::createContinuous(templ_size, templ_size, CV_32FC1);
d_templ.upload(templ);
cv::Ptr<cv::gpu::Convolution> convolution = cv::gpu::createConvolution();
cv::Ptr<cv::cuda::Convolution> convolution = cv::cuda::createConvolution();
cv::gpu::GpuMat dst;
cv::cuda::GpuMat dst;
TEST_CYCLE() convolution->convolve(d_image, d_templ, dst, ccorr);
@ -262,11 +262,11 @@ PERF_TEST_P(Sz, Integral,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
cv::cuda::GpuMat d_buf;
TEST_CYCLE() cv::gpu::integral(d_src, dst, d_buf);
TEST_CYCLE() cv::cuda::integral(d_src, dst, d_buf);
GPU_SANITY_CHECK(dst);
}
@ -293,10 +293,10 @@ PERF_TEST_P(Sz, IntegralSqr,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst, buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst, buf;
TEST_CYCLE() cv::gpu::sqrIntegral(d_src, dst, buf);
TEST_CYCLE() cv::cuda::sqrIntegral(d_src, dst, buf);
GPU_SANITY_CHECK(dst);
}
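The convolution benchmark exercises the factory-based API from the header above. A reduced sketch of the same calls, assuming a build with CUFFT support; the matrix sizes and fill values are arbitrary:

    #include <opencv2/core.hpp>
    #include <opencv2/core/gpu.hpp>    // GpuMat, createContinuous
    #include <opencv2/gpuarithm.hpp>   // createConvolution

    int main()
    {
        // Continuous allocations, as used by the benchmark above, suit the FFT-based path.
        cv::cuda::GpuMat d_image = cv::cuda::createContinuous(512, 512, CV_32FC1);
        cv::cuda::GpuMat d_templ = cv::cuda::createContinuous(17, 17, CV_32FC1);
        d_image.setTo(cv::Scalar(1));
        d_templ.setTo(cv::Scalar(1));

        cv::Ptr<cv::cuda::Convolution> convolution = cv::cuda::createConvolution();

        cv::cuda::GpuMat dst;
        convolution->convolve(d_image, d_templ, dst, /*ccorr=*/true);
        return dst.rows;
    }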

View File

@ -69,13 +69,13 @@ PERF_TEST_P(Sz_Depth_Cn, Merge,
if (PERF_RUN_GPU())
{
std::vector<cv::gpu::GpuMat> d_src(channels);
std::vector<cv::cuda::GpuMat> d_src(channels);
for (int i = 0; i < channels; ++i)
d_src[i].upload(src[i]);
cv::gpu::GpuMat dst;
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::merge(d_src, dst);
TEST_CYCLE() cv::cuda::merge(d_src, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -106,13 +106,13 @@ PERF_TEST_P(Sz_Depth_Cn, Split,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
std::vector<cv::gpu::GpuMat> dst;
const cv::cuda::GpuMat d_src(src);
std::vector<cv::cuda::GpuMat> dst;
TEST_CYCLE() cv::gpu::split(d_src, dst);
TEST_CYCLE() cv::cuda::split(d_src, dst);
const cv::gpu::GpuMat& dst0 = dst[0];
const cv::gpu::GpuMat& dst1 = dst[1];
const cv::cuda::GpuMat& dst0 = dst[0];
const cv::cuda::GpuMat& dst1 = dst[1];
GPU_SANITY_CHECK(dst0, 1e-10);
GPU_SANITY_CHECK(dst1, 1e-10);
@ -146,10 +146,10 @@ PERF_TEST_P(Sz_Type, Transpose,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::transpose(d_src, dst);
TEST_CYCLE() cv::cuda::transpose(d_src, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -189,10 +189,10 @@ PERF_TEST_P(Sz_Depth_Cn_Code, Flip,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::flip(d_src, dst, flipCode);
TEST_CYCLE() cv::cuda::flip(d_src, dst, flipCode);
GPU_SANITY_CHECK(dst);
}
@ -224,10 +224,10 @@ PERF_TEST_P(Sz_Type, LutOneChannel,
if (PERF_RUN_GPU())
{
cv::Ptr<cv::gpu::LookUpTable> lutAlg = cv::gpu::createLookUpTable(lut);
cv::Ptr<cv::cuda::LookUpTable> lutAlg = cv::cuda::createLookUpTable(lut);
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() lutAlg->transform(d_src, dst);
@ -261,10 +261,10 @@ PERF_TEST_P(Sz_Type, LutMultiChannel,
if (PERF_RUN_GPU())
{
cv::Ptr<cv::gpu::LookUpTable> lutAlg = cv::gpu::createLookUpTable(lut);
cv::Ptr<cv::cuda::LookUpTable> lutAlg = cv::cuda::createLookUpTable(lut);
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() lutAlg->transform(d_src, dst);
@ -303,10 +303,10 @@ PERF_TEST_P(Sz_Depth_Cn_Border, CopyMakeBorder,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::copyMakeBorder(d_src, dst, 5, 5, 5, 5, borderMode);
TEST_CYCLE() cv::cuda::copyMakeBorder(d_src, dst, 5, 5, 5, 5, borderMode);
GPU_SANITY_CHECK(dst);
}
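The LUT benchmark uses the same algorithm-object form under the new namespace. In caller terms (the inversion table below is just an example):

    #include <opencv2/core.hpp>
    #include <opencv2/gpuarithm.hpp>   // createLookUpTable

    int main()
    {
        cv::Mat lut(1, 256, CV_8UC1);
        for (int i = 0; i < 256; ++i)
            lut.at<uchar>(i) = static_cast<uchar>(255 - i);     // simple inversion table

        cv::Ptr<cv::cuda::LookUpTable> lutAlg = cv::cuda::createLookUpTable(lut);

        cv::cuda::GpuMat d_src(64, 64, CV_8UC1, cv::Scalar(10));
        cv::cuda::GpuMat dst;
        lutAlg->transform(d_src, dst);                          // apply the table on the device
        return dst.type();
    }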

View File

@ -66,11 +66,11 @@ PERF_TEST_P(Sz_Depth, AddMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::add(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::add(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -102,10 +102,10 @@ PERF_TEST_P(Sz_Depth, AddScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::add(d_src, s, dst);
TEST_CYCLE() cv::cuda::add(d_src, s, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -137,11 +137,11 @@ PERF_TEST_P(Sz_Depth, SubtractMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::subtract(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::subtract(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -173,10 +173,10 @@ PERF_TEST_P(Sz_Depth, SubtractScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::subtract(d_src, s, dst);
TEST_CYCLE() cv::cuda::subtract(d_src, s, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -208,11 +208,11 @@ PERF_TEST_P(Sz_Depth, MultiplyMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::multiply(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::multiply(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -244,10 +244,10 @@ PERF_TEST_P(Sz_Depth, MultiplyScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::multiply(d_src, s, dst);
TEST_CYCLE() cv::cuda::multiply(d_src, s, dst);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -279,11 +279,11 @@ PERF_TEST_P(Sz_Depth, DivideMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::divide(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::divide(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -315,10 +315,10 @@ PERF_TEST_P(Sz_Depth, DivideScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::divide(d_src, s, dst);
TEST_CYCLE() cv::cuda::divide(d_src, s, dst);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -350,10 +350,10 @@ PERF_TEST_P(Sz_Depth, DivideScalarInv,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::divide(s[0], d_src, dst);
TEST_CYCLE() cv::cuda::divide(s[0], d_src, dst);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -385,11 +385,11 @@ PERF_TEST_P(Sz_Depth, AbsDiffMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::absdiff(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::absdiff(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -421,10 +421,10 @@ PERF_TEST_P(Sz_Depth, AbsDiffScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::absdiff(d_src, s, dst);
TEST_CYCLE() cv::cuda::absdiff(d_src, s, dst);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -453,10 +453,10 @@ PERF_TEST_P(Sz_Depth, Abs,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::abs(d_src, dst);
TEST_CYCLE() cv::cuda::abs(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -481,10 +481,10 @@ PERF_TEST_P(Sz_Depth, Sqr,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::sqr(d_src, dst);
TEST_CYCLE() cv::cuda::sqr(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -509,10 +509,10 @@ PERF_TEST_P(Sz_Depth, Sqrt,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::sqrt(d_src, dst);
TEST_CYCLE() cv::cuda::sqrt(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -541,10 +541,10 @@ PERF_TEST_P(Sz_Depth, Log,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::log(d_src, dst);
TEST_CYCLE() cv::cuda::log(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -573,10 +573,10 @@ PERF_TEST_P(Sz_Depth, Exp,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::exp(d_src, dst);
TEST_CYCLE() cv::cuda::exp(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -609,10 +609,10 @@ PERF_TEST_P(Sz_Depth_Power, Pow,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::pow(d_src, power, dst);
TEST_CYCLE() cv::cuda::pow(d_src, power, dst);
GPU_SANITY_CHECK(dst);
}
@ -650,11 +650,11 @@ PERF_TEST_P(Sz_Depth_Code, CompareMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::compare(d_src1, d_src2, dst, cmp_code);
TEST_CYCLE() cv::cuda::compare(d_src1, d_src2, dst, cmp_code);
GPU_SANITY_CHECK(dst);
}
@ -688,10 +688,10 @@ PERF_TEST_P(Sz_Depth_Code, CompareScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::compare(d_src, s, dst, cmp_code);
TEST_CYCLE() cv::cuda::compare(d_src, s, dst, cmp_code);
GPU_SANITY_CHECK(dst);
}
@ -720,10 +720,10 @@ PERF_TEST_P(Sz_Depth, BitwiseNot,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_not(d_src, dst);
TEST_CYCLE() cv::cuda::bitwise_not(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -755,11 +755,11 @@ PERF_TEST_P(Sz_Depth, BitwiseAndMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_and(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::bitwise_and(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -796,10 +796,10 @@ PERF_TEST_P(Sz_Depth_Cn, BitwiseAndScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_and(d_src, is, dst);
TEST_CYCLE() cv::cuda::bitwise_and(d_src, is, dst);
GPU_SANITY_CHECK(dst);
}
@ -831,11 +831,11 @@ PERF_TEST_P(Sz_Depth, BitwiseOrMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_or(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::bitwise_or(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -872,10 +872,10 @@ PERF_TEST_P(Sz_Depth_Cn, BitwiseOrScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_or(d_src, is, dst);
TEST_CYCLE() cv::cuda::bitwise_or(d_src, is, dst);
GPU_SANITY_CHECK(dst);
}
@ -907,11 +907,11 @@ PERF_TEST_P(Sz_Depth, BitwiseXorMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_xor(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::bitwise_xor(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -948,10 +948,10 @@ PERF_TEST_P(Sz_Depth_Cn, BitwiseXorScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::bitwise_xor(d_src, is, dst);
TEST_CYCLE() cv::cuda::bitwise_xor(d_src, is, dst);
GPU_SANITY_CHECK(dst);
}
@ -986,10 +986,10 @@ PERF_TEST_P(Sz_Depth_Cn, RShift,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::rshift(d_src, val, dst);
TEST_CYCLE() cv::cuda::rshift(d_src, val, dst);
GPU_SANITY_CHECK(dst);
}
@ -1020,10 +1020,10 @@ PERF_TEST_P(Sz_Depth_Cn, LShift,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::lshift(d_src, val, dst);
TEST_CYCLE() cv::cuda::lshift(d_src, val, dst);
GPU_SANITY_CHECK(dst);
}
@ -1051,11 +1051,11 @@ PERF_TEST_P(Sz_Depth, MinMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::min(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::min(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -1087,10 +1087,10 @@ PERF_TEST_P(Sz_Depth, MinScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::min(d_src, val[0], dst);
TEST_CYCLE() cv::cuda::min(d_src, val[0], dst);
GPU_SANITY_CHECK(dst);
}
@ -1122,11 +1122,11 @@ PERF_TEST_P(Sz_Depth, MaxMat,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::max(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::max(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -1158,10 +1158,10 @@ PERF_TEST_P(Sz_Depth, MaxScalar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::max(d_src, val[0], dst);
TEST_CYCLE() cv::cuda::max(d_src, val[0], dst);
GPU_SANITY_CHECK(dst);
}
@ -1199,11 +1199,11 @@ PERF_TEST_P(Sz_3Depth, AddWeighted,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::addWeighted(d_src1, 0.5, d_src2, 0.5, 10.0, dst, dst_depth);
TEST_CYCLE() cv::cuda::addWeighted(d_src1, 0.5, d_src2, 0.5, 10.0, dst, dst_depth);
GPU_SANITY_CHECK(dst, 1e-10);
}
@ -1230,10 +1230,10 @@ PERF_TEST_P(Sz, MagnitudeComplex,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::magnitude(d_src, dst);
TEST_CYCLE() cv::cuda::magnitude(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -1263,10 +1263,10 @@ PERF_TEST_P(Sz, MagnitudeSqrComplex,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::magnitudeSqr(d_src, dst);
TEST_CYCLE() cv::cuda::magnitudeSqr(d_src, dst);
GPU_SANITY_CHECK(dst);
}
@ -1292,11 +1292,11 @@ PERF_TEST_P(Sz, Magnitude,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::magnitude(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::magnitude(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -1326,11 +1326,11 @@ PERF_TEST_P(Sz, MagnitudeSqr,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::magnitudeSqr(d_src1, d_src2, dst);
TEST_CYCLE() cv::cuda::magnitudeSqr(d_src1, d_src2, dst);
GPU_SANITY_CHECK(dst);
}
@ -1360,11 +1360,11 @@ PERF_TEST_P(Sz_AngleInDegrees, Phase,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::phase(d_src1, d_src2, dst, angleInDegrees);
TEST_CYCLE() cv::cuda::phase(d_src1, d_src2, dst, angleInDegrees);
GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
@ -1396,12 +1396,12 @@ PERF_TEST_P(Sz_AngleInDegrees, CartToPolar,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat magnitude;
cv::gpu::GpuMat angle;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat magnitude;
cv::cuda::GpuMat angle;
TEST_CYCLE() cv::gpu::cartToPolar(d_src1, d_src2, magnitude, angle, angleInDegrees);
TEST_CYCLE() cv::cuda::cartToPolar(d_src1, d_src2, magnitude, angle, angleInDegrees);
GPU_SANITY_CHECK(magnitude);
GPU_SANITY_CHECK(angle, 1e-6, ERROR_RELATIVE);
@ -1436,12 +1436,12 @@ PERF_TEST_P(Sz_AngleInDegrees, PolarToCart,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_magnitude(magnitude);
const cv::gpu::GpuMat d_angle(angle);
cv::gpu::GpuMat x;
cv::gpu::GpuMat y;
const cv::cuda::GpuMat d_magnitude(magnitude);
const cv::cuda::GpuMat d_angle(angle);
cv::cuda::GpuMat x;
cv::cuda::GpuMat y;
TEST_CYCLE() cv::gpu::polarToCart(d_magnitude, d_angle, x, y, angleInDegrees);
TEST_CYCLE() cv::cuda::polarToCart(d_magnitude, d_angle, x, y, angleInDegrees);
GPU_SANITY_CHECK(x);
GPU_SANITY_CHECK(y);
@ -1479,10 +1479,10 @@ PERF_TEST_P(Sz_Depth_Op, Threshold,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::threshold(d_src, dst, 100.0, 255.0, threshOp);
TEST_CYCLE() cv::cuda::threshold(d_src, dst, 100.0, 255.0, threshOp);
GPU_SANITY_CHECK(dst, 1e-10);
}
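A small sketch combining a couple of the renamed per-element operations benchmarked above; the constants are arbitrary, and opencv2/imgproc.hpp is pulled in only for the THRESH_BINARY flag:

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>     // THRESH_BINARY
    #include <opencv2/gpuarithm.hpp>   // per-element operations

    int main()
    {
        cv::cuda::GpuMat a(64, 64, CV_32FC1, cv::Scalar(0.25));
        cv::cuda::GpuMat b(64, 64, CV_32FC1, cv::Scalar(0.75));
        cv::cuda::GpuMat blended, binary;

        // dst = 0.5*a + 0.5*b + 10
        cv::cuda::addWeighted(a, 0.5, b, 0.5, 10.0, blended);

        // Same call as the benchmark above, with fixed arguments.
        cv::cuda::threshold(blended, binary, 10.4, 255.0, cv::THRESH_BINARY);
        return binary.type();
    }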

View File

@ -68,11 +68,11 @@ PERF_TEST_P(Sz_Depth_Norm, Norm,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
double gpu_dst;
TEST_CYCLE() gpu_dst = cv::gpu::norm(d_src, normType, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::norm(d_src, normType, d_buf);
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
}
@ -106,12 +106,12 @@ PERF_TEST_P(Sz_Norm, NormDiff,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src1(src1);
const cv::gpu::GpuMat d_src2(src2);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src1(src1);
const cv::cuda::GpuMat d_src2(src2);
cv::cuda::GpuMat d_buf;
double gpu_dst;
TEST_CYCLE() gpu_dst = cv::gpu::norm(d_src1, d_src2, d_buf, normType);
TEST_CYCLE() gpu_dst = cv::cuda::norm(d_src1, d_src2, d_buf, normType);
SANITY_CHECK(gpu_dst);
@ -145,11 +145,11 @@ PERF_TEST_P(Sz_Depth_Cn, Sum,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::gpu::sum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::sum(d_src, d_buf);
SANITY_CHECK(gpu_dst, 1e-5, ERROR_RELATIVE);
}
@ -182,11 +182,11 @@ PERF_TEST_P(Sz_Depth_Cn, SumAbs,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::gpu::absSum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::absSum(d_src, d_buf);
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
}
@ -215,11 +215,11 @@ PERF_TEST_P(Sz_Depth_Cn, SumSqr,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_dst;
TEST_CYCLE() gpu_dst = cv::gpu::sqrSum(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::sqrSum(d_src, d_buf);
SANITY_CHECK(gpu_dst, 1e-6, ERROR_RELATIVE);
}
@ -247,11 +247,11 @@ PERF_TEST_P(Sz_Depth, MinMax,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
double gpu_minVal, gpu_maxVal;
TEST_CYCLE() cv::gpu::minMax(d_src, &gpu_minVal, &gpu_maxVal, cv::gpu::GpuMat(), d_buf);
TEST_CYCLE() cv::cuda::minMax(d_src, &gpu_minVal, &gpu_maxVal, cv::cuda::GpuMat(), d_buf);
SANITY_CHECK(gpu_minVal, 1e-10);
SANITY_CHECK(gpu_maxVal, 1e-10);
@ -285,12 +285,12 @@ PERF_TEST_P(Sz_Depth, MinMaxLoc,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_valbuf, d_locbuf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_valbuf, d_locbuf;
double gpu_minVal, gpu_maxVal;
cv::Point gpu_minLoc, gpu_maxLoc;
TEST_CYCLE() cv::gpu::minMaxLoc(d_src, &gpu_minVal, &gpu_maxVal, &gpu_minLoc, &gpu_maxLoc, cv::gpu::GpuMat(), d_valbuf, d_locbuf);
TEST_CYCLE() cv::cuda::minMaxLoc(d_src, &gpu_minVal, &gpu_maxVal, &gpu_minLoc, &gpu_maxLoc, cv::cuda::GpuMat(), d_valbuf, d_locbuf);
SANITY_CHECK(gpu_minVal, 1e-10);
SANITY_CHECK(gpu_maxVal, 1e-10);
@ -322,11 +322,11 @@ PERF_TEST_P(Sz_Depth, CountNonZero,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
int gpu_dst = 0;
TEST_CYCLE() gpu_dst = cv::gpu::countNonZero(d_src, d_buf);
TEST_CYCLE() gpu_dst = cv::cuda::countNonZero(d_src, d_buf);
SANITY_CHECK(gpu_dst);
}
@ -370,10 +370,10 @@ PERF_TEST_P(Sz_Depth_Cn_Code_Dim, Reduce,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
TEST_CYCLE() cv::gpu::reduce(d_src, dst, dim, reduceOp);
TEST_CYCLE() cv::cuda::reduce(d_src, dst, dim, reduceOp);
GPU_SANITY_CHECK(dst);
}
@ -412,11 +412,11 @@ PERF_TEST_P(Sz_Depth_NormType, Normalize,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_norm_buf, d_cvt_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat dst;
cv::cuda::GpuMat d_norm_buf, d_cvt_buf;
TEST_CYCLE() cv::gpu::normalize(d_src, dst, alpha, beta, norm_type, type, cv::gpu::GpuMat(), d_norm_buf, d_cvt_buf);
TEST_CYCLE() cv::cuda::normalize(d_src, dst, alpha, beta, norm_type, type, cv::cuda::GpuMat(), d_norm_buf, d_cvt_buf);
GPU_SANITY_CHECK(dst, 1e-6);
}
@ -444,12 +444,12 @@ PERF_TEST_P(Sz, MeanStdDev,
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_buf;
const cv::cuda::GpuMat d_src(src);
cv::cuda::GpuMat d_buf;
cv::Scalar gpu_mean;
cv::Scalar gpu_stddev;
TEST_CYCLE() cv::gpu::meanStdDev(d_src, gpu_mean, gpu_stddev, d_buf);
TEST_CYCLE() cv::cuda::meanStdDev(d_src, gpu_mean, gpu_stddev, d_buf);
SANITY_CHECK(gpu_mean);
SANITY_CHECK(gpu_stddev);
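For the reductions, the renamed minMaxLoc with its explicit scratch buffers reads as follows (the input contents are random and purely illustrative):

    #include <opencv2/core.hpp>
    #include <opencv2/gpuarithm.hpp>   // minMaxLoc and the other reductions

    int main()
    {
        cv::Mat src(256, 256, CV_8UC1);
        cv::randu(src, cv::Scalar(0), cv::Scalar(256));
        cv::cuda::GpuMat d_src(src);

        double minVal = 0.0, maxVal = 0.0;
        cv::Point minLoc, maxLoc;
        cv::cuda::GpuMat valbuf, locbuf;               // reusable scratch buffers

        // No mask (empty GpuMat), exactly as in the benchmark above.
        cv::cuda::minMaxLoc(d_src, &minVal, &maxVal, &minLoc, &maxLoc,
                            cv::cuda::GpuMat(), valbuf, locbuf);
        return static_cast<int>(maxVal - minVal);
    }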

View File

@ -43,18 +43,18 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::gemm(InputArray, InputArray, double, InputArray, double, OutputArray, int, Stream&) { throw_no_cuda(); }
void cv::cuda::gemm(InputArray, InputArray, double, InputArray, double, OutputArray, int, Stream&) { throw_no_cuda(); }
void cv::gpu::mulSpectrums(InputArray, InputArray, OutputArray, int, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::mulAndScaleSpectrums(InputArray, InputArray, OutputArray, int, float, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::mulSpectrums(InputArray, InputArray, OutputArray, int, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::mulAndScaleSpectrums(InputArray, InputArray, OutputArray, int, float, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::dft(InputArray, OutputArray, Size, int, Stream&) { throw_no_cuda(); }
void cv::cuda::dft(InputArray, OutputArray, Size, int, Stream&) { throw_no_cuda(); }
Ptr<Convolution> cv::gpu::createConvolution(Size) { throw_no_cuda(); return Ptr<Convolution>(); }
Ptr<Convolution> cv::cuda::createConvolution(Size) { throw_no_cuda(); return Ptr<Convolution>(); }
#else /* !defined (HAVE_CUDA) */
@ -162,7 +162,7 @@ namespace
////////////////////////////////////////////////////////////////////////
// gemm
void cv::gpu::gemm(InputArray _src1, InputArray _src2, double alpha, InputArray _src3, double beta, OutputArray _dst, int flags, Stream& stream)
void cv::cuda::gemm(InputArray _src1, InputArray _src2, double alpha, InputArray _src3, double beta, OutputArray _dst, int flags, Stream& stream)
{
#ifndef HAVE_CUBLAS
(void) _src1;
@ -221,7 +221,7 @@ void cv::gpu::gemm(InputArray _src1, InputArray _src2, double alpha, InputArray
{
if (tr3)
{
gpu::transpose(src3, dst, stream);
cuda::transpose(src3, dst, stream);
}
else
{
@ -297,7 +297,7 @@ void cv::gpu::gemm(InputArray _src1, InputArray _src2, double alpha, InputArray
#ifdef HAVE_CUFFT
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream);
@ -306,7 +306,7 @@ namespace cv { namespace gpu { namespace cudev
#endif
void cv::gpu::mulSpectrums(InputArray _src1, InputArray _src2, OutputArray _dst, int flags, bool conjB, Stream& stream)
void cv::cuda::mulSpectrums(InputArray _src1, InputArray _src2, OutputArray _dst, int flags, bool conjB, Stream& stream)
{
#ifndef HAVE_CUFFT
(void) _src1;
@ -341,7 +341,7 @@ void cv::gpu::mulSpectrums(InputArray _src1, InputArray _src2, OutputArray _dst,
#ifdef HAVE_CUFFT
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream);
@ -350,7 +350,7 @@ namespace cv { namespace gpu { namespace cudev
#endif
void cv::gpu::mulAndScaleSpectrums(InputArray _src1, InputArray _src2, OutputArray _dst, int flags, float scale, bool conjB, Stream& stream)
void cv::cuda::mulAndScaleSpectrums(InputArray _src1, InputArray _src2, OutputArray _dst, int flags, float scale, bool conjB, Stream& stream)
{
#ifndef HAVE_CUFFT
(void) _src1;
@ -384,7 +384,7 @@ void cv::gpu::mulAndScaleSpectrums(InputArray _src1, InputArray _src2, OutputArr
//////////////////////////////////////////////////////////////////////////////
// dft
void cv::gpu::dft(InputArray _src, OutputArray _dst, Size dft_size, int flags, Stream& stream)
void cv::cuda::dft(InputArray _src, OutputArray _dst, Size dft_size, int flags, Stream& stream)
{
#ifndef HAVE_CUFFT
(void) _src;
@ -478,7 +478,7 @@ void cv::gpu::dft(InputArray _src, OutputArray _dst, Size dft_size, int flags, S
cufftSafeCall( cufftDestroy(plan) );
if (is_scaled_dft)
gpu::multiply(_dst, Scalar::all(1. / dft_size.area()), _dst, 1, -1, stream);
cuda::multiply(_dst, Scalar::all(1. / dft_size.area()), _dst, 1, -1, stream);
#endif
}
@ -580,7 +580,7 @@ namespace
cufftSafeCall( cufftSetStream(planC2R, stream) );
GpuMat templ_roi(templ.size(), CV_32FC1, templ.data, templ.step);
gpu::copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
cuda::copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
templ_block.cols - templ_roi.cols, 0, Scalar(), _stream);
cufftSafeCall( cufftExecR2C(planR2C, templ_block.ptr<cufftReal>(), templ_spect.ptr<cufftComplex>()) );
@ -594,12 +594,12 @@ namespace
std::min(y + dft_size.height, image.rows) - y);
GpuMat image_roi(image_roi_size, CV_32F, (void*)(image.ptr<float>(y) + x),
image.step);
gpu::copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
cuda::copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
0, image_block.cols - image_roi.cols, 0, Scalar(), _stream);
cufftSafeCall(cufftExecR2C(planR2C, image_block.ptr<cufftReal>(),
image_spect.ptr<cufftComplex>()));
gpu::mulAndScaleSpectrums(image_spect, templ_spect, result_spect, 0,
cuda::mulAndScaleSpectrums(image_spect, templ_spect, result_spect, 0,
1.f / dft_size.area(), ccorr, _stream);
cufftSafeCall(cufftExecC2R(planC2R, result_spect.ptr<cufftComplex>(),
result_data.ptr<cufftReal>()));
@ -622,7 +622,7 @@ namespace
#endif
Ptr<Convolution> cv::gpu::createConvolution(Size user_block_size)
Ptr<Convolution> cv::cuda::createConvolution(Size user_block_size)
{
#ifndef HAVE_CUFFT
(void) user_block_size;

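For orientation, a minimal caller-side sketch (not part of this commit) of what the rename in this file means for user code: every cv::gpu:: entry point above becomes cv::cuda::, with unchanged signatures. The include path opencv2/cudaarithm.hpp and the helper name gemmThenDft are illustrative assumptions; on this development branch the module header may still carry its gpu-prefixed name, and cuBLAS/cuFFT support is assumed to be enabled.

#include <opencv2/core.hpp>
#include <opencv2/cudaarithm.hpp> // assumed module header; may still be gpu-prefixed on this branch

// Multiply two CV_32FC1 matrices on the GPU and take the DFT of the product,
// going through the renamed entry points (formerly cv::gpu::gemm / cv::gpu::dft).
static void gemmThenDft(const cv::cuda::GpuMat& a, const cv::cuda::GpuMat& b, cv::cuda::GpuMat& spectrum)
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat product;
    cv::cuda::gemm(a, b, 1.0, cv::cuda::GpuMat(), 0.0, product, 0, stream); // product = 1*a*b + 0
    cv::cuda::dft(product, spectrum, product.size(), 0, stream);            // forward DFT of the product
    stream.waitForCompletion();
}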
View File

@ -43,30 +43,30 @@
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::merge(const GpuMat*, size_t, OutputArray, Stream&) { throw_no_cuda(); }
void cv::gpu::merge(const std::vector<GpuMat>&, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::merge(const GpuMat*, size_t, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::merge(const std::vector<GpuMat>&, OutputArray, Stream&) { throw_no_cuda(); }
void cv::gpu::split(InputArray, GpuMat*, Stream&) { throw_no_cuda(); }
void cv::gpu::split(InputArray, std::vector<GpuMat>&, Stream&) { throw_no_cuda(); }
void cv::cuda::split(InputArray, GpuMat*, Stream&) { throw_no_cuda(); }
void cv::cuda::split(InputArray, std::vector<GpuMat>&, Stream&) { throw_no_cuda(); }
void cv::gpu::transpose(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::transpose(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::gpu::flip(InputArray, OutputArray, int, Stream&) { throw_no_cuda(); }
void cv::cuda::flip(InputArray, OutputArray, int, Stream&) { throw_no_cuda(); }
Ptr<LookUpTable> cv::gpu::createLookUpTable(InputArray) { throw_no_cuda(); return Ptr<LookUpTable>(); }
Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray) { throw_no_cuda(); return Ptr<LookUpTable>(); }
void cv::gpu::copyMakeBorder(InputArray, OutputArray, int, int, int, int, int, Scalar, Stream&) { throw_no_cuda(); }
void cv::cuda::copyMakeBorder(InputArray, OutputArray, int, int, int, int, int, Scalar, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////////
// merge/split
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace split_merge
{
@ -112,7 +112,7 @@ namespace
src_as_devmem[i] = src[i];
PtrStepSzb dst_as_devmem(dst);
cv::gpu::cudev::split_merge::merge(src_as_devmem, dst_as_devmem, (int)n, CV_ELEM_SIZE(depth), StreamAccessor::getStream(stream));
cv::cuda::cudev::split_merge::merge(src_as_devmem, dst_as_devmem, (int)n, CV_ELEM_SIZE(depth), StreamAccessor::getStream(stream));
}
}
@ -145,28 +145,28 @@ namespace
dst_as_devmem[i] = dst[i];
PtrStepSzb src_as_devmem(src);
cv::gpu::cudev::split_merge::split(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), StreamAccessor::getStream(stream));
cv::cuda::cudev::split_merge::split(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), StreamAccessor::getStream(stream));
}
}
void cv::gpu::merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream)
void cv::cuda::merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream)
{
merge_caller(src, n, dst, stream);
}
void cv::gpu::merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream)
void cv::cuda::merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream)
{
merge_caller(&src[0], src.size(), dst, stream);
}
void cv::gpu::split(InputArray _src, GpuMat* dst, Stream& stream)
void cv::cuda::split(InputArray _src, GpuMat* dst, Stream& stream)
{
GpuMat src = _src.getGpuMat();
split_caller(src, dst, stream);
}
void cv::gpu::split(InputArray _src, std::vector<GpuMat>& dst, Stream& stream)
void cv::cuda::split(InputArray _src, std::vector<GpuMat>& dst, Stream& stream)
{
GpuMat src = _src.getGpuMat();
dst.resize(src.channels());
@ -182,7 +182,7 @@ namespace arithm
template <typename T> void transpose(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream);
}
void cv::gpu::transpose(InputArray _src, OutputArray _dst, Stream& _stream)
void cv::cuda::transpose(InputArray _src, OutputArray _dst, Stream& _stream)
{
GpuMat src = _src.getGpuMat();
@ -263,7 +263,7 @@ namespace
};
}
void cv::gpu::flip(InputArray _src, OutputArray _dst, int flipCode, Stream& stream)
void cv::cuda::flip(InputArray _src, OutputArray _dst, int flipCode, Stream& stream)
{
typedef void (*func_t)(const GpuMat& src, GpuMat& dst, int flipCode, cudaStream_t stream);
static const func_t funcs[6][4] =
@ -349,7 +349,7 @@ namespace
}
else
{
gpu::split(d_nppLut, d_nppLut3);
cuda::split(d_nppLut, d_nppLut3);
pValues3[0] = d_nppLut3[0].ptr<Npp32s>();
pValues3[1] = d_nppLut3[1].ptr<Npp32s>();
@ -495,7 +495,7 @@ namespace
#endif // (CUDA_VERSION >= 5000)
Ptr<LookUpTable> cv::gpu::createLookUpTable(InputArray lut)
Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray lut)
{
return new LookUpTableImpl(lut);
}
@ -503,7 +503,7 @@ Ptr<LookUpTable> cv::gpu::createLookUpTable(InputArray lut)
////////////////////////////////////////////////////////////////////////
// copyMakeBorder
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
namespace imgproc
{
@ -515,7 +515,7 @@ namespace
{
template <typename T, int cn> void copyMakeBorder_caller(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream)
{
using namespace ::cv::gpu::cudev::imgproc;
using namespace ::cv::cuda::cudev::imgproc;
Scalar_<T> val(saturate_cast<T>(value[0]), saturate_cast<T>(value[1]), saturate_cast<T>(value[2]), saturate_cast<T>(value[3]));
@ -529,7 +529,7 @@ typedef Npp32s __attribute__((__may_alias__)) Npp32s_a;
typedef Npp32s Npp32s_a;
#endif
void cv::gpu::copyMakeBorder(InputArray _src, OutputArray _dst, int top, int bottom, int left, int right, int borderType, Scalar value, Stream& _stream)
void cv::cuda::copyMakeBorder(InputArray _src, OutputArray _dst, int top, int bottom, int left, int right, int borderType, Scalar value, Stream& _stream)
{
GpuMat src = _src.getGpuMat();

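Likewise, a hedged caller-side sketch for the matrix operations renamed in this file; it is not taken from the commit, and the include path and the helper name splitMergeBorder are assumptions for illustration only.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/cudaarithm.hpp> // assumed home of cv::cuda::split/merge/copyMakeBorder

// Split a CV_8UC3 GpuMat into planes, merge them back, and pad the result,
// using the renamed cv::cuda free functions (formerly cv::gpu::*).
static void splitMergeBorder(const cv::cuda::GpuMat& bgr, cv::cuda::GpuMat& padded)
{
    std::vector<cv::cuda::GpuMat> planes;
    cv::cuda::split(bgr, planes);    // was cv::gpu::split
    cv::cuda::GpuMat merged;
    cv::cuda::merge(planes, merged); // was cv::gpu::merge
    cv::cuda::copyMakeBorder(merged, padded, 16, 16, 16, 16,
                             cv::BORDER_CONSTANT, cv::Scalar::all(0)); // was cv::gpu::copyMakeBorder
}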
View File

@ -50,8 +50,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -102,7 +102,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{

View File

@ -50,8 +50,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -69,7 +69,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{

View File

@ -50,8 +50,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -89,7 +89,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{

View File

@ -50,8 +50,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -68,7 +68,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{

View File

@ -49,8 +49,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -100,7 +100,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{

View File

@ -50,10 +50,10 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{

View File

@ -50,10 +50,10 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
@ -72,17 +72,17 @@ namespace arithm
{
template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::cudev::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::cudev::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::cudev::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
}
template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);

View File

@ -50,8 +50,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -107,7 +107,7 @@ namespace arithm
};
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{

View File

@ -51,8 +51,8 @@
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
using namespace cv::cuda;
using namespace cv::cuda::cudev;
namespace arithm
{
@ -125,7 +125,7 @@ namespace arithm
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace cudev
namespace cv { namespace cuda { namespace cudev
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{

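The .cu files above all repeat one pattern under the renamed device namespace: a per-element functor, an optional TransformFunctorTraits specialization inside cv::cuda::cudev, and a call to cudev::transform. Below is a hedged sketch of that pattern for an nvcc-compiled translation unit; the include paths, the reliance on the default functor traits, and the names arithm_sketch/Negate/negate are assumptions for illustration, not code from this commit.

#include "opencv2/core/cuda/common.hpp"     // assumed header locations on this branch
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/utility.hpp"    // WithOutMask
#include "opencv2/core/cuda/transform.hpp"

using namespace cv::cuda;
using namespace cv::cuda::cudev;

namespace arithm_sketch // illustrative namespace, mirroring the arithm namespace above
{
    // Per-element functor; the kernels in this commit (VAbsDiff4, AddScalar, ...) are built the same way.
    struct Negate : unary_function<int, int>
    {
        __device__ __forceinline__ int operator ()(int x) const { return -x; }
    };

    void negate(PtrStepSz<int> src, PtrStepSz<int> dst, cudaStream_t stream)
    {
        // cudev::transform resolves to cv::cuda::cudev::transform after the rename
        // (it was cv::gpu::cudev::transform before this commit).
        cudev::transform(src, dst, Negate(), WithOutMask(), stream);
    }
}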
Some files were not shown because too many files have changed in this diff.