Merge branch 'gpu-device-layer'
commit f017ad5943
@@ -3,14 +3,14 @@ ocv_add_module(core ${ZLIB_LIBRARIES})
 ocv_module_include_directories(${ZLIB_INCLUDE_DIR})

 if(HAVE_CUDA)
 ocv_source_group("Src\\Cuda" GLOB "src/cuda/*.cu")
-ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/src" "${OpenCV_SOURCE_DIR}/modules/gpu/src/cuda" ${CUDA_INCLUDE_DIRS})
+ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/include" ${CUDA_INCLUDE_DIRS})
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)

 file(GLOB lib_cuda "src/cuda/*.cu")
 ocv_cuda_compile(cuda_objs ${lib_cuda})


 set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
 else()
 set(lib_cuda "")
@@ -7,11 +7,11 @@ ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video o

 ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cuda" "${CMAKE_CURRENT_SOURCE_DIR}/../highgui/src")

 file(GLOB lib_hdrs "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
-file(GLOB lib_int_hdrs "src/*.hpp" "src/*.h")
+file(GLOB lib_device_hdrs "include/opencv2/${name}/device/*.hpp" "include/opencv2/${name}/device/*.h")
+file(GLOB lib_device_hdrs_detail "include/opencv2/${name}/device/detail/*.hpp" "include/opencv2/${name}/device/detail/*.h")
+file(GLOB lib_int_hdrs "src/*.hpp" "src/*.h")
 file(GLOB lib_cuda_hdrs "src/cuda/*.hpp" "src/cuda/*.h")
-file(GLOB lib_device_hdrs "src/opencv2/gpu/device/*.hpp" "src/opencv2/gpu/device/*.h")
-file(GLOB lib_device_hdrs_detail "src/opencv2/gpu/device/detail/*.hpp" "src/opencv2/gpu/device/detail/*.h")
 file(GLOB lib_srcs "src/*.cpp")
 file(GLOB lib_cuda "src/cuda/*.cu*")

@@ -74,8 +74,8 @@ else()
 endif()

 ocv_set_module_sources(
-HEADERS ${lib_hdrs}
+HEADERS ${lib_hdrs} ${lib_device_hdrs} ${lib_device_hdrs_detail}
-SOURCES ${lib_int_hdrs} ${lib_cuda_hdrs} ${lib_device_hdrs} ${lib_device_hdrs_detail} ${lib_srcs} ${lib_cuda} ${ncv_files} ${cuda_objs}
+SOURCES ${lib_int_hdrs} ${lib_cuda_hdrs} ${lib_srcs} ${lib_cuda} ${ncv_files} ${cuda_objs}
 )

 ocv_create_module(${cuda_link_libs})
@@ -1535,6 +1535,8 @@ namespace cv { namespace gpu { namespace device
 return functor_type(); \
 } \
 };

+#undef CV_DESCALE
 }}} // namespace cv { namespace gpu { namespace device

 #endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
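Context for the hunk above: CV_DESCALE is the fixed-point "descale" helper the color-conversion kernels use throughout, and the added #undef keeps it from leaking to includers of the header. A minimal sketch of the pattern it implements, assuming the usual OpenCV definition (the macro name is real; the sample function and the use of BT.601 weights here are illustrative):

#define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))  // assumed definition: round-to-nearest right shift

// Hypothetical illustration: 14-bit fixed-point BT.601 grayscale weights,
// the same style of arithmetic the color_detail.hpp kernels perform.
__device__ __forceinline__ int bgr_to_gray_sketch(int b, int g, int r)
{
    return CV_DESCALE(b * 1868 + g * 9617 + r * 4899, 14);
}

#undef CV_DESCALE  // mirror the hunk: keep the macro file-local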
@@ -40,27 +40,27 @@
 //
 //M*/

-#ifndef __OPENCV_GPU_UTILITY_DETAIL_HPP__
-#define __OPENCV_GPU_UTILITY_DETAIL_HPP__
+#ifndef __OPENCV_GPU_REDUCTION_DETAIL_HPP__
+#define __OPENCV_GPU_REDUCTION_DETAIL_HPP__

 namespace cv { namespace gpu { namespace device
 {
 namespace utility_detail
 {
 ///////////////////////////////////////////////////////////////////////////////
-// Reduction
+// Reductor

 template <int n> struct WarpReductor
 {
 template <typename T, typename Op> static __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
 {
 if (tid < n)
 data[tid] = partial_reduction;
 if (n > 32) __syncthreads();

 if (n > 32)
 {
 if (tid < n - 32)
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]);
 if (tid < 16)
 {
@@ -73,7 +73,7 @@ namespace cv { namespace gpu { namespace device
 }
 else if (n > 16)
 {
 if (tid < n - 16)
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);
 if (tid < 8)
 {
@@ -85,7 +85,7 @@ namespace cv { namespace gpu { namespace device
 }
 else if (n > 8)
 {
 if (tid < n - 8)
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]);
 if (tid < 4)
 {
@@ -96,23 +96,23 @@ namespace cv { namespace gpu { namespace device
 }
 else if (n > 4)
 {
 if (tid < n - 4)
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]);
 if (tid < 2)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]);
 }
 }
 else if (n > 2)
 {
 if (tid < n - 2)
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]);
 if (tid < 2)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]);
 }
 }
 }
 };
 template <> struct WarpReductor<64>
@@ -121,15 +121,15 @@ namespace cv { namespace gpu { namespace device
 {
 data[tid] = partial_reduction;
 __syncthreads();

 if (tid < 32)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);
 }
 }
 };
@@ -138,14 +138,14 @@ namespace cv { namespace gpu { namespace device
 template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
 {
 data[tid] = partial_reduction;

 if (tid < 16)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);
 }
 }
 };
@@ -154,13 +154,13 @@ namespace cv { namespace gpu { namespace device
 template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
 {
 data[tid] = partial_reduction;

 if (tid < 8)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);
 }
 }
 };
@@ -169,12 +169,12 @@ namespace cv { namespace gpu { namespace device
 template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)
 {
 data[tid] = partial_reduction;

 if (tid < 4)
 {
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);
 data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);
 }
 }
 };
@@ -214,11 +214,11 @@ namespace cv { namespace gpu { namespace device

 ///////////////////////////////////////////////////////////////////////////////
 // PredValWarpReductor

 template <int n> struct PredValWarpReductor;
 template <> struct PredValWarpReductor<64>
 {
 template <typename T, typename V, typename Pred>
 static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)
 {
 if (tid < 32)
@@ -253,14 +253,14 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -272,7 +272,7 @@ namespace cv { namespace gpu { namespace device
 };
 template <> struct PredValWarpReductor<32>
 {
 template <typename T, typename V, typename Pred>
 static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)
 {
 if (tid < 16)
@@ -300,14 +300,14 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -320,7 +320,7 @@ namespace cv { namespace gpu { namespace device

 template <> struct PredValWarpReductor<16>
 {
 template <typename T, typename V, typename Pred>
 static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)
 {
 if (tid < 8)
@@ -341,14 +341,14 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -360,7 +360,7 @@ namespace cv { namespace gpu { namespace device
 };
 template <> struct PredValWarpReductor<8>
 {
 template <typename T, typename V, typename Pred>
 static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)
 {
 if (tid < 4)
@@ -374,14 +374,14 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -407,7 +407,7 @@ namespace cv { namespace gpu { namespace device
 myData = sdata[tid];
 myVal = sval[tid];

 if (n >= 512 && tid < 256)
 {
 T reg = sdata[tid + 256];

@@ -416,9 +416,9 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 256];
 }
 __syncthreads();
 }
 if (n >= 256 && tid < 128)
 {
 T reg = sdata[tid + 128];

@@ -427,9 +427,9 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 128];
 }
 __syncthreads();
 }
 if (n >= 128 && tid < 64)
 {
 T reg = sdata[tid + 64];

@@ -438,13 +438,13 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 64];
 }
 __syncthreads();
 }

 if (tid < 32)
 {
 if (n >= 64)
 {
 T reg = sdata[tid + 32];

 if (pred(reg, myData))
@@ -453,8 +453,8 @@ namespace cv { namespace gpu { namespace device
 sval[tid] = myVal = sval[tid + 32];
 }
 }
 if (n >= 32)
 {
 T reg = sdata[tid + 16];

 if (pred(reg, myData))
@@ -463,8 +463,8 @@ namespace cv { namespace gpu { namespace device
 sval[tid] = myVal = sval[tid + 16];
 }
 }
 if (n >= 16)
 {
 T reg = sdata[tid + 8];

 if (pred(reg, myData))
@@ -473,8 +473,8 @@ namespace cv { namespace gpu { namespace device
 sval[tid] = myVal = sval[tid + 8];
 }
 }
 if (n >= 8)
 {
 T reg = sdata[tid + 4];

 if (pred(reg, myData))
@@ -483,18 +483,18 @@ namespace cv { namespace gpu { namespace device
 sval[tid] = myVal = sval[tid + 4];
 }
 }
 if (n >= 4)
 {
 T reg = sdata[tid + 2];

 if (pred(reg, myData))
 {
 sdata[tid] = myData = reg;
 sval[tid] = myVal = sval[tid + 2];
 }
 }
 if (n >= 2)
 {
 T reg = sdata[tid + 1];

 if (pred(reg, myData))
@@ -513,7 +513,7 @@ namespace cv { namespace gpu { namespace device
 template <int n> struct PredVal2WarpReductor;
 template <> struct PredVal2WarpReductor<64>
 {
 template <typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 if (tid < 32)
@@ -553,7 +553,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 4];
 sval2[tid] = myVal2 = sval2[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
@@ -561,7 +561,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 2];
 sval2[tid] = myVal2 = sval2[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -574,7 +574,7 @@ namespace cv { namespace gpu { namespace device
 };
 template <> struct PredVal2WarpReductor<32>
 {
 template <typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 if (tid < 16)
@@ -606,7 +606,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 4];
 sval2[tid] = myVal2 = sval2[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
@@ -614,7 +614,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 2];
 sval2[tid] = myVal2 = sval2[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -628,7 +628,7 @@ namespace cv { namespace gpu { namespace device

 template <> struct PredVal2WarpReductor<16>
 {
 template <typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 if (tid < 8)
@@ -652,7 +652,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 4];
 sval2[tid] = myVal2 = sval2[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
@@ -660,7 +660,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 2];
 sval2[tid] = myVal2 = sval2[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -673,7 +673,7 @@ namespace cv { namespace gpu { namespace device
 };
 template <> struct PredVal2WarpReductor<8>
 {
 template <typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 if (tid < 4)
@@ -689,7 +689,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 4];
 sval2[tid] = myVal2 = sval2[tid + 4];
 }

 reg = sdata[tid + 2];
 if (pred(reg, myData))
 {
@@ -697,7 +697,7 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 2];
 sval2[tid] = myVal2 = sval2[tid + 2];
 }

 reg = sdata[tid + 1];
 if (pred(reg, myData))
 {
@@ -712,7 +712,7 @@ namespace cv { namespace gpu { namespace device
 template <bool warp> struct PredVal2ReductionDispatcher;
 template <> struct PredVal2ReductionDispatcher<true>
 {
 template <int n, typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 PredVal2WarpReductor<n>::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);
@@ -720,14 +720,14 @@ namespace cv { namespace gpu { namespace device
 };
 template <> struct PredVal2ReductionDispatcher<false>
 {
 template <int n, typename T, typename V1, typename V2, typename Pred>
 static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)
 {
 myData = sdata[tid];
 myVal1 = sval1[tid];
 myVal2 = sval2[tid];

 if (n >= 512 && tid < 256)
 {
 T reg = sdata[tid + 256];

@@ -737,9 +737,9 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 256];
 sval2[tid] = myVal2 = sval2[tid + 256];
 }
 __syncthreads();
 }
 if (n >= 256 && tid < 128)
 {
 T reg = sdata[tid + 128];

@@ -749,9 +749,9 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 128];
 sval2[tid] = myVal2 = sval2[tid + 128];
 }
 __syncthreads();
 }
 if (n >= 128 && tid < 64)
 {
 T reg = sdata[tid + 64];

@@ -761,13 +761,13 @@ namespace cv { namespace gpu { namespace device
 sval1[tid] = myVal1 = sval1[tid + 64];
 sval2[tid] = myVal2 = sval2[tid + 64];
 }
 __syncthreads();
 }

 if (tid < 32)
 {
 if (n >= 64)
 {
 T reg = sdata[tid + 32];

 if (pred(reg, myData))
@@ -777,8 +777,8 @@ namespace cv { namespace gpu { namespace device
 sval2[tid] = myVal2 = sval2[tid + 32];
 }
 }
 if (n >= 32)
 {
 T reg = sdata[tid + 16];

 if (pred(reg, myData))
@@ -788,8 +788,8 @@ namespace cv { namespace gpu { namespace device
 sval2[tid] = myVal2 = sval2[tid + 16];
 }
 }
 if (n >= 16)
 {
 T reg = sdata[tid + 8];

 if (pred(reg, myData))
@@ -799,8 +799,8 @@ namespace cv { namespace gpu { namespace device
 sval2[tid] = myVal2 = sval2[tid + 8];
 }
 }
 if (n >= 8)
 {
 T reg = sdata[tid + 4];

 if (pred(reg, myData))
@@ -810,8 +810,8 @@ namespace cv { namespace gpu { namespace device
 sval2[tid] = myVal2 = sval2[tid + 4];
 }
 }
 if (n >= 4)
 {
 T reg = sdata[tid + 2];

 if (pred(reg, myData))
@@ -819,10 +819,10 @@ namespace cv { namespace gpu { namespace device
 sdata[tid] = myData = reg;
 sval1[tid] = myVal1 = sval1[tid + 2];
 sval2[tid] = myVal2 = sval2[tid + 2];
 }
 }
 if (n >= 2)
 {
 T reg = sdata[tid + 1];

 if (pred(reg, myData))
@@ -838,4 +838,4 @@ namespace cv { namespace gpu { namespace device
 } // namespace utility_detail
 }}} // namespace cv { namespace gpu { namespace device

-#endif // __OPENCV_GPU_UTILITY_DETAIL_HPP__
+#endif // __OPENCV_GPU_REDUCTION_DETAIL_HPP__
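Every Reductor specialization in the renamed header above is the same shared-memory tree reduction, specialized by thread count: synchronize while more than a warp is active, then finish inside one warp without barriers. A self-contained sketch of that pattern for a fixed 256-thread block (illustrative only, not the utility_detail API; like the original, the warp phase relies on pre-Volta lockstep execution through volatile shared memory):

#include <cuda_runtime.h>

__global__ void blockSumSketch(const float* in, float* out, int n)
{
    __shared__ volatile float sdata[256];
    const int tid = threadIdx.x;
    const int i = blockIdx.x * 256 + tid;

    sdata[tid] = (i < n) ? in[i] : 0.0f;   // one element per thread
    __syncthreads();

    // Tree phase: halve the active threads until one warp remains.
    for (int s = 128; s > 32; s >>= 1)
    {
        if (tid < s) sdata[tid] = sdata[tid] + sdata[tid + s];
        __syncthreads();
    }

    // Warp phase: no __syncthreads(), exactly as in WarpReductor above.
    if (tid < 32)
    {
        sdata[tid] = sdata[tid] + sdata[tid + 32];
        sdata[tid] = sdata[tid] + sdata[tid + 16];
        sdata[tid] = sdata[tid] + sdata[tid + 8];
        sdata[tid] = sdata[tid] + sdata[tid + 4];
        sdata[tid] = sdata[tid] + sdata[tid + 2];
        sdata[tid] = sdata[tid] + sdata[tid + 1];
    }

    if (tid == 0) out[blockIdx.x] = sdata[0];   // one partial sum per block
}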
@@ -47,7 +47,7 @@
 #include "../vec_traits.hpp"
 #include "../functional.hpp"

 namespace cv { namespace gpu { namespace device
 {
 namespace transform_detail
 {
@@ -203,7 +203,7 @@ namespace cv { namespace gpu { namespace device
 };

 template <typename T, typename D, typename UnOp, typename Mask>
-__global__ static void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
+static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
 {
 typedef TransformFunctorTraits<UnOp> ft;
 typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
@@ -239,10 +239,10 @@ namespace cv { namespace gpu { namespace device
 }

 template <typename T, typename D, typename UnOp, typename Mask>
-static __global__ void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
+__global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
 {
 const int x = blockDim.x * blockIdx.x + threadIdx.x;
 const int y = blockDim.y * blockIdx.y + threadIdx.y;

 if (x < src.cols && y < src.rows && mask(y, x))
 {
@@ -251,7 +251,7 @@ namespace cv { namespace gpu { namespace device
 }

 template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
-__global__ static void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
+static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
 const Mask mask, const BinOp op)
 {
 typedef TransformFunctorTraits<BinOp> ft;
@@ -274,7 +274,7 @@ namespace cv { namespace gpu { namespace device
 const read_type1 src1_n_el = ((const read_type1*)src1)[x];
 const read_type2 src2_n_el = ((const read_type2*)src2)[x];
 write_type dst_n_el = ((const write_type*)dst)[x];

 OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y);

 ((write_type*)dst)[x] = dst_n_el;
@@ -291,11 +291,11 @@ namespace cv { namespace gpu { namespace device
 }

 template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
 static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
 const Mask mask, const BinOp op)
 {
 const int x = blockDim.x * blockIdx.x + threadIdx.x;
 const int y = blockDim.y * blockIdx.y + threadIdx.y;

 if (x < src1.cols && y < src1.rows && mask(y, x))
 {
@@ -314,13 +314,13 @@ namespace cv { namespace gpu { namespace device
 typedef TransformFunctorTraits<UnOp> ft;

 const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
 const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);

 transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
 cudaSafeCall( cudaGetLastError() );

 if (stream == 0)
 cudaSafeCall( cudaDeviceSynchronize() );
 }

 template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
@@ -329,13 +329,13 @@ namespace cv { namespace gpu { namespace device
 typedef TransformFunctorTraits<BinOp> ft;

 const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
 const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);

 transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
 cudaSafeCall( cudaGetLastError() );

 if (stream == 0)
 cudaSafeCall( cudaDeviceSynchronize() );
 }
 };
 template<> struct TransformDispatcher<true>
@@ -347,7 +347,7 @@ namespace cv { namespace gpu { namespace device

 StaticAssert<ft::smart_shift != 1>::check();

 if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||
 !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
 {
 TransformDispatcher<false>::call(src, dst, op, mask, stream);
@@ -355,7 +355,7 @@ namespace cv { namespace gpu { namespace device
 }

 const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
 const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);

 transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
 cudaSafeCall( cudaGetLastError() );
@@ -380,15 +380,15 @@ namespace cv { namespace gpu { namespace device
 }

 const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
 const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);

 transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
 cudaSafeCall( cudaGetLastError() );

 if (stream == 0)
 cudaSafeCall( cudaDeviceSynchronize() );
 }
 };
 } // namespace transform_detail
 }}} // namespace cv { namespace gpu { namespace device

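The two TransformDispatcher paths above differ only in launch arithmetic: the simple path assigns one thread per pixel, while the smart path shrinks the x-grid by ft::smart_shift because each thread then reads and writes several packed elements, falling back to the simple path when pointers or strides are not aligned. A standalone sketch of the simple-path launch math (hypothetical names, not the OpenCV API):

#include <cuda_runtime.h>

static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

struct ScaleSketch
{
    float s;
    __device__ __forceinline__ float operator()(float v) const { return s * v; }
};

template <typename T, typename D, typename UnOp>
__global__ void transformSimpleSketch(const T* src, D* dst, int cols, int rows, int stride, UnOp op)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < cols && y < rows)                       // guard the ragged edge
        dst[y * stride + x] = op(src[y * stride + x]);  // one pixel per thread
}

void launchSketch(const float* src, float* dst, int cols, int rows, int stride, cudaStream_t stream)
{
    const dim3 threads(16, 16, 1);
    const dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y), 1);  // same divUp math as above
    ScaleSketch op = { 2.0f };
    transformSimpleSketch<<<grid, threads, 0, stream>>>(src, dst, cols, rows, stride, op);
}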
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_GPU_FUNCTIONAL_HPP__
 #define __OPENCV_GPU_FUNCTIONAL_HPP__

-#include <thrust/functional.h>
+#include <functional>
 #include "saturate_cast.hpp"
 #include "vec_traits.hpp"
 #include "type_traits.hpp"
@@ -52,9 +52,8 @@
 namespace cv { namespace gpu { namespace device
 {
 // Function Objects
-
-using thrust::unary_function;
-using thrust::binary_function;
+template<typename Argument, typename Result> struct unary_function : public std::unary_function<Argument, Result> {};
+template<typename Argument1, typename Argument2, typename Result> struct binary_function : public std::binary_function<Argument1, Argument2, Result> {};

 // Arithmetic Operations
 template <typename T> struct plus : binary_function<T, T, T>
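The replaced using-declarations matter because the functors that follow (plus, and the rest of the arithmetic operations) inherit from unary_function/binary_function to publish argument and result typedefs; swapping the Thrust bases for thin wrappers over the C++03 std:: ones drops the Thrust dependency without changing that contract. A sketch of what the typedefs buy (hypothetical names; std::binary_function is the C++03 facility this code targeted, since removed in C++17):

#include <functional>

template <typename T> struct plus_sketch : std::binary_function<T, T, T>
{
    __device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};

// Generic code can recover the functor's result type from the base class:
template <typename Op> struct op_result
{
    typedef typename Op::result_type type;   // provided by binary_function
};

typedef op_result< plus_sketch<int> >::type result_t;   // result_t == int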
modules/gpu/include/opencv2/gpu/device/scan.hpp (new file, 171 lines)
@@ -0,0 +1,171 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_SCAN_HPP__
+#define __OPENCV_GPU_SCAN_HPP__
+
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
+
+template <ScanKind Kind, typename T, typename F> struct WarpScan
+{
+__device__ __forceinline__ WarpScan() {}
+__device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }
+
+__device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+{
+const unsigned int lane = idx & 31;
+F op;
+
+if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+if( Kind == INCLUSIVE )
+return ptr [idx];
+else
+return (lane > 0) ? ptr [idx - 1] : 0;
+}
+
+__device__ __forceinline__ unsigned int index(const unsigned int tid)
+{
+return tid;
+}
+
+__device__ __forceinline__ void init(volatile T *ptr){}
+
+static const int warp_offset = 0;
+
+typedef WarpScan<INCLUSIVE, T, F> merge;
+};
+
+template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
+{
+__device__ __forceinline__ WarpScanNoComp() {}
+__device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }
+
+__device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+{
+const unsigned int lane = threadIdx.x & 31;
+F op;
+
+ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+if( Kind == INCLUSIVE )
+return ptr [idx];
+else
+return (lane > 0) ? ptr [idx - 1] : 0;
+}
+
+__device__ __forceinline__ unsigned int index(const unsigned int tid)
+{
+return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
+}
+
+__device__ __forceinline__ void init(volatile T *ptr)
+{
+ptr[threadIdx.x] = 0;
+}
+
+static const int warp_smem_stride = 32 + 16 + 1;
+static const int warp_offset = 16;
+static const int warp_log = 5;
+static const int warp_mask = 31;
+
+typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
+};
+
+template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
+{
+__device__ __forceinline__ BlockScan() {}
+__device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }
+
+__device__ __forceinline__ T operator()(volatile T *ptr)
+{
+const unsigned int tid = threadIdx.x;
+const unsigned int lane = tid & warp_mask;
+const unsigned int warp = tid >> warp_log;
+
+Sc scan;
+typename Sc::merge merge_scan;
+const unsigned int idx = scan.index(tid);
+
+T val = scan(ptr, idx);
+__syncthreads ();
+
+if( warp == 0)
+scan.init(ptr);
+__syncthreads ();
+
+if( lane == 31 )
+ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
+__syncthreads ();
+
+if( warp == 0 )
+merge_scan(ptr, idx);
+__syncthreads();
+
+if ( warp > 0)
+val = ptr [scan.warp_offset + warp - 1] + val;
+__syncthreads ();
+
+ptr[idx] = val;
+__syncthreads ();
+
+return val ;
+}
+
+static const int warp_log = 5;
+static const int warp_mask = 31;
+};
+}}}
+
+#endif // __OPENCV_GPU_SCAN_HPP__
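The new scan.hpp builds an inclusive/exclusive block scan out of per-warp Hillis-Steele scans (WarpScan), a merge pass over the warp totals, and a broadcast add (BlockScan). A standalone sketch of the warp-level cascade for one 32-thread warp (illustrative, not the header's API; like WarpScan it relies on pre-Volta lockstep warp execution through volatile shared memory):

#include <cuda_runtime.h>

__global__ void warpInclusiveScanSketch(int* data)   // launch with 32 threads per block
{
    __shared__ volatile int ptr[32];
    const unsigned int idx  = threadIdx.x;
    const unsigned int lane = idx & 31;

    ptr[idx] = data[blockIdx.x * 32 + idx];

    // Hillis-Steele cascade: each step doubles the span of the running
    // prefix -- the same five steps as WarpScan::operator() above.
    if (lane >= 1)  ptr[idx] = ptr[idx - 1]  + ptr[idx];
    if (lane >= 2)  ptr[idx] = ptr[idx - 2]  + ptr[idx];
    if (lane >= 4)  ptr[idx] = ptr[idx - 4]  + ptr[idx];
    if (lane >= 8)  ptr[idx] = ptr[idx - 8]  + ptr[idx];
    if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];

    data[blockIdx.x * 32 + idx] = ptr[idx];   // inclusive prefix sums
}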
@@ -60,10 +60,8 @@ namespace cv { namespace gpu
 __OPENCV_GPU_HOST_DEVICE__ static void check() {};
 };
 }
-
-using ::cv::gpu::device::Static;
 }}

 #undef __OPENCV_GPU_HOST_DEVICE__

 #endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */
@@ -45,7 +45,7 @@

 #include "saturate_cast.hpp"
 #include "datamov_utils.hpp"
-#include "detail/utility_detail.hpp"
+#include "detail/reduction_detail.hpp"

 namespace cv { namespace gpu { namespace device
 {
@@ -42,9 +42,9 @@

 #if !defined CUDA_DISABLER

-#include <opencv2/gpu/device/lbp.hpp>
-#include <opencv2/gpu/device/vec_traits.hpp>
-#include <opencv2/gpu/device/saturate_cast.hpp>
+#include "lbp.hpp"
+#include "opencv2/gpu/device/vec_traits.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"

 namespace cv { namespace gpu { namespace device
 {
@@ -299,4 +299,4 @@ namespace cv { namespace gpu { namespace device
 }
 }}}

 #endif /* CUDA_DISABLER */
@@ -1,166 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_GPU_SCAN_HPP__
-#define __OPENCV_GPU_SCAN_HPP__
-
-enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
-
-template <ScanKind Kind, typename T, typename F> struct WarpScan
-{
-__device__ __forceinline__ WarpScan() {}
-__device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }
-
-__device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
-{
-const unsigned int lane = idx & 31;
-F op;
-
-if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
-if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
-if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
-if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
-if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
-
-if( Kind == INCLUSIVE )
-return ptr [idx];
-else
-return (lane > 0) ? ptr [idx - 1] : 0;
-}
-
-__device__ __forceinline__ unsigned int index(const unsigned int tid)
-{
-return tid;
-}
-
-__device__ __forceinline__ void init(volatile T *ptr){}
-
-static const int warp_offset = 0;
-
-typedef WarpScan<INCLUSIVE, T, F> merge;
-};
-
-template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
-{
-__device__ __forceinline__ WarpScanNoComp() {}
-__device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }
-
-__device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
-{
-const unsigned int lane = threadIdx.x & 31;
-F op;
-
-ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
-ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
-ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
-ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
-ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
-
-if( Kind == INCLUSIVE )
-return ptr [idx];
-else
-return (lane > 0) ? ptr [idx - 1] : 0;
-}
-
-__device__ __forceinline__ unsigned int index(const unsigned int tid)
-{
-return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
-}
-
-__device__ __forceinline__ void init(volatile T *ptr)
-{
-ptr[threadIdx.x] = 0;
-}
-
-static const int warp_smem_stride = 32 + 16 + 1;
-static const int warp_offset = 16;
-static const int warp_log = 5;
-static const int warp_mask = 31;
-
-typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
-};
-
-template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
-{
-__device__ __forceinline__ BlockScan() {}
-__device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }
-
-__device__ __forceinline__ T operator()(volatile T *ptr)
-{
-const unsigned int tid = threadIdx.x;
-const unsigned int lane = tid & warp_mask;
-const unsigned int warp = tid >> warp_log;
-
-Sc scan;
-typename Sc::merge merge_scan;
-const unsigned int idx = scan.index(tid);
-
-T val = scan(ptr, idx);
-__syncthreads ();
-
-if( warp == 0)
-scan.init(ptr);
-__syncthreads ();
-
-if( lane == 31 )
-ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
-__syncthreads ();
-
-if( warp == 0 )
-merge_scan(ptr, idx);
-__syncthreads();
-
-if ( warp > 0)
-val = ptr [scan.warp_offset + warp - 1] + val;
-__syncthreads ();
-
-ptr[idx] = val;
-__syncthreads ();
-
-return val ;
-}
-
-static const int warp_log = 5;
-static const int warp_mask = 31;
-};
-
-#endif