fixed implementation of gpumat::setTo() and improved gputest
@@ -41,54 +41,66 @@
 //M*/
 
 #include <stddef.h>
 #include <stdio.h>
 #include <iostream>
 #include "cuda_shared.hpp"
 #include "cuda_runtime.h"
 
-__constant__ float scalar_d[4];
+__constant__ __align__(16) float scalar_d[4];
 
 namespace mat_operators
 {
 
     template <typename T, int channels, int count = channels>
     struct unroll
     {
         __device__ static void unroll_set(T * mat, size_t i)
         {
-            mat[i] = static_cast<T>(scalar_d[i % channels]);
+            mat[i] = static_cast<T>(scalar_d[channels - count]);
             unroll<T, channels, count - 1>::unroll_set(mat, i+1);
         }
 
-        __device__ static void unroll_set_with_mask(T * mat, float mask, size_t i)
+        __device__ static void unroll_set_with_mask(T * mat, unsigned char mask, size_t i)
         {
-            mat[i] = mask * static_cast<T>(scalar_d[i % channels]);
+            if ( mask != 0 )
+                mat[i] = static_cast<T>(scalar_d[channels - count]);
+
             unroll<T, channels, count - 1>::unroll_set_with_mask(mat, mask, i+1);
         }
     };
 
     template <typename T, int channels>
-    struct unroll<T,channels,0>
+    struct unroll<T, channels, 0>
     {
         __device__ static void unroll_set(T * , size_t){}
-        __device__ static void unroll_set_with_mask(T * , float, size_t){}
+        __device__ static void unroll_set_with_mask(T * , unsigned char, size_t){}
     };
 
     template <typename T, int channels>
-    __global__ void kernel_set_to_without_mask(T * mat)
+    __device__ size_t GetIndex(size_t i, int cols, int rows, int step)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x) * sizeof(T);
-        unroll<T, channels>::unroll_set(mat, i);
+        return ((i / static_cast<size_t>(cols)) * static_cast<size_t>(step) / sizeof(T)) +
+               (i % static_cast<size_t>(cols)) * static_cast<size_t>(channels);
     }
 
     template <typename T, int channels>
-    __global__ void kernel_set_to_with_mask(T * mat, const float * mask)
+    __global__ void kernel_set_to_without_mask(T * mat, int cols, int rows, int step)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x) * sizeof(T);
-        unroll<T, channels>::unroll_set_with_mask(mat, i, mask[i]);
+        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
+        if (i < cols * rows)
+        {
+            unroll<T, channels>::unroll_set(mat, GetIndex<T,channels>(i, cols, rows, step));
+        }
     }
+
+    template <typename T, int channels>
+    __global__ void kernel_set_to_with_mask(T * mat, const unsigned char * mask, int cols, int rows, int step)
+    {
+        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
+        if (i < cols * rows)
+            unroll<T, channels>::unroll_set_with_mask(mat, mask[i], GetIndex<T,channels>(i, cols, rows, step));
+    }
 }
 
 
 extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
 {
     // download scalar to constant memory
@@ -97,29 +109,36 @@ extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const doubl
     data[1] = scalar[1];
     data[2] = scalar[2];
     data[3] = scalar[3];
-    cudaMemcpyToSymbol(scalar_d, data, sizeof(data));
+    cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));
 
-    dim3 numBlocks(mat.rows * mat.step / 256, 1, 1);
-    dim3 threadsPerBlock(256);
+    dim3 threadsPerBlock(256, 1, 1);
+    dim3 numBlocks(mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
 
     if (channels == 1)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<unsigned int, 1><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr, (float *)mask.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 2)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<unsigned int, 2><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr, (float *)mask.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 3)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (float *)mask.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<unsigned int, 3><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr, (float *)mask.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
     }
+    if (channels == 4)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+    }
     cudaSafeCall( cudaThreadSynchronize() );
 }
 
 extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int elemSize1, int channels)
@@ -129,28 +148,35 @@ extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const do
     data[1] = scalar[1];
     data[2] = scalar[2];
     data[3] = scalar[3];
-    cudaMemcpyToSymbol(scalar_d, data, sizeof(data));
-
-    int numBlocks = mat.rows * mat.step / 256;
-
-    dim3 threadsPerBlock(256);
+    cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));
+
+    dim3 threadsPerBlock(256, 1, 1);
+    dim3 numBlocks(mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
 
     if (channels == 1)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 1><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 2)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 2><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 3)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<unsigned int, 3><<<numBlocks,threadsPerBlock>>>((unsigned int *)mat.ptr);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
+    if (channels == 4)
+    {
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
+    }
 
     cudaSafeCall( cudaThreadSynchronize() );
 }
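
Note on the indexing scheme: the new GetIndex helper maps a flat per-pixel index over a cols x rows image to an element offset in a byte-pitched buffer, where step is the row pitch in bytes and each pixel spans `channels` consecutive elements of T. Below is a minimal host-side sketch of the same arithmetic; the function name, element type, and sizes are hypothetical, chosen only to illustrate why the row term is divided by sizeof(T).

    #include <cstddef>
    #include <cstdio>

    // Host-side sketch of the device-side GetIndex arithmetic.
    // Hypothetical setup: T = unsigned short, channels = 3, a 5x4 image
    // whose rows are padded to a 64-byte pitch (64 > 5 * 3 * 2 payload bytes).
    template <typename T, int channels>
    size_t get_index(size_t i, int cols, int step)
    {
        size_t row = i / static_cast<size_t>(cols); // image row of pixel i
        size_t col = i % static_cast<size_t>(cols); // position within the row
        // step is in bytes, so convert the pitch to elements of T before
        // scaling by the row; each pixel occupies `channels` elements.
        return row * (static_cast<size_t>(step) / sizeof(T))
             + col * static_cast<size_t>(channels);
    }

    int main()
    {
        const int cols = 5, rows = 4, step = 64;
        for (size_t i = 0; i < static_cast<size_t>(cols) * rows; ++i)
            std::printf("pixel %zu -> element offset %zu\n",
                        i, get_index<unsigned short, 3>(i, cols, step));
        return 0;
    }

With these numbers each padded row starts 32 elements (64 bytes / 2 bytes per element) after the previous one, which is exactly what the division by sizeof(T) accounts for.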
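The per-channel stores rely on compile-time recursion over `count`, with the zero-count specialization terminating it, so the compiler emits a straight sequence of writes rather than a runtime loop. A self-contained sketch of that pattern, simplified to a dense buffer with no row pitch (scalar_d here mirrors the constant-memory array above):

    #include <cstdio>
    #include <cuda_runtime.h>

    __constant__ float scalar_d[4];

    // Compile-time unroll: recursion on `count` emits one store per channel;
    // the count == 0 specialization ends the recursion.
    template <typename T, int channels, int count = channels>
    struct unroll
    {
        __device__ static void set(T* mat, size_t i)
        {
            mat[i] = static_cast<T>(scalar_d[channels - count]); // channel index
            unroll<T, channels, count - 1>::set(mat, i + 1);
        }
    };

    template <typename T, int channels>
    struct unroll<T, channels, 0>
    {
        __device__ static void set(T*, size_t) {} // recursion terminator
    };

    template <typename T, int channels>
    __global__ void fill(T* mat, int pixels)
    {
        int p = blockIdx.x * blockDim.x + threadIdx.x;
        if (p < pixels)
            unroll<T, channels>::set(mat, static_cast<size_t>(p) * channels);
    }

    int main()
    {
        const int pixels = 4, channels = 3;
        const float scalar[4] = { 10.f, 20.f, 30.f, 0.f };
        cudaMemcpyToSymbol(scalar_d, scalar, sizeof(scalar));

        float* d = 0;
        cudaMalloc(&d, pixels * channels * sizeof(float));
        fill<float, channels><<<1, 64>>>(d, pixels);

        float h[pixels * channels];
        cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
        for (int i = 0; i < pixels * channels; ++i)
            printf("%g ", h[i]); // expect: 10 20 30 repeated for each pixel
        printf("\n");
        cudaFree(d);
        return 0;
    }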
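On the launch configuration: the grid is now sized by pixel count (mat.rows * mat.cols / threadsPerBlock.x + 1) rather than by row bytes, and the in-kernel i < cols * rows guard discards the spare threads of the last block. A generic sketch of that guard-plus-overshoot pattern, written with the equivalent ceiling-division spelling; cudaDeviceSynchronize stands in for the older cudaThreadSynchronize used above, and the kernel and sizes are hypothetical:

    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void touch(unsigned char* data, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)                 // guard: the last block may overshoot n
            data[i] = 0xFF;
    }

    int main()
    {
        const int cols = 123, rows = 45, n = cols * rows;

        unsigned char* d = 0;
        cudaMalloc(&d, n);

        dim3 threadsPerBlock(256, 1, 1);
        // Ceiling division: same coverage as "n / 256 + 1", but launches
        // no spare block when n is an exact multiple of 256.
        dim3 numBlocks((n + threadsPerBlock.x - 1) / threadsPerBlock.x, 1, 1);

        touch<<<numBlocks, threadsPerBlock>>>(d, n);
        cudaDeviceSynchronize();

        unsigned char last = 0;
        cudaMemcpy(&last, d + n - 1, 1, cudaMemcpyDeviceToHost);
        printf("last element = 0x%02X\n", last); // expect 0xFF
        cudaFree(d);
        return 0;
    }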