fixed errors in StereoBeliefPropagation under Linux

Vladislav Vinogradov 2010-12-13 13:52:40 +00:00
parent 070d87fb7f
commit b18a3a5f83
2 changed files with 165 additions and 77 deletions


@@ -73,45 +73,73 @@ namespace cv { namespace gpu { namespace bp
     ////////////////////////// comp data //////////////////////////
     ///////////////////////////////////////////////////////////////

-    __device__ float pixDiff(uchar l, uchar r)
-    {
-        return abs((int)l - r);
-    }
+    template <int cn> struct PixDiff;
+    template <> struct PixDiff<1>
+    {
+        __device__ PixDiff(const uchar* ls)
+        {
+            l = *ls;
+        }
+        __device__ float operator()(const uchar* rs) const
+        {
+            return abs((int)l - *rs);
+        }
+        uchar l;
+    };
+    template <> struct PixDiff<3>
+    {
+        __device__ PixDiff(const uchar* ls)
+        {
+            l = *((uchar3*)ls);
+        }
+        __device__ float operator()(const uchar* rs) const
+        {
+            const float tr = 0.299f;
+            const float tg = 0.587f;
+            const float tb = 0.114f;
+
+            float val = tb * abs((int)l.x - rs[0]);
+            val += tg * abs((int)l.y - rs[1]);
+            val += tr * abs((int)l.z - rs[2]);
+
+            return val;
+        }
+        uchar3 l;
+    };
+    template <> struct PixDiff<4>
+    {
+        __device__ PixDiff(const uchar* ls)
+        {
+            l = *((uchar4*)ls);
+        }
+        __device__ float operator()(const uchar* rs) const
+        {
+            const float tr = 0.299f;
+            const float tg = 0.587f;
+            const float tb = 0.114f;
+
+            uchar4 r = *((uchar4*)rs);
+
+            float val = tb * abs((int)l.x - r.x);
+            val += tg * abs((int)l.y - r.y);
+            val += tr * abs((int)l.z - r.z);
+
+            return val;
+        }
+        uchar4 l;
+    };

-    __device__ float pixDiff(const uchar3& l, const uchar3& r)
-    {
-        const float tr = 0.299f;
-        const float tg = 0.587f;
-        const float tb = 0.114f;
-
-        float val = tb * abs((int)l.x - r.x);
-        val += tg * abs((int)l.y - r.y);
-        val += tr * abs((int)l.z - r.z);
-
-        return val;
-    }
-
-    __device__ float pixDiff(const uchar4& l, const uchar4& r)
-    {
-        const float tr = 0.299f;
-        const float tg = 0.587f;
-        const float tb = 0.114f;
-
-        float val = tb * abs((int)l.x - r.x);
-        val += tg * abs((int)l.y - r.y);
-        val += tr * abs((int)l.z - r.z);
-
-        return val;
-    }
-
-    template <typename T, typename D>
-    __global__ void comp_data(const DevMem2D_<T> left, const PtrStep_<T> right, PtrElemStep_<D> data)
+    template <int cn, typename D>
+    __global__ void comp_data(const DevMem2D left, const PtrStep right, PtrElemStep_<D> data)
     {
         const int x = blockIdx.x * blockDim.x + threadIdx.x;
         const int y = blockIdx.y * blockDim.y + threadIdx.y;

         if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1)
         {
-            const T l = left.ptr(y)[x];
-            const T* rs = right.ptr(y) + x;
+            const uchar* ls = left.ptr(y) + x * cn;
+            const PixDiff<cn> pixDiff(ls);
+            const uchar* rs = right.ptr(y) + x * cn;

             D* ds = data.ptr(y) + x;
             const size_t disp_step = data.step * left.rows;
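An aside before the next hunk. The core of this change: the three pixDiff overloads, which took typed pixel values, become a PixDiff<cn> functor constructed from a raw uchar pointer, so the kernel addresses pixels as x * cn bytes regardless of channel count. A minimal host-side sketch of the same compile-time-dispatch pattern (Diff and pixelCost are illustrative names of mine, not from the commit):

#include <cstdio>
#include <cstdlib>

// Channel-count-specialized functor: cn selects an implementation at
// compile time while the calling code stays generic over cn.
template <int cn> struct Diff;

template <> struct Diff<1>
{
    explicit Diff(const unsigned char* ls) : l(*ls) {}
    float operator()(const unsigned char* rs) const
    {
        return (float)abs((int)l - (int)*rs);
    }
    unsigned char l;
};

template <> struct Diff<3>
{
    explicit Diff(const unsigned char* ls) : b(ls[0]), g(ls[1]), r(ls[2]) {}
    float operator()(const unsigned char* rs) const
    {
        // Luma weights matching the tb/tg/tr constants in the diff
        return 0.114f * abs((int)b - rs[0])
             + 0.587f * abs((int)g - rs[1])
             + 0.299f * abs((int)r - rs[2]);
    }
    unsigned char b, g, r;
};

template <int cn>
float pixelCost(const unsigned char* lrow, const unsigned char* rrow, int x)
{
    Diff<cn> diff(lrow + x * cn);   // pixel x starts x * cn bytes into the row
    return diff(rrow + x * cn);
}

int main()
{
    unsigned char left[6]  = {10, 20, 30, 40, 50, 60};
    unsigned char right[6] = {12, 18, 33, 40, 50, 60};
    printf("%f\n", pixelCost<3>(left, right, 0));   // 2.299
    return 0;
}

The payoff is that the kernel never loads a uchar3 through a typed pointer; it only ever reads individual bytes, which is presumably what misbehaved in the Linux build, though the commit message does not say.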
@@ -120,7 +148,7 @@ namespace cv { namespace gpu { namespace bp
             {
                 if (x - disp >= 1)
                 {
-                    float val = pixDiff(l, rs[-disp]);
+                    float val = pixDiff(rs - disp * cn);

                     ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
                 }
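The one-line change above is the indexing consequence of the refactor: rs is now a byte pointer into interleaved cn-channel data, so the pixel disp columns to the left begins disp * cn bytes earlier. A small self-contained check of that arithmetic (pixelPtr is an illustrative helper, not from the commit):

#include <cassert>

// Address of pixel x in a row of interleaved cn-channel uchar data.
static const unsigned char* pixelPtr(const unsigned char* row, int x, int cn)
{
    return row + x * cn;
}

int main()
{
    unsigned char row[32] = {0};
    const int x = 5, disp = 2, cn = 3;

    const unsigned char* rs = pixelPtr(row, x, cn);

    // rs - disp * cn is exactly pixel (x - disp), which is what the new
    // pixDiff(rs - disp * cn) call reads; the old rs[-disp] relied on
    // typed pointer arithmetic to step back whole pixels instead.
    assert(rs - disp * cn == pixelPtr(row, x - disp, cn));
    return 0;
}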
@@ -133,7 +161,9 @@ namespace cv { namespace gpu { namespace bp
     }

     template<typename T, typename D>
-    void comp_data_gpu(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    void comp_data_gpu(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
+
+    template <> void comp_data_gpu<uchar, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
     {
         dim3 threads(32, 8, 1);
         dim3 grid(1, 1, 1);
@@ -141,20 +171,78 @@ namespace cv { namespace gpu { namespace bp
         grid.x = divUp(left.cols, threads.x);
         grid.y = divUp(left.rows, threads.y);

-        comp_data<T, D><<<grid, threads, 0, stream>>>((DevMem2D_<T>)left, (DevMem2D_<T>)right, (DevMem2D_<D>)data);
+        comp_data<1, short><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<short>)data);

         if (stream == 0)
             cudaSafeCall( cudaThreadSynchronize() );
     }

-    template void comp_data_gpu<uchar, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
-    template void comp_data_gpu<uchar, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
-
-    template void comp_data_gpu<uchar3, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
-    template void comp_data_gpu<uchar3, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
-
-    template void comp_data_gpu<uchar4, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
-    template void comp_data_gpu<uchar4, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream);
+    template <> void comp_data_gpu<uchar, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(left.cols, threads.x);
+        grid.y = divUp(left.rows, threads.y);
+
+        comp_data<1, float><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<float>)data);
+
+        if (stream == 0)
+            cudaSafeCall( cudaThreadSynchronize() );
+    }
+
+    template <> void comp_data_gpu<uchar3, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(left.cols, threads.x);
+        grid.y = divUp(left.rows, threads.y);
+
+        comp_data<3, short><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<short>)data);
+
+        if (stream == 0)
+            cudaSafeCall( cudaThreadSynchronize() );
+    }
+
+    template <> void comp_data_gpu<uchar3, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(left.cols, threads.x);
+        grid.y = divUp(left.rows, threads.y);
+
+        comp_data<3, float><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<float>)data);
+
+        if (stream == 0)
+            cudaSafeCall( cudaThreadSynchronize() );
+    }
+
+    template <> void comp_data_gpu<uchar4, short>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(left.cols, threads.x);
+        grid.y = divUp(left.rows, threads.y);
+
+        comp_data<4, short><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<short>)data);
+
+        if (stream == 0)
+            cudaSafeCall( cudaThreadSynchronize() );
+    }
+
+    template <> void comp_data_gpu<uchar4, float>(const DevMem2D& left, const DevMem2D& right, const DevMem2D& data, cudaStream_t stream)
+    {
+        dim3 threads(32, 8, 1);
+        dim3 grid(1, 1, 1);
+
+        grid.x = divUp(left.cols, threads.x);
+        grid.y = divUp(left.rows, threads.y);
+
+        comp_data<4, float><<<grid, threads, 0, stream>>>(left, right, (DevMem2D_<float>)data);
+
+        if (stream == 0)
+            cudaSafeCall( cudaThreadSynchronize() );
+    }
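Taken together, this hunk drops the single generic comp_data_gpu<T, D> body plus explicit instantiations in favor of six self-contained explicit specializations, each launching comp_data with the channel count baked in as a compile-time constant. A minimal sketch of that declare-then-specialize pattern, with simplified signatures of my own (process and divUp here are illustrative stand-ins, not the OpenCV API):

#include <cstdio>

// Rounding-up division, as used by the launch-configuration code above.
static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

// The primary template is declared but never defined: any (T, D) pair
// without an explicit specialization now fails at link time rather than
// instantiating a generic body.
template <typename T, typename D>
void process(int cols, int rows);

// One full specialization per supported type pair, mirroring the commit.
template <>
void process<unsigned char, short>(int cols, int rows)
{
    // Same geometry as the diff: 32x8 thread blocks covering the image.
    int gridX = divUp(cols, 32);
    int gridY = divUp(rows, 8);
    printf("launch %d x %d blocks for the 1-channel/short path\n", gridX, gridY);
}

int main()
{
    process<unsigned char, short>(640, 480);   // launch 20 x 60 blocks ...
    // process<int, int>(640, 480);            // would fail at link time
    return 0;
}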
///////////////////////////////////////////////////////////////
//////////////////////// data step down ///////////////////////