changed StereoBeliefPropagation_GPU output disparity default type to CV_32S
commit 63fed0f831
parent 7083f0f815
@@ -387,8 +387,8 @@ namespace cv
 //! number of levels, truncation of discontinuity cost, truncation of data cost and weighting of data cost.
 StereoBeliefPropagation_GPU(int ndisp, int iters, int levels, float disc_cost, float data_cost, float lambda);

-//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
-//! Output disparity has CV_8U type.
+//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,
+//! if disparity is empty output type will be CV_32S else output type will be disparity.type().
 void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);

 //! Acync version
@@ -409,8 +409,8 @@ namespace cv
 float lambda;
 private:
 GpuMat u, d, l, r, u2, d2, l2, r2;

 std::vector<GpuMat> datas;
+GpuMat out;
 };
 }
 }
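For context, the contract documented in the header change above means a caller can either let the operator allocate a CV_32S result or pre-allocate a disparity map of another type and have the result converted into it. A minimal, hypothetical usage sketch (file names, parameter values and include paths are assumptions, not part of this commit; GpuMat upload/download helpers are assumed to behave as in the 2.x gpu module):

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/gpu/gpu.hpp>   // assumed header location for the gpu module

    int main()
    {
        cv::Mat left  = cv::imread("left.png",  0);   // rectified 8-bit grayscale pair
        cv::Mat right = cv::imread("right.png", 0);

        cv::gpu::GpuMat d_left(left), d_right(right);

        // ndisp must be positive and a multiple of 8 (see the CV_Assert lines further down)
        cv::gpu::StereoBeliefPropagation_GPU bp(64, 5, 5);

        cv::gpu::GpuMat d_disp;                        // empty -> created as CV_32S
        bp(d_left, d_right, d_disp);

        cv::gpu::GpuMat d_disp8(left.rows, left.cols, CV_8U);
        bp(d_left, d_right, d_disp8);                  // non-empty -> converted to CV_8U

        cv::Mat disp;
        d_disp.download(disp);                         // disp.type() == CV_32S
        return 0;
    }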
@@ -63,6 +63,7 @@ static const float DEFAULT_DATA_COST = 10.0f;
 static const float DEFAULT_LAMBDA_COST = 0.07f;

 typedef DevMem2D_<float> DevMem2Df;
+typedef DevMem2D_<int> DevMem2Di;

 namespace cv { namespace gpu { namespace impl {
 extern "C" void load_constants(int ndisp, float disc_cost, float data_cost, float lambda);
@@ -70,31 +71,27 @@ namespace cv { namespace gpu { namespace impl {
 extern "C" void data_down_kernel_caller(int dst_cols, int dst_rows, int src_rows, const DevMem2Df& src, DevMem2Df dst, const cudaStream_t& stream);
 extern "C" void level_up(int dst_idx, int dst_cols, int dst_rows, int src_rows, DevMem2Df* mu, DevMem2Df* md, DevMem2Df* ml, DevMem2Df* mr, const cudaStream_t& stream);
 extern "C" void call_all_iterations(int cols, int rows, int iters, DevMem2Df& u, DevMem2Df& d, DevMem2Df& l, DevMem2Df& r, const DevMem2Df& data, const cudaStream_t& stream);
-extern "C" void output_caller(const DevMem2Df& u, const DevMem2Df& d, const DevMem2Df& l, const DevMem2Df& r, const DevMem2Df& data, DevMem2D disp, const cudaStream_t& stream);
+extern "C" void output_caller(const DevMem2Df& u, const DevMem2Df& d, const DevMem2Df& l, const DevMem2Df& r, const DevMem2Df& data, DevMem2Di disp, const cudaStream_t& stream);
 }}}

 cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_)
 : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(DEFAULT_DISC_COST), data_cost(DEFAULT_DATA_COST), lambda(DEFAULT_LAMBDA_COST), datas(levels_)
 {
-const int max_supported_ndisp = 1 << (sizeof(unsigned char) * 8);
-
-CV_Assert(0 < ndisp && ndisp <= max_supported_ndisp);
+CV_Assert(0 < ndisp);
 CV_Assert(ndisp % 8 == 0);
 }

 cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, float disc_cost_, float data_cost_, float lambda_)
 : ndisp(ndisp_), iters(iters_), levels(levels_), disc_cost(disc_cost_), data_cost(data_cost_), lambda(lambda_), datas(levels_)
 {
-const int max_supported_ndisp = 1 << (sizeof(unsigned char) * 8);
-
-CV_Assert(0 < ndisp && ndisp <= max_supported_ndisp);
+CV_Assert(0 < ndisp);
 CV_Assert(ndisp % 8 == 0);
 }

 static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, float disc_cost, float data_cost, float lambda,
 GpuMat& u, GpuMat& d, GpuMat& l, GpuMat& r,
 GpuMat& u2, GpuMat& d2, GpuMat& l2, GpuMat& r2,
-vector<GpuMat>& datas,
+vector<GpuMat>& datas, GpuMat& out,
 const GpuMat& left, const GpuMat& right, GpuMat& disp,
 const cudaStream_t& stream)
 {
@@ -111,8 +108,6 @@ static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, float disc_
 const int min_image_dim_size = 20;
 CV_Assert(min(lowest_cols, lowest_rows) > min_image_dim_size);

-disp.create(rows, cols, CV_8U);
-
 u.create(rows * ndisp, cols, CV_32F);
 d.create(rows * ndisp, cols, CV_32F);
 l.create(rows * ndisp, cols, CV_32F);
@@ -146,10 +141,16 @@ static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, float disc_
 }

 impl::load_constants(ndisp, disc_cost, data_cost, lambda);

-vector<int> cols_all(levels);
-vector<int> rows_all(levels);
-vector<int> iters_all(levels);
+datas.resize(levels);
+
+AutoBuffer<int> cols_all_buf(levels);
+AutoBuffer<int> rows_all_buf(levels);
+AutoBuffer<int> iters_all_buf(levels);
+
+int *cols_all = cols_all_buf;
+int *rows_all = rows_all_buf;
+int *iters_all = iters_all_buf;

 cols_all[0] = cols;
 rows_all[0] = rows;
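The switch from std::vector<int> to cv::AutoBuffer<int> above keeps the per-level bookkeeping off the heap for typical level counts: AutoBuffer holds a small fixed-size array inline, falls back to dynamic allocation only when the requested size exceeds it, and converts implicitly to a raw pointer, which is the idiom used here. A minimal sketch of that pattern (function and variable names, and the halving rule, are illustrative only):

    #include <opencv2/core/core.hpp>

    void fill_level_sizes(int levels, int cols0)
    {
        cv::AutoBuffer<int> cols_buf(levels); // inline storage for small 'levels'
        int* cols_all = cols_buf;             // implicit conversion to int*

        cols_all[0] = cols0;
        for (int i = 1; i < levels; ++i)
            cols_all[i] = (cols_all[i - 1] + 1) / 2;  // illustrative halving per pyramid level
    }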
@@ -190,18 +191,34 @@ static void stereo_bp_gpu_operator(int ndisp, int iters, int levels, float disc_

 mem_idx = (mem_idx + 1) & 1;
 }

-impl::output_caller(u, d, l, r, datas.front(), disp, stream);
+if (disp.empty())
+disp.create(rows, cols, CV_32S);
+
+if (disp.type() == CV_32S)
+{
+disp = zero;
+impl::output_caller(u, d, l, r, datas.front(), disp, stream);
+}
+else
+{
+out.create(rows, cols, CV_32S);
+out = zero;
+
+impl::output_caller(u, d, l, r, datas.front(), out, stream);
+
+out.convertTo(disp, disp.type());
+}
 }

 void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp)
 {
-::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, left, right, disp, 0);
+::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, 0);
 }

 void cv::gpu::StereoBeliefPropagation_GPU::operator()(const GpuMat& left, const GpuMat& right, GpuMat& disp, const CudaStream& stream)
 {
-::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, left, right, disp, StreamAccessor::getStream(stream));
+::stereo_bp_gpu_operator(ndisp, iters, levels, disc_cost, data_cost, lambda, u, d, l, r, u2, d2, l2, r2, datas, out, left, right, disp, StreamAccessor::getStream(stream));
 }

 bool cv::gpu::StereoBeliefPropagation_GPU::checkIfGpuCallReasonable()
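In the new dispatch above, a CV_32S destination is written directly by the kernel, while any other requested type is computed into the intermediate CV_32S buffer out and then converted with convertTo, which saturates to the destination range; for example, asking for CV_8U while using more than 256 disparities (now permitted, since the ndisp <= 256 assertion was dropped) would clip values above 255. A small host-side illustration of that saturating conversion (values are made up):

    #include <opencv2/core/core.hpp>

    void saturation_example()
    {
        cv::Mat disp32(1, 3, CV_32S);
        disp32.at<int>(0, 0) = 17;
        disp32.at<int>(0, 1) = 255;
        disp32.at<int>(0, 2) = 300;          // does not fit into 8 bits

        cv::Mat disp8;
        disp32.convertTo(disp8, CV_8U);      // saturate_cast -> 17, 255, 255
    }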
@@ -353,42 +353,41 @@ namespace cv { namespace gpu { namespace impl {

 namespace beliefpropagation_gpu
 {
-__global__ void output(int cols, int rows, float *u, float *d, float *l, float *r, float* data, size_t step, unsigned char *disp, size_t res_step)
+__global__ void output(int cols, int rows, float *u, float *d, float *l, float *r, float* data, size_t step, int *disp, size_t res_step)
 {
 int x = blockIdx.x * blockDim.x + threadIdx.x;
 int y = blockIdx.y * blockDim.y + threadIdx.y;

-if (y > 0 && y < rows - 1)
-if (x > 0 && x < cols - 1)
+if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
 {
 float *us = u + (y + 1) * step + x;
 float *ds = d + (y - 1) * step + x;
 float *ls = l + y * step + (x + 1);
 float *rs = r + y * step + (x - 1);
 float *dt = data + y * step + x;

 size_t disp_step = rows * step;

 int best = 0;
 float best_val = FLT_MAX;
 for (int d = 0; d < cndisp; ++d)
 {
 float val = us[d * disp_step] + ds[d * disp_step] + ls[d * disp_step] + rs[d * disp_step] + dt[d * disp_step];

 if (val < best_val)
 {
 best_val = val;
 best = d;
 }
 }

-disp[res_step * y + x] = best & 0xFF;
+disp[res_step * y + x] = best;
 }
 }
 }

 namespace cv { namespace gpu { namespace impl {
-extern "C" void output_caller(const DevMem2D_<float>& u, const DevMem2D_<float>& d, const DevMem2D_<float>& l, const DevMem2D_<float>& r, const DevMem2D_<float>& data, DevMem2D disp, const cudaStream_t& stream)
+extern "C" void output_caller(const DevMem2D_<float>& u, const DevMem2D_<float>& d, const DevMem2D_<float>& l, const DevMem2D_<float>& r, const DevMem2D_<float>& data, DevMem2D_<int> disp, const cudaStream_t& stream)
 {
 dim3 threads(32, 8, 1);
 dim3 grid(1, 1, 1);
@@ -398,12 +397,12 @@ namespace cv { namespace gpu { namespace impl {

 if (stream == 0)
 {
-beliefpropagation_gpu::output<<<grid, threads>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step);
+beliefpropagation_gpu::output<<<grid, threads>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step/sizeof(int));
 cudaSafeCall( cudaThreadSynchronize() );
 }
 else
 {
-beliefpropagation_gpu::output<<<grid, threads, 0, stream>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step);
+beliefpropagation_gpu::output<<<grid, threads, 0, stream>>>(disp.cols, disp.rows, u.ptr, d.ptr, l.ptr, r.ptr, data.ptr, u.step/sizeof(float), disp.ptr, disp.step/sizeof(int));
 }
 }
 }}}
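Note that the kernel launches above now pass disp.step/sizeof(int): the kernel indexes the output through an int*, so its res_step parameter is a pitch in elements rather than bytes, mirroring what is already done for the float buffers with u.step/sizeof(float). A self-contained sketch of the bytes-versus-elements arithmetic (names are illustrative, not part of the diff):

    #include <cstddef>

    // Access element (x, y) of a pitched int image whose pitch is given in bytes.
    inline int& at_int(int* base, std::size_t pitch_bytes, int y, int x)
    {
        std::size_t pitch_elems = pitch_bytes / sizeof(int); // what the caller passes as res_step
        return base[pitch_elems * y + x];
    }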