switched more to doubles
parent a76a34d28e
commit b7cb3fe8e0
@@ -571,24 +571,24 @@ that can extract the intrinsic parameters from ``field of view``, ``intrinsic ma
 class CV_EXPORTS Camera
 {
 public:
-    Camera(float f_x, float f_y, float c_x, float c_y, const Size &window_size);
-    Camera(const Vec2f &fov, const Size &window_size);
-    Camera(const cv::Matx33f &K, const Size &window_size);
-    Camera(const cv::Matx44f &proj, const Size &window_size);
+    Camera(double f_x, double f_y, double c_x, double c_y, const Size &window_size);
+    Camera(const Vec2d &fov, const Size &window_size);
+    Camera(const Matx33d &K, const Size &window_size);
+    Camera(const Matx44d &proj, const Size &window_size);
 
-    inline const Vec2d & getClip() const { return clip_; }
-    inline void setClip(const Vec2d &clip) { clip_ = clip; }
+    inline const Vec2d & getClip() const;
+    inline void setClip(const Vec2d &clip);
 
-    inline const Size & getWindowSize() const { return window_size_; }
+    inline const Size & getWindowSize() const;
     void setWindowSize(const Size &window_size);
 
-    inline const Vec2f & getFov() const { return fov_; }
-    inline void setFov(const Vec2f & fov) { fov_ = fov; }
+    inline const Vec2d & getFov() const;
+    inline void setFov(const Vec2d & fov);
 
-    inline const Vec2f & getPrincipalPoint() const { return principal_point_; }
-    inline const Vec2f & getFocalLength() const { return focal_; }
+    inline const Vec2d & getPrincipalPoint() const;
+    inline const Vec2d & getFocalLength() const;
 
-    void computeProjectionMatrix(Matx44f &proj) const;
+    void computeProjectionMatrix(Matx44d &proj) const;
 
     static Camera KinectCamera(const Size &window_size);
 
@@ -600,7 +600,7 @@ viz::Camera::Camera
 -------------------
 Constructs a Camera.
 
-.. ocv:function:: Camera(float f_x, float f_y, float c_x, float c_y, const Size &window_size)
+.. ocv:function:: Camera(double f_x, double f_y, double c_x, double c_y, const Size &window_size)
 
     :param f_x: Horizontal focal length.
    :param f_y: Vertical focal length.
@@ -608,19 +608,19 @@ Constructs a Camera.
     :param c_y: y coordinate of the principal point.
     :param window_size: Size of the window. This together with focal length and principal point determines the field of view.
 
-.. ocv:function:: Camera(const Vec2f &fov, const Size &window_size)
+.. ocv:function:: Camera(const Vec2d &fov, const Size &window_size)
 
     :param fov: Field of view (horizontal, vertical)
     :param window_size: Size of the window.
 
    Principal point is at the center of the window by default.
 
-.. ocv:function:: Camera(const cv::Matx33f &K, const Size &window_size)
+.. ocv:function:: Camera(const Matx33d &K, const Size &window_size)
 
     :param K: Intrinsic matrix of the camera.
     :param window_size: Size of the window. This together with intrinsic matrix determines the field of view.
 
-.. ocv:function:: Camera(const cv::Matx44f &proj, const Size &window_size)
+.. ocv:function:: Camera(const Matx44d &proj, const Size &window_size)
 
     :param proj: Projection matrix of the camera.
    :param window_size: Size of the window. This together with projection matrix determines the field of view.
@@ -629,7 +629,7 @@ viz::Camera::computeProjectionMatrix
 ------------------------------------
 Computes projection matrix using intrinsic parameters of the camera.
 
-.. ocv:function:: void computeProjectionMatrix(Matx44f &proj) const
+.. ocv:function:: void computeProjectionMatrix(Matx44d &proj) const
 
     :param proj: Output projection matrix.
 
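A minimal usage sketch of the constructors documented above, as the API reads after this change (the header path, the 640x480 window and the intrinsic values are illustrative; the intrinsics mirror the KinectCamera defaults that appear later in this patch):

    #include <opencv2/viz.hpp>

    int main()
    {
        const cv::Size window(640, 480);                            // illustrative window size
        cv::viz::Camera from_intrinsics(525.0, 525.0, 320.0, 240.0, window);
        cv::viz::Camera from_fov(cv::Vec2d(1.095, 0.858), window);  // horizontal/vertical fov in radians
        cv::Matx33d K(525.0, 0.0, 320.0,  0.0, 525.0, 240.0,  0.0, 0.0, 1.0);
        cv::viz::Camera from_K(K, window);

        cv::Matx44d proj;
        from_K.computeProjectionMatrix(proj);                       // fills a 4x4 OpenGL-style projection
        return 0;
    }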
@@ -118,10 +118,10 @@ namespace cv
    class CV_EXPORTS Camera
    {
    public:
-        Camera(float fx, float fy, float cx, float cy, const Size &window_size);
-        explicit Camera(const Vec2f &fov, const Size &window_size);
-        explicit Camera(const Matx33f &K, const Size &window_size);
-        explicit Camera(const Matx44f &proj, const Size &window_size);
+        Camera(double fx, double fy, double cx, double cy, const Size &window_size);
+        explicit Camera(const Vec2d &fov, const Size &window_size);
+        explicit Camera(const Matx33d &K, const Size &window_size);
+        explicit Camera(const Matx44d &proj, const Size &window_size);
 
        inline const Vec2d & getClip() const { return clip_; }
        inline void setClip(const Vec2d &clip) { clip_ = clip; }
@@ -129,24 +129,24 @@ namespace cv
        inline const Size & getWindowSize() const { return window_size_; }
        void setWindowSize(const Size &window_size);
 
-        inline const Vec2f & getFov() const { return fov_; }
-        inline void setFov(const Vec2f & fov) { fov_ = fov; }
+        inline const Vec2d& getFov() const { return fov_; }
+        inline void setFov(const Vec2d& fov) { fov_ = fov; }
 
-        inline const Vec2f & getPrincipalPoint() const { return principal_point_; }
-        inline const Vec2f & getFocalLength() const { return focal_; }
+        inline const Vec2d& getPrincipalPoint() const { return principal_point_; }
+        inline const Vec2d& getFocalLength() const { return focal_; }
 
-        void computeProjectionMatrix(Matx44f &proj) const;
+        void computeProjectionMatrix(Matx44d &proj) const;
 
        static Camera KinectCamera(const Size &window_size);
 
    private:
-        void init(float fx, float fy, float cx, float cy, const Size &window_size);
+        void init(double fx, double fy, double cx, double cy, const Size &window_size);
 
        Vec2d clip_;
-        Vec2f fov_;
+        Vec2d fov_;
        Size window_size_;
-        Vec2f principal_point_;
-        Vec2f focal_;
+        Vec2d principal_point_;
+        Vec2d focal_;
    };
 
    class CV_EXPORTS KeyboardEvent
@@ -469,7 +469,7 @@ void cv::viz::InteractorStyle::OnKeyDown()
        // if a valid transformation was found, use it otherwise fall back to default view point.
        if (found_transformation)
        {
-            const vtkMatrix4x4* m = vtkProp3D::SafeDownCast(it->second)->GetUserMatrix();
+            vtkMatrix4x4* m = vtkProp3D::SafeDownCast(it->second)->GetUserMatrix();
 
            cam->SetFocalPoint(m->GetElement(0, 3) - m->GetElement(0, 2),
                               m->GetElement(1, 3) - m->GetElement(1, 2),
@@ -1195,7 +1195,7 @@ namespace cv { namespace viz { namespace
        vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
        transform->PreMultiply();
        vtkSmartPointer<vtkMatrix4x4> mat_trans = vtkSmartPointer<vtkMatrix4x4>::New();
-        mat_trans = convertToVtkMatrix(path[i].matrix);
+        mat_trans = vtkmatrix(path[i].matrix);
        transform->SetMatrix(mat_trans);
 
        vtkSmartPointer<vtkTransformPolyDataFilter> filter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();
@@ -136,12 +136,12 @@ cv::viz::Mesh3d cv::viz::Mesh3d::load(const String& file)
 ////////////////////////////////////////////////////////////////////
 /// Camera implementation
 
-cv::viz::Camera::Camera(float fx, float fy, float cx, float cy, const Size &window_size)
+cv::viz::Camera::Camera(double fx, double fy, double cx, double cy, const Size &window_size)
 {
    init(fx, fy, cx, cy, window_size);
 }
 
-cv::viz::Camera::Camera(const Vec2f &fov, const Size &window_size)
+cv::viz::Camera::Camera(const Vec2d &fov, const Size &window_size)
 {
    CV_Assert(window_size.width > 0 && window_size.height > 0);
    setClip(Vec2d(0.01, 1000.01)); // Default clipping
@@ -152,16 +152,16 @@ cv::viz::Camera::Camera(const Vec2f &fov, const Size &window_size)
    focal_ = Vec2f(principal_point_[0] / tan(fov_[0]*0.5f), principal_point_[1] / tan(fov_[1]*0.5f));
 }
 
-cv::viz::Camera::Camera(const cv::Matx33f & K, const Size &window_size)
+cv::viz::Camera::Camera(const cv::Matx33d & K, const Size &window_size)
 {
-    float f_x = K(0,0);
-    float f_y = K(1,1);
-    float c_x = K(0,2);
-    float c_y = K(1,2);
+    double f_x = K(0,0);
+    double f_y = K(1,1);
+    double c_x = K(0,2);
+    double c_y = K(1,2);
    init(f_x, f_y, c_x, c_y, window_size);
 }
 
-cv::viz::Camera::Camera(const Matx44f &proj, const Size &window_size)
+cv::viz::Camera::Camera(const Matx44d &proj, const Size &window_size)
 {
    CV_Assert(window_size.width > 0 && window_size.height > 0);
 
@@ -174,28 +174,26 @@ cv::viz::Camera::Camera(const Matx44f &proj, const Size &window_size)
 
    double epsilon = 2.2204460492503131e-16;
 
-    if (fabs(left-right) < epsilon) principal_point_[0] = static_cast<float>(window_size.width) * 0.5f;
-    else principal_point_[0] = (left * static_cast<float>(window_size.width)) / (left - right);
-    focal_[0] = -near * principal_point_[0] / left;
+    principal_point_[0] = fabs(left-right) < epsilon ? window_size.width * 0.5 : (left * window_size.width) / (left - right);
+    principal_point_[1] = fabs(top-bottom) < epsilon ? window_size.height * 0.5 : (top * window_size.height) / (top - bottom);
 
-    if (fabs(top-bottom) < epsilon) principal_point_[1] = static_cast<float>(window_size.height) * 0.5f;
-    else principal_point_[1] = (top * static_cast<float>(window_size.height)) / (top - bottom);
-    focal_[1] = near * principal_point_[1] / top;
+    focal_[0] = -near * principal_point_[0] / left;
+    focal_[1] = near * principal_point_[1] / top;
 
    setClip(Vec2d(near, far));
-    fov_[0] = (atan2(principal_point_[0],focal_[0]) + atan2(window_size.width-principal_point_[0],focal_[0]));
-    fov_[1] = (atan2(principal_point_[1],focal_[1]) + atan2(window_size.height-principal_point_[1],focal_[1]));
+    fov_[0] = atan2(principal_point_[0], focal_[0]) + atan2(window_size.width-principal_point_[0], focal_[0]);
+    fov_[1] = atan2(principal_point_[1], focal_[1]) + atan2(window_size.height-principal_point_[1], focal_[1]);
 
    window_size_ = window_size;
 }
 
-void cv::viz::Camera::init(float fx, float fy, float cx, float cy, const Size &window_size)
+void cv::viz::Camera::init(double fx, double fy, double cx, double cy, const Size &window_size)
 {
    CV_Assert(window_size.width > 0 && window_size.height > 0);
    setClip(Vec2d(0.01, 1000.01));// Default clipping
 
-    fov_[0] = (atan2(cx,fx) + atan2(window_size.width-cx,fx));
-    fov_[1] = (atan2(cy,fy) + atan2(window_size.height-cy,fy));
+    fov_[0] = atan2(cx, fx) + atan2(window_size.width - cx, fx);
+    fov_[1] = atan2(cy, fy) + atan2(window_size.height - cy, fy);
 
    principal_point_[0] = cx;
    principal_point_[1] = cy;
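The fov_ formula in init() splits the viewing angle at the principal point instead of assuming it sits at the window center. A quick numeric check with the KinectCamera intrinsics used later in this patch (fx = 525, cx = 320, width = 640; values illustrative):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // same expression as Camera::init() above
        double fx = 525.0, cx = 320.0, width = 640.0;
        double fov_x = std::atan2(cx, fx) + std::atan2(width - cx, fx);
        std::printf("fov_x = %.3f rad (%.1f deg)\n", fov_x, fov_x * 180.0 / 3.14159265358979);
        return 0;  // prints roughly 1.095 rad (62.7 deg)
    }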
@@ -223,7 +221,7 @@ void cv::viz::Camera::setWindowSize(const Size &window_size)
    window_size_ = window_size;
 }
 
-void cv::viz::Camera::computeProjectionMatrix(Matx44f &proj) const
+void cv::viz::Camera::computeProjectionMatrix(Matx44d &proj) const
 {
    double top = clip_[0] * principal_point_[1] / focal_[1];
    double left = -clip_[0] * principal_point_[0] / focal_[0];
@@ -247,6 +245,6 @@ void cv::viz::Camera::computeProjectionMatrix(Matx44f &proj) const
 
 cv::viz::Camera cv::viz::Camera::KinectCamera(const Size &window_size)
 {
-    Matx33f K(525.f, 0.f, 320.f, 0.f, 525.f, 240.f, 0.f, 0.f, 1.f) ;
+    Matx33d K(525.0, 0.0, 320.0, 0.0, 525.0, 240.0, 0.0, 0.0, 1.0) ;
    return Camera(K, window_size);
 }
@@ -64,24 +64,6 @@ cv::Affine3d cv::viz::makeCameraPose(const Vec3d& position, const Vec3d& focal_p
    return makeTransformToGlobal(u, v, n, position);
 }
 
-vtkSmartPointer<vtkMatrix4x4> cv::viz::convertToVtkMatrix(const cv::Matx44f &m)
-{
-    vtkSmartPointer<vtkMatrix4x4> vtk_matrix = vtkSmartPointer<vtkMatrix4x4>::New();
-    for (int i = 0; i < 4; i++)
-        for (int k = 0; k < 4; k++)
-            vtk_matrix->SetElement(i, k, m(i, k));
-    return vtk_matrix;
-}
-
-cv::Matx44f cv::viz::convertToMatx(const vtkSmartPointer<vtkMatrix4x4>& vtk_matrix)
-{
-    cv::Matx44f m;
-    for (int i = 0; i < 4; i++)
-        for (int k = 0; k < 4; k++)
-            m(i, k) = vtk_matrix->GetElement(i, k);
-    return m;
-}
-
 namespace cv { namespace viz
 {
    template<typename _Tp> Vec<_Tp, 3>* vtkpoints_data(vtkSmartPointer<vtkPoints>& points);
@@ -134,7 +134,7 @@ void cv::viz::Viz3d::VizImpl::showWidget(const String &id, const Widget &widget,
    if (actor)
    {
        // If the actor is 3D, apply pose
-        vtkSmartPointer<vtkMatrix4x4> matrix = convertToVtkMatrix(pose.matrix);
+        vtkSmartPointer<vtkMatrix4x4> matrix = vtkmatrix(pose.matrix);
        actor->SetUserMatrix(matrix);
        actor->Modified();
    }
@@ -181,7 +181,7 @@ void cv::viz::Viz3d::VizImpl::setWidgetPose(const String &id, const Affine3d &po
    vtkProp3D *actor = vtkProp3D::SafeDownCast(wam_itr->second);
    CV_Assert("Widget is not 3D." && actor);
 
-    vtkSmartPointer<vtkMatrix4x4> matrix = convertToVtkMatrix(pose.matrix);
+    vtkSmartPointer<vtkMatrix4x4> matrix = vtkmatrix(pose.matrix);
    actor->SetUserMatrix(matrix);
    actor->Modified();
 }
@@ -202,9 +202,8 @@ void cv::viz::Viz3d::VizImpl::updateWidgetPose(const String &id, const Affine3d
        setWidgetPose(id, pose);
        return ;
    }
-    Matx44d matrix_cv = convertToMatx(matrix);
-    Affine3d updated_pose = pose * Affine3d(matrix_cv);
-    matrix = convertToVtkMatrix(updated_pose.matrix);
+    Affine3d updated_pose = pose * Affine3d(*matrix->Element);
+    matrix = vtkmatrix(updated_pose.matrix);
 
    actor->SetUserMatrix(matrix);
    actor->Modified();
@@ -220,9 +219,7 @@ cv::Affine3d cv::viz::Viz3d::VizImpl::getWidgetPose(const String &id) const
    vtkProp3D *actor = vtkProp3D::SafeDownCast(wam_itr->second);
    CV_Assert("Widget is not 3D." && actor);
 
-    vtkSmartPointer<vtkMatrix4x4> matrix = actor->GetUserMatrix();
-    Matx44d matrix_cv = convertToMatx(matrix);
-    return Affine3d(matrix_cv);
+    return Affine3d(*actor->GetUserMatrix()->Element);
 }
 
 /////////////////////////////////////////////////////////////////////////////////////////////
@@ -315,21 +312,23 @@ void cv::viz::Viz3d::VizImpl::setBackgroundColor(const Color& color)
 /////////////////////////////////////////////////////////////////////////////////////////////
 void cv::viz::Viz3d::VizImpl::setCamera(const Camera &camera)
 {
-    vtkCamera& active_camera = *renderer_->GetActiveCamera();
+    vtkSmartPointer<vtkCamera> active_camera = renderer_->GetActiveCamera();
 
    // Set the intrinsic parameters of the camera
    window_->SetSize(camera.getWindowSize().width, camera.getWindowSize().height);
    double aspect_ratio = static_cast<double>(camera.getWindowSize().width)/static_cast<double>(camera.getWindowSize().height);
 
-    Matx44f proj_mat;
+    Matx44d proj_mat;
    camera.computeProjectionMatrix(proj_mat);
+
    // Use the intrinsic parameters of the camera to simulate more realistically
-    Matx44f old_proj_mat = convertToMatx(active_camera.GetProjectionTransformMatrix(aspect_ratio, -1.0, 1.0));
-    vtkTransform *transform = vtkTransform::New();
+    vtkSmartPointer<vtkMatrix4x4> vtk_matrix = active_camera->GetProjectionTransformMatrix(aspect_ratio, -1.0, 1.0);
+    Matx44d old_proj_mat(*vtk_matrix->Element);
+
    // This is a hack around not being able to set Projection Matrix
-    transform->SetMatrix(convertToVtkMatrix(proj_mat * old_proj_mat.inv()));
-    active_camera.SetUserTransform(transform);
-    transform->Delete();
+    vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
+    transform->SetMatrix(vtkmatrix(proj_mat * old_proj_mat.inv()));
+    active_camera->SetUserTransform(transform);
 
    renderer_->ResetCameraClippingRange();
    renderer_->Render();
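The composition passed to SetUserTransform() is what makes the hack mentioned in the comment work: vtkCamera offers no direct projection-matrix setter here, but it concatenates the user transform with the projection it computes itself. With T = proj_mat * old_proj_mat.inv(), the effective projection becomes T * old_proj_mat = proj_mat * old_proj_mat.inv() * old_proj_mat = proj_mat, i.e. exactly the matrix produced by Camera::computeProjectionMatrix().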
@@ -338,15 +337,14 @@ void cv::viz::Viz3d::VizImpl::setCamera(const Camera &camera)
 /////////////////////////////////////////////////////////////////////////////////////////////
 cv::viz::Camera cv::viz::Viz3d::VizImpl::getCamera() const
 {
-    vtkCamera& active_camera = *renderer_->GetActiveCamera();
+    vtkSmartPointer<vtkCamera> active_camera = renderer_->GetActiveCamera();
 
    Size window_size(renderer_->GetRenderWindow()->GetSize()[0],
                     renderer_->GetRenderWindow()->GetSize()[1]);
    double aspect_ratio = window_size.width / (double)window_size.height;
 
-    Matx44f proj_matrix = convertToMatx(active_camera.GetProjectionTransformMatrix(aspect_ratio, -1.0f, 1.0f));
-    Camera camera(proj_matrix, window_size);
-    return camera;
+    vtkSmartPointer<vtkMatrix4x4> proj_matrix = active_camera->GetProjectionTransformMatrix(aspect_ratio, -1.0f, 1.0f);
+    return Camera(Matx44d(*proj_matrix->Element), window_size);
 }
 
 /////////////////////////////////////////////////////////////////////////////////////////////
@@ -180,19 +180,22 @@ private:
 };
 
 
-
 namespace cv
 {
    namespace viz
    {
-        vtkSmartPointer<vtkMatrix4x4> convertToVtkMatrix(const cv::Matx44f &m);
-        cv::Matx44f convertToMatx(const vtkSmartPointer<vtkMatrix4x4>& vtk_matrix);
+        inline vtkSmartPointer<vtkMatrix4x4> vtkmatrix(const cv::Matx44d &matrix)
+        {
+            vtkSmartPointer<vtkMatrix4x4> vtk_matrix = vtkSmartPointer<vtkMatrix4x4>::New();
+            vtk_matrix->DeepCopy(matrix.val);
+            return vtk_matrix;
+        }
 
        struct color_tag {};
        struct gray_tag {};
 
-        static Vec3b fetchRgb(const unsigned char* color, color_tag) { return Vec3b(color[2], color[1], color[0]); }
-        static Vec3b fetchRgb(const unsigned char* color, gray_tag) { return Vec3b(color[0], color[0], color[0]); }
+        inline Vec3b fetchRgb(const unsigned char* color, color_tag) { return Vec3b(color[2], color[1], color[0]); }
+        inline Vec3b fetchRgb(const unsigned char* color, gray_tag) { return Vec3b(color[0], color[0], color[0]); }
 
        inline Vec3d vtkpoint(const Point3f& point) { return Vec3d(point.x, point.y, point.z); }
        template<typename _Tp> inline _Tp normalized(const _Tp& v) { return v * 1/cv::norm(v); }
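The new vtkmatrix() helper, together with the Matx44d(*m->Element) pattern used throughout this patch, replaces the removed convertToVtkMatrix()/convertToMatx() pair: both vtkMatrix4x4 and Matx44d store 16 doubles row-major, so the element-wise copy loops are unnecessary. A minimal round-trip sketch (illustrative; include paths depend on the OpenCV and VTK versions in use):

    #include <vtkSmartPointer.h>
    #include <vtkMatrix4x4.h>
    #include <opencv2/core/core.hpp>

    int main()
    {
        cv::Matx44d m = cv::Matx44d::eye();

        // cv -> vtk: DeepCopy() accepts a pointer to 16 contiguous doubles (Matx44d::val)
        vtkSmartPointer<vtkMatrix4x4> vm = vtkSmartPointer<vtkMatrix4x4>::New();
        vm->DeepCopy(m.val);

        // vtk -> cv: Element is a public double[4][4]; *vm->Element gives the 16 doubles starting at row 0
        cv::Matx44d back(*vm->Element);
        return 0;
    }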
@@ -204,82 +207,6 @@ namespace cv
            return scaled_color;
        }
 
-        struct NanFilter
-        {
-            template<typename _Msk>
-            struct Impl
-            {
-                template<typename _Tp>
-                static Vec<_Tp, 3>* copy(const Mat& source, Vec<_Tp, 3>* output, const Mat& nan_mask)
-                {
-                    CV_Assert(DataDepth<_Tp>::value == source.depth() && source.size() == nan_mask.size());
-                    CV_Assert(nan_mask.channels() == 3 || nan_mask.channels() == 4);
-                    CV_DbgAssert(DataDepth<_Msk>::value == nan_mask.depth());
-
-                    int s_chs = source.channels();
-                    int m_chs = nan_mask.channels();
-
-                    for (int y = 0; y < source.rows; ++y)
-                    {
-                        const _Tp* srow = source.ptr<_Tp>(y);
-                        const _Msk* mrow = nan_mask.ptr<_Msk>(y);
-
-                        for (int x = 0; x < source.cols; ++x, srow += s_chs, mrow += m_chs)
-                            if (!isNan(mrow[0]) && !isNan(mrow[1]) && !isNan(mrow[2]))
-                                *output++ = Vec<_Tp, 3>(srow);
-                    }
-                    return output;
-                }
-
-                template<typename _Tag>
-                static Vec3b* copyColor(const Mat& source, Vec3b* output, const Mat& nan_mask)
-                {
-                    CV_Assert(source.size() == nan_mask.size());
-                    CV_Assert(nan_mask.channels() == 3 || nan_mask.channels() == 4);
-                    CV_DbgAssert(DataDepth<_Msk>::value == nan_mask.depth());
-
-                    int s_chs = source.channels();
-                    int m_chs = nan_mask.channels();
-
-                    for (int y = 0; y < source.rows; ++y)
-                    {
-                        const unsigned char* srow = source.ptr<unsigned char>(y);
-                        const _Msk* mrow = nan_mask.ptr<_Msk>(y);
-
-                        for (int x = 0; x < source.cols; ++x, srow += s_chs, mrow += m_chs)
-                            if (!isNan(mrow[0]) && !isNan(mrow[1]) && !isNan(mrow[2]))
-                                *output++ = fetchRgb(srow, _Tag());
-                    }
-                    return output;
-                }
-            };
-
-            template<typename _Tp>
-            static inline Vec<_Tp, 3>* copy(const Mat& source, Vec<_Tp, 3>* output, const Mat& nan_mask)
-            {
-                CV_Assert(nan_mask.depth() == CV_32F || nan_mask.depth() == CV_64F);
-
-                typedef Vec<_Tp, 3>* (*copy_func)(const Mat&, Vec<_Tp, 3>*, const Mat&);
-                const static copy_func table[2] = { &NanFilter::Impl<float>::copy<_Tp>, &NanFilter::Impl<double>::copy<_Tp> };
-
-                return table[nan_mask.depth() - 5](source, output, nan_mask);
-            }
-
-            static inline Vec3b* copyColor(const Mat& source, Vec3b* output, const Mat& nan_mask)
-            {
-                CV_Assert(nan_mask.depth() == CV_32F || nan_mask.depth() == CV_64F);
-
-                typedef Vec3b* (*copy_func)(const Mat&, Vec3b*, const Mat&);
-                const static copy_func table[2][2] =
-                {
-                    { &NanFilter::Impl<float >::copyColor<gray_tag>, &NanFilter::Impl<float> ::copyColor<color_tag> },
-                    { &NanFilter::Impl<double>::copyColor<gray_tag>, &NanFilter::Impl<double>::copyColor<color_tag> }
-                };
-
-                return table[nan_mask.depth() - 5][source.channels() == 1 ? 0 : 1](source, output, nan_mask);
-            }
-        };
-
        struct ConvertToVtkImage
        {
            struct Impl
@@ -244,7 +244,7 @@ void cv::viz::Widget3D::setPose(const Affine3d &pose)
    vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this));
    CV_Assert("Widget is not 3D." && actor);
 
-    vtkSmartPointer<vtkMatrix4x4> matrix = convertToVtkMatrix(pose.matrix);
+    vtkSmartPointer<vtkMatrix4x4> matrix = vtkmatrix(pose.matrix);
    actor->SetUserMatrix(matrix);
    actor->Modified();
 }
@@ -261,8 +261,8 @@ void cv::viz::Widget3D::updatePose(const Affine3d &pose)
        return;
    }
 
-    Affine3d updated_pose = pose * Affine3d(convertToMatx(matrix));
-    matrix = convertToVtkMatrix(updated_pose.matrix);
+    Affine3d updated_pose = pose * Affine3d(*matrix->Element);
+    matrix = vtkmatrix(updated_pose.matrix);
 
    actor->SetUserMatrix(matrix);
    actor->Modified();
@@ -272,7 +272,7 @@ cv::Affine3d cv::viz::Widget3D::getPose() const
 {
    vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this));
    CV_Assert("Widget is not 3D." && actor);
-    return Affine3d(convertToMatx(actor->GetUserMatrix()));
+    return Affine3d(*actor->GetUserMatrix()->Element);
 }
 
 void cv::viz::Widget3D::setColor(const Color &color)
@@ -50,7 +50,6 @@ TEST(Viz, DISABLED_show_cloud_bluberry)
    Mat dragon_cloud = readCloud(get_dragon_ply_file_path());
 
    Viz3d viz("show_cloud_bluberry");
-    viz.setBackgroundColor();
    viz.showWidget("coosys", WCoordinateSystem());
    viz.showWidget("dragon", WCloud(dragon_cloud, Color::bluberry()));
    viz.spin();
@@ -64,7 +63,6 @@ TEST(Viz, DISABLED_show_cloud_random_color)
    theRNG().fill(colors, RNG::UNIFORM, 0, 255);
 
    Viz3d viz("show_cloud_random_color");
-    viz.setBackgroundColor();
    viz.showWidget("coosys", WCoordinateSystem());
    viz.showWidget("dragon", WCloud(dragon_cloud, colors));
    viz.spin();
@@ -80,7 +78,6 @@ TEST(Viz, DISABLED_show_cloud_masked)
        dragon_cloud.at<Vec3f>(i) = qnan;
 
    Viz3d viz("show_cloud_masked");
-    viz.setBackgroundColor();
    viz.showWidget("coosys", WCoordinateSystem());
    viz.showWidget("dragon", WCloud(dragon_cloud));
    viz.spin();