Normalize line endings and whitespace
committed by Andrey Kamaev
parent 69020da607
commit 04384a71e4
@@ -3359,11 +3359,11 @@ void cv::projectPoints( InputArray _opoints,
    CvMat c_imagePoints = _ipoints.getMat();
    CvMat c_objectPoints = opoints;
    Mat cameraMatrix = _cameraMatrix.getMat();

    Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
    CvMat c_cameraMatrix = cameraMatrix;
    CvMat c_rvec = rvec, c_tvec = tvec;

    Mat distCoeffs = _distCoeffs.getMat();
    CvMat c_distCoeffs = distCoeffs;
    int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
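The hunk above is a whitespace-only touch to the thin C++ wrapper that hands these CvMat views down to the legacy cvProjectPoints2 routine. For orientation, a minimal sketch of how the wrapper is normally called from user code (the inputs are assumed to be filled in elsewhere; nothing here is part of the diff):

    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    // Project 3D object points into the image with the given pose and intrinsics.
    void projectExample(const std::vector<cv::Point3f>& objectPoints,
                        const cv::Mat& rvec, const cv::Mat& tvec,
                        const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        std::vector<cv::Point2f> imagePoints;
        cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints);
    }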
@@ -52,8 +52,8 @@

class CirclesGridClusterFinder
{
    CirclesGridClusterFinder& operator=(const CirclesGridClusterFinder&);
    CirclesGridClusterFinder(const CirclesGridClusterFinder&);
public:
    CirclesGridClusterFinder(bool _isAsymmetricGrid)
    {
File diff suppressed because it is too large
@@ -9,30 +9,30 @@ class epnp {
  ~epnp();

  void add_correspondence(const double X, const double Y, const double Z,
                          const double u, const double v);

  void compute_pose(cv::Mat& R, cv::Mat& t);
 private:
  template <typename T>
  void init_camera_parameters(const cv::Mat& cameraMatrix)
  {
    uc = cameraMatrix.at<T> (0, 2);
    vc = cameraMatrix.at<T> (1, 2);
    fu = cameraMatrix.at<T> (0, 0);
    fv = cameraMatrix.at<T> (1, 1);
  }
  template <typename OpointType, typename IpointType>
  void init_points(const cv::Mat& opoints, const cv::Mat& ipoints)
  {
    for(int i = 0; i < number_of_correspondences; i++)
    {
      pws[3 * i    ] = opoints.at<OpointType>(0,i).x;
      pws[3 * i + 1] = opoints.at<OpointType>(0,i).y;
      pws[3 * i + 2] = opoints.at<OpointType>(0,i).z;

      us[2 * i    ] = ipoints.at<IpointType>(0,i).x*fu + uc;
      us[2 * i + 1] = ipoints.at<IpointType>(0,i).y*fv + vc;
    }
  }
  double reprojection_error(const double R[3][3], const double t[3]);
  void choose_control_points(void);
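A note on the two templates above: init_camera_parameters reads the intrinsics (f_u, f_v, u_c, v_c) out of the camera matrix, and init_points uses them to put the incoming image coordinates into pixel units. Assuming the ipoints handed to this class are normalized, undistorted image coordinates (which is how the surrounding pose solver appears to use it), the relation applied per point is just the pinhole model:

    \begin{pmatrix} u \\ v \\ 1 \end{pmatrix} =
    \begin{pmatrix} f_u & 0 & u_c \\ 0 & f_v & v_c \\ 0 & 0 & 1 \end{pmatrix}
    \begin{pmatrix} x \\ y \\ 1 \end{pmatrix},
    \qquad \text{i.e.} \qquad u = f_u x + u_c, \quad v = f_v y + v_c.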
@@ -56,15 +56,15 @@ class epnp {

  void gauss_newton(const CvMat * L_6x10, const CvMat * Rho, double current_betas[4]);
  void compute_A_and_b_gauss_newton(const double * l_6x10, const double * rho,
                                    const double cb[4], CvMat * A, CvMat * b);

  double compute_R_and_t(const double * ut, const double * betas,
                         double R[3][3], double t[3]);

  void estimate_R_and_t(double R[3][3], double t[3]);

  void copy_R_and_t(const double R_dst[3][3], const double t_dst[3],
                    double R_src[3][3], double t_src[3]);

  double uc, vc, fu, fv;

@@ -275,7 +275,7 @@ cvFindHomography( const CvMat* objectPoints, const CvMat* imagePoints,

    if( result )
        cvConvert( &matH, __H );

    if( mask && tempMask )
    {
        if( CV_ARE_SIZES_EQ(mask, tempMask) )
@@ -481,12 +481,12 @@ int CvFMEstimator::run8Point( const CvMat* _m1, const CvMat* _m2, CvMat* _fmatri

    scale0 = sqrt(2.)/scale0;
    scale1 = sqrt(2.)/scale1;

    cvZero( &A );

    // form a linear system Ax=0: for each selected pair of points m1 & m2,
    // the row of A(=a) represents the coefficients of equation: (m2, 1)'*F*(m1, 1) = 0
    // to save computation time, we compute (At*A) instead of A and then solve (At*A)x=0.
    for( i = 0; i < count; i++ )
    {
        double x0 = (m1[i].x - m0c.x)*scale0;
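The comment in this hunk states the normal-equations shortcut without justification, so one line of algebra for the reader: the eight-point step looks for the unit vector x minimizing

    \min_{\|x\| = 1} \|A x\|^2 \;=\; \min_{\|x\| = 1} x^\top (A^\top A)\, x,

whose minimizer is the eigenvector of A^\top A belonging to its smallest eigenvalue, the same vector (up to sign) as the right singular vector of A for its smallest singular value. Accumulating the 9x9 matrix A^\top A row by row therefore yields an identical solution while never materializing the full count-by-9 matrix A.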
@@ -561,7 +561,7 @@ void CvFMEstimator::computeReprojError( const CvMat* _m1, const CvMat* _m2,
    const CvPoint2D64f* m2 = (const CvPoint2D64f*)_m2->data.ptr;
    const double* F = model->data.db;
    float* err = _err->data.fl;

    for( i = 0; i < count; i++ )
    {
        double a, b, c, d1, d2, s1, s2;

@@ -632,7 +632,7 @@ CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
        param1 = 3;
    if( param2 < DBL_EPSILON || param2 > 1 - DBL_EPSILON )
        param2 = 0.99;

    if( (method & ~3) == CV_RANSAC && count >= 15 )
        result = estimator.runRANSAC(m1, m2, &_F3x3, tempMask, param1, param2 );
    else

@@ -648,7 +648,7 @@ CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,

    if( result )
        cvConvert( fmatrix->rows == 3 ? &_F3x3 : &_F9x3, fmatrix );

    if( mask && tempMask )
    {
        if( CV_ARE_SIZES_EQ(mask, tempMask) )
@@ -1072,7 +1072,7 @@ cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
               points1.type() == points2.type());

    Mat H(3, 3, CV_64F);
    CvMat _pt1 = points1, _pt2 = points2;
    CvMat matH = H, c_mask, *p_mask = 0;
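For reference, a minimal sketch of calling this wrapper from user code; the RANSAC flag and the 3-pixel reprojection threshold are illustrative choices, not something this diff touches:

    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    cv::Mat estimateH(const std::vector<cv::Point2f>& src,
                      const std::vector<cv::Point2f>& dst)
    {
        std::vector<uchar> inlierMask;  // filled with 1 for inliers, 0 for outliers
        return cv::findHomography(src, dst, CV_RANSAC, 3.0, inlierMask);
    }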
@@ -1101,7 +1101,7 @@ cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
               points1.type() == points2.type());

    Mat F(method == CV_FM_7POINT ? 9 : 3, 3, CV_64F);
    CvMat _pt1 = points1, _pt2 = points2;
    CvMat matF = F, c_mask, *p_mask = 0;

@@ -1133,7 +1133,7 @@ void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
    if( npoints < 0 )
        npoints = points.checkVector(3);
    CV_Assert( npoints >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S));

    _lines.create(npoints, 1, CV_32FC3, -1, true);
    CvMat c_points = points, c_lines = _lines.getMat(), c_F = F;
    cvComputeCorrespondEpilines(&c_points, whichImage, &c_F, &c_lines);

@@ -1150,7 +1150,7 @@ void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
        cn = 4;
    }
    CV_Assert( npoints >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));

    _dst.create(npoints, 1, CV_MAKETYPE(CV_32F, cn-1));
    CvMat c_src = src, c_dst = _dst.getMat();
    cvConvertPointsHomogeneous(&c_src, &c_dst);

@@ -1167,7 +1167,7 @@ void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
        cn = 3;
    }
    CV_Assert( npoints >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));

    _dst.create(npoints, 1, CV_MAKETYPE(CV_32F, cn+1));
    CvMat c_src = src, c_dst = _dst.getMat();
    cvConvertPointsHomogeneous(&c_src, &c_dst);
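The two conversion wrappers above only append a unit coordinate or divide it back out. A small round-trip sketch (the point count and values are made up):

    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    void homogeneousRoundTrip()
    {
        std::vector<cv::Point2f> pts(10, cv::Point2f(1.f, 2.f));
        std::vector<cv::Point3f> ptsH;

        cv::convertPointsToHomogeneous(pts, ptsH);    // (x, y)    -> (x, y, 1)
        cv::convertPointsFromHomogeneous(ptsH, pts);  // (x, y, w) -> (x/w, y/w)
    }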
@@ -1177,7 +1177,7 @@ void cv::convertPointsHomogeneous( InputArray _src, OutputArray _dst )
{
    int stype = _src.type(), dtype = _dst.type();
    CV_Assert( _dst.fixedType() );

    if( CV_MAT_CN(stype) > CV_MAT_CN(dtype) )
        convertPointsFromHomogeneous(_src, _dst);
    else
@@ -103,7 +103,7 @@ cvRANSACUpdateNumIters( double p, double ep,

    num = log(num);
    denom = log(denom);

    return denom >= 0 || -num >= max_iters*(-denom) ?
        max_iters : cvRound(num/denom);
}
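For context, num and denom are built just above this hunk (not shown here) from 1 - p and 1 - (1 - ep)^modelPoints, so after the two log() calls the value returned, clamped to max_iters, is the standard RANSAC sample-count bound

    N \;=\; \frac{\log(1 - p)}{\log\!\bigl(1 - (1 - \varepsilon)^m\bigr)},

where p is the requested confidence, \varepsilon (ep) the current outlier ratio and m the number of points in a minimal sample.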
@@ -127,7 +127,7 @@ bool CvModelEstimator2::runRANSAC( const CvMat* m1, const CvMat* m2, CvMat* mode
    models = cvCreateMat( modelSize.height*maxBasicSolutions, modelSize.width, CV_64FC1 );
    err = cvCreateMat( 1, count, CV_32FC1 );
    tmask = cvCreateMat( 1, count, CV_8UC1 );

    if( count > modelPoints )
    {
        ms1 = cvCreateMat( 1, modelPoints, m1->type );

@@ -207,7 +207,7 @@ bool CvModelEstimator2::runLMeDS( const CvMat* m1, const CvMat* m2, CvMat* model

    models = cvCreateMat( modelSize.height*maxBasicSolutions, modelSize.width, CV_64FC1 );
    err = cvCreateMat( 1, count, CV_32FC1 );

    if( count > modelPoints )
    {
        ms1 = cvCreateMat( 1, modelPoints, m1->type );

@@ -323,12 +323,12 @@ bool CvModelEstimator2::checkSubset( const CvMat* m, int count )
    CvPoint2D64f* ptr = (CvPoint2D64f*)m->data.ptr;

    assert( CV_MAT_TYPE(m->type) == CV_64FC2 );

    if( checkPartialSubsets )
        i0 = i1 = count - 1;
    else
        i0 = 0, i1 = count - 1;

    for( i = i0; i <= i1; i++ )
    {
        // check that the i-th selected point does not belong

@@ -362,16 +362,16 @@ class Affine3DEstimator : public CvModelEstimator2
{
public:
    Affine3DEstimator() : CvModelEstimator2(4, cvSize(4, 3), 1) {}
    virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
protected:
    virtual void computeReprojError( const CvMat* m1, const CvMat* m2, const CvMat* model, CvMat* error );
    virtual bool checkSubset( const CvMat* ms1, int count );
};

}

int cv::Affine3DEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* model )
{
    const Point3d* from = reinterpret_cast<const Point3d*>(m1->data.ptr);
    const Point3d* to = reinterpret_cast<const Point3d*>(m2->data.ptr);

@@ -389,7 +389,7 @@ int cv::Affine3DEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* m
            aptr[3] = 1.0;
            *reinterpret_cast<Point3d*>(aptr) = from[i];
            aptr += 16;
        }
    }

    CvMat cvA = A;

@@ -397,7 +397,7 @@ int cv::Affine3DEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* m
    CvMat cvX;
    cvReshape(model, &cvX, 1, 12);
    cvSolve(&cvA, &cvB, &cvX, CV_SVD );

    return 1;
}

@@ -405,10 +405,10 @@ void cv::Affine3DEstimator::computeReprojError( const CvMat* m1, const CvMat* m2
{
    int count = m1->rows * m1->cols;
    const Point3d* from = reinterpret_cast<const Point3d*>(m1->data.ptr);
    const Point3d* to = reinterpret_cast<const Point3d*>(m2->data.ptr);
    const double* F = model->data.db;
    float* err = error->data.fl;

    for(int i = 0; i < count; i++ )
    {
        const Point3d& f = from[i];

@@ -418,7 +418,7 @@ void cv::Affine3DEstimator::computeReprojError( const CvMat* m1, const CvMat* m2
        double b = F[4]*f.x + F[5]*f.y + F[ 6]*f.z + F[ 7] - t.y;
        double c = F[8]*f.x + F[9]*f.y + F[10]*f.z + F[11] - t.z;

        err[i] = (float)sqrt(a*a + b*b + c*c);
    }
}
@@ -427,23 +427,23 @@ bool cv::Affine3DEstimator::checkSubset( const CvMat* ms1, int count )
    CV_Assert( CV_MAT_TYPE(ms1->type) == CV_64FC3 );

    int j, k, i = count - 1;
    const Point3d* ptr = reinterpret_cast<const Point3d*>(ms1->data.ptr);

    // check that the i-th selected point does not belong
    // to a line connecting some previously selected points

    for(j = 0; j < i; ++j)
    {
        Point3d d1 = ptr[j] - ptr[i];
        double n1 = norm(d1);

        for(k = 0; k < j; ++k)
        {
            Point3d d2 = ptr[k] - ptr[i];
            double n = norm(d2) * n1;

            if (fabs(d1.dot(d2) / n) > 0.996)
                break;
        }
        if( k < j )
            break;
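The 0.996 test above is a degeneracy check on the random sample: with d1 and d2 the displacement vectors from the newest point i to two previously chosen points,

    \frac{|d_1 \cdot d_2|}{\|d_1\|\,\|d_2\|} \;=\; |\cos\theta| \;>\; 0.996
    \quad \Longleftrightarrow \quad \theta \lesssim 5^\circ,

so subsets whose points are nearly collinear are rejected and another random sample is drawn.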
@@ -458,12 +458,12 @@ int cv::estimateAffine3D(InputArray _from, InputArray _to,
{
    Mat from = _from.getMat(), to = _to.getMat();
    int count = from.checkVector(3, CV_32F);

    CV_Assert( count >= 0 && to.checkVector(3, CV_32F) == count );

    _out.create(3, 4, CV_64F);
    Mat out = _out.getMat();

    _inliers.create(count, 1, CV_8U, -1, true);
    Mat inliers = _inliers.getMat();
    inliers = Scalar::all(1);

@@ -471,15 +471,15 @@ int cv::estimateAffine3D(InputArray _from, InputArray _to,
    Mat dFrom, dTo;
    from.convertTo(dFrom, CV_64F);
    to.convertTo(dTo, CV_64F);

    CvMat F3x4 = out;
    CvMat mask = inliers;
    CvMat m1 = dFrom;
    CvMat m2 = dTo;

    const double epsilon = numeric_limits<double>::epsilon();
    param1 = param1 <= 0 ? 3 : param1;
    param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;

    return Affine3DEstimator().runRANSAC(&m1, &m2, &F3x4, &mask, param1, param2 );
}
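A minimal sketch of calling the public wrapper shown above; the threshold of 3 and confidence of 0.99 mirror the fallback values in the hunk and are illustrative only:

    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    void fitAffine3D(const std::vector<cv::Point3f>& from,
                     const std::vector<cv::Point3f>& to)
    {
        cv::Mat affine;              // receives the estimated 3x4 affine transform
        std::vector<uchar> inliers;  // per-correspondence inlier mask
        cv::estimateAffine3D(from, to, affine, inliers, 3.0, 0.99);
    }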
@@ -23,26 +23,26 @@ class p3p

 private:
  template <typename T>
  void init_camera_parameters(const cv::Mat& cameraMatrix)
  {
    cx = cameraMatrix.at<T> (0, 2);
    cy = cameraMatrix.at<T> (1, 2);
    fx = cameraMatrix.at<T> (0, 0);
    fy = cameraMatrix.at<T> (1, 1);
  }
  template <typename OpointType, typename IpointType>
  void extract_points(const cv::Mat& opoints, const cv::Mat& ipoints, std::vector<double>& points)
  {
    points.clear();
    points.resize(20);
    for(int i = 0; i < 4; i++)
    {
      points[i*5]   = ipoints.at<IpointType>(0,i).x*fx + cx;
      points[i*5+1] = ipoints.at<IpointType>(0,i).y*fy + cy;
      points[i*5+2] = opoints.at<OpointType>(0,i).x;
      points[i*5+3] = opoints.at<OpointType>(0,i).y;
      points[i*5+4] = opoints.at<OpointType>(0,i).z;
    }
  }
  void init_inverse_parameters();
  int solve_for_lengths(double lengths[4][3], double distances[3], double cosines[3]);
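extract_points above packs exactly four correspondences (20 doubles), which lines up with how this solver is usually reached, namely solvePnP with the P3P flag and exactly four points, the fourth serving to pick among the candidate solutions. A rough usage sketch, with the point data assumed to come from elsewhere:

    #include <opencv2/calib3d/calib3d.hpp>
    #include <vector>

    void poseFromFourPoints(const std::vector<cv::Point3f>& objectPoints, // exactly 4
                            const std::vector<cv::Point2f>& imagePoints,  // exactly 4
                            const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        cv::Mat rvec, tvec;  // rotation (Rodrigues vector) and translation of the model
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                     rvec, tvec, false, CV_P3P);
    }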
@@ -37,7 +37,7 @@ int solve_deg3(double a, double b, double c, double d,
  if (b == 0) {
    // Solve first order system
    if (c == 0)
      return 0;

    x0 = -d / c;
    return 1;

@@ -3,7 +3,7 @@

int solve_deg2(double a, double b, double c, double & x1, double & x2);

int solve_deg3(double a, double b, double c, double d,
               double & x0, double & x1, double & x2);

int solve_deg4(double a, double b, double c, double d, double e,

@@ -195,11 +195,11 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
    }

    inorm = rotation[0] /*[0][0]*/ * rotation[0] /*[0][0]*/ +
            rotation[1] /*[0][1]*/ * rotation[1] /*[0][1]*/ +
            rotation[2] /*[0][2]*/ * rotation[2] /*[0][2]*/;

    jnorm = rotation[3] /*[1][0]*/ * rotation[3] /*[1][0]*/ +
            rotation[4] /*[1][1]*/ * rotation[4] /*[1][1]*/ +
            rotation[5] /*[1][2]*/ * rotation[5] /*[1][2]*/;

    invInorm = cvInvSqrt( inorm );
@@ -219,10 +219,10 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
    /* row2 = row0 x row1 (cross product) */
    rotation[6] /*->m[2][0]*/ = rotation[1] /*->m[0][1]*/ * rotation[5] /*->m[1][2]*/ -
                                rotation[2] /*->m[0][2]*/ * rotation[4] /*->m[1][1]*/;

    rotation[7] /*->m[2][1]*/ = rotation[2] /*->m[0][2]*/ * rotation[3] /*->m[1][0]*/ -
                                rotation[0] /*->m[0][0]*/ * rotation[5] /*->m[1][2]*/;

    rotation[8] /*->m[2][2]*/ = rotation[0] /*->m[0][0]*/ * rotation[4] /*->m[1][1]*/ -
                                rotation[1] /*->m[0][1]*/ * rotation[3] /*->m[1][0]*/;
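The three assignments above are the component-wise form of completing the rotation from its first two rows, i.e. with r_1 = rotation[0..2] and r_2 = rotation[3..5] already normalized,

    r_3 \;=\; r_1 \times r_2 \;=\;
    \bigl(r_{1y} r_{2z} - r_{1z} r_{2y},\;\; r_{1z} r_{2x} - r_{1x} r_{2z},\;\; r_{1x} r_{2y} - r_{1y} r_{2x}\bigr),

which is exactly what rotation[6], rotation[7] and rotation[8] receive.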
@@ -249,16 +249,16 @@ static CvStatus icvReleasePOSITObject( CvPOSITObject ** ppObject )
}

/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:       icvPseudoInverse3D
//    Purpose:    Pseudoinverse N x 3 matrix     N >= 3
//    Context:
//    Parameters:
//                a - input matrix
//                b - pseudoinversed a
//                n - number of rows in a
//                method - if 0, then b = inv(transpose(a)*a) * transpose(a)
//                         if 1, then SVD used.
//    Returns:
//    Notes:      Both matrix are stored by n-dimensional vectors.
//                Now only method == 0 supported.
//F*/
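The comment block above describes the normal-equations pseudoinverse (its method == 0 case). Purely as an illustration with the C++ matrix API (names are invented and no conditioning checks are made), the same computation is:

    #include <opencv2/core/core.hpp>

    // b = inv(a' * a) * a'  for an N x 3, CV_64F matrix a with N >= 3.
    cv::Mat pseudoInverse3D(const cv::Mat& a)
    {
        cv::Mat at = a.t();          // 3 x N
        return (at * a).inv() * at;  // 3 x N pseudoinverse
    }

The SVD variant the comment lists as unimplemented ("if 1, then SVD used") corresponds to cv::invert(a, b, cv::DECOMP_SVD), which is the numerically safer route for ill-conditioned input.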
@@ -125,7 +125,7 @@ cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, CvMat* projPoints1, CvMa
        /* Solve system for current point */
        {
            cvSVD(&matrA,&matrW,0,&matrV,CV_SVD_V_T);

            /* Copy computed point */
            cvmSet(points4D,0,i,cvmGet(&matrV,3,0));/* X */
            cvmSet(points4D,1,i,cvmGet(&matrV,3,1));/* Y */
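Regarding the cvmGet(&matrV, 3, ...) reads above: with CV_SVD_V_T the rows of matrV are the right singular vectors, so row 3 is the singular vector for the smallest singular value of the 4x4 DLT system, the usual homogeneous least-squares solution. A sketch of reaching the same routine through the C++ wrapper (projection matrices and measurements assumed prepared elsewhere):

    #include <opencv2/calib3d/calib3d.hpp>

    void triangulateExample(const cv::Mat& P1, const cv::Mat& P2,     // 3x4 projection matrices
                            const cv::Mat& pts1, const cv::Mat& pts2) // 2xN pixel coordinates
    {
        cv::Mat points4D;  // filled with 4xN homogeneous points
        cv::triangulatePoints(P1, P2, pts1, pts2, points4D);
    }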
@@ -133,7 +133,7 @@ cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, CvMat* projPoints1, CvMa
            cvmSet(points4D,3,i,cvmGet(&matrV,3,3));/* W */
        }
    }

#if 0
    double err = 0;
    /* Points was reconstructed. Try to reproject points */

@@ -143,34 +143,34 @@ cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, CvMat* projPoints1, CvMa
    CvMat point3D;
    double point3D_dat[4];
    point3D = cvMat(4,1,CV_64F,point3D_dat);

    CvMat point2D;
    double point2D_dat[3];
    point2D = cvMat(3,1,CV_64F,point2D_dat);

    for( i = 0; i < numPoints; i++ )
    {
        double W = cvmGet(points4D,3,i);

        point3D_dat[0] = cvmGet(points4D,0,i)/W;
        point3D_dat[1] = cvmGet(points4D,1,i)/W;
        point3D_dat[2] = cvmGet(points4D,2,i)/W;
        point3D_dat[3] = 1;

        /* !!! Project this point for each camera */
        for( int currCamera = 0; currCamera < 2; currCamera++ )
        {
            cvMatMul(projMatrs[currCamera], &point3D, &point2D);

            float x,y;
            float xr,yr,wr;
            x = (float)cvmGet(projPoints[currCamera],0,i);
            y = (float)cvmGet(projPoints[currCamera],1,i);

            wr = (float)point2D_dat[2];
            xr = (float)(point2D_dat[0]/wr);
            yr = (float)(point2D_dat[1]/wr);

            float deltaX,deltaY;
            deltaX = (float)fabs(x-xr);
            deltaY = (float)fabs(y-yr);
@@ -210,7 +210,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
    cv::Ptr<CvMat> result;
    cv::Ptr<CvMat> points1, points2;
    cv::Ptr<CvMat> F;

    if (!CV_IS_MAT(F_) || !CV_IS_MAT(points1_) || !CV_IS_MAT(points2_) )
        CV_Error( CV_StsUnsupportedFormat, "Input parameters must be matrices" );
    if (!( F_->cols == 3 && F_->rows == 3))

@@ -237,19 +237,19 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        if (CV_MAT_CN(new_points2->type) != 2)
            CV_Error( CV_StsUnsupportedFormat, "The second output matrix must have two channels; one for x and one for y" );
    }

    // Make sure F uses double precision
    F = cvCreateMat(3,3,CV_64FC1);
    cvConvert(F_, F);

    // Make sure points1 uses double precision
    points1 = cvCreateMat(points1_->rows,points1_->cols,CV_64FC2);
    cvConvert(points1_, points1);

    // Make sure points2 uses double precision
    points2 = cvCreateMat(points2_->rows,points2_->cols,CV_64FC2);
    cvConvert(points2_, points2);

    tmp33 = cvCreateMat(3,3,CV_64FC1);
    tmp31 = cvCreateMat(3,1,CV_64FC1), tmp31_2 = cvCreateMat(3,1,CV_64FC1);
    T1i = cvCreateMat(3,3,CV_64FC1), T2i = cvCreateMat(3,3,CV_64FC1);

@@ -259,7 +259,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
    S = cvCreateMat(3,3,CV_64FC1);
    V = cvCreateMat(3,3,CV_64FC1);
    e1 = cvCreateMat(3,1,CV_64FC1), e2 = cvCreateMat(3,1,CV_64FC1);

    double x1, y1, x2, y2;
    double scale;
    double f1, f2, a, b, c, d;

@@ -272,7 +272,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        y1 = points1->data.db[p*2+1];
        x2 = points2->data.db[p*2];
        y2 = points2->data.db[p*2+1];

        cvSetZero(T1i);
        cvSetReal2D(T1i,0,0,1);
        cvSetReal2D(T1i,1,1,1);

@@ -288,7 +288,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        cvGEMM(T2i,F,1,0,0,tmp33,CV_GEMM_A_T);
        cvSetZero(TFT);
        cvGEMM(tmp33,T1i,1,0,0,TFT);

        // Compute the right epipole e1 from F * e1 = 0
        cvSetZero(U);
        cvSetZero(S);

@@ -303,7 +303,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
            cvSetReal2D(e1,1,0,-cvGetReal2D(e1,1,0));
            cvSetReal2D(e1,2,0,-cvGetReal2D(e1,2,0));
        }

        // Compute the left epipole e2 from e2' * F = 0 => F' * e2 = 0
        cvSetZero(TFTt);
        cvTranspose(TFT, TFTt);

@@ -321,7 +321,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
            cvSetReal2D(e2,1,0,-cvGetReal2D(e2,1,0));
            cvSetReal2D(e2,2,0,-cvGetReal2D(e2,2,0));
        }

        // Replace F by R2 * F * R1'
        cvSetZero(R1);
        cvSetReal2D(R1,0,0,cvGetReal2D(e1,0,0));

@@ -337,7 +337,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        cvSetReal2D(R2,2,2,1);
        cvGEMM(R2,TFT,1,0,0,tmp33);
        cvGEMM(tmp33,R1,1,0,0,RTFTR,CV_GEMM_B_T);

        // Set f1 = e1(3), f2 = e2(3), a = F22, b = F23, c = F32, d = F33
        f1 = cvGetReal2D(e1,2,0);
        f2 = cvGetReal2D(e2,2,0);

@@ -345,7 +345,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        b = cvGetReal2D(RTFTR,1,2);
        c = cvGetReal2D(RTFTR,2,1);
        d = cvGetReal2D(RTFTR,2,2);

        // Form the polynomial g(t) = k6*t⁶ + k5*t⁵ + k4*t⁴ + k3*t³ + k2*t² + k1*t + k0
        // from f1, f2, a, b, c and d
        cvSetReal2D(polynomial,0,6,( +b*c*c*f1*f1*f1*f1*a-a*a*d*f1*f1*f1*f1*c ));

@@ -355,11 +355,11 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        cvSetReal2D(polynomial,0,2,( +4*a*b*b*b+4*b*b*f2*f2*c*d+4*f2*f2*f2*f2*c*d*d*d-a*a*d*c+b*c*c*a+4*a*b*f2*f2*d*d-2*a*d*d*f1*f1*b+2*b*b*c*f1*f1*d ));
        cvSetReal2D(polynomial,0,1,( +f2*f2*f2*f2*d*d*d*d+b*b*b*b+2*b*b*f2*f2*d*d-a*a*d*d+b*b*c*c ));
        cvSetReal2D(polynomial,0,0,( -a*d*d*b+b*b*c*d ));

        // Solve g(t) for t to get 6 roots
        cvSetZero(result);
        cvSolvePoly(polynomial, result, 100, 20);

        // Evaluate the cost function s(t) at the real part of the 6 roots
        t_min = DBL_MAX;
        s_val = 1./(f1*f1) + (c*c)/(a*a+f2*f2*c*c);
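For orientation, the polynomial and cost set up in this hunk follow the optimal triangulation method of Hartley and Zisserman (Multiple View Geometry, ch. 12): g(t) is the numerator of the derivative of

    s(t) \;=\; \frac{t^2}{1 + f_1^2 t^2} \;+\; \frac{(c\,t + d)^2}{(a\,t + b)^2 + f_2^2 (c\,t + d)^2},

and the s_val initialisation above is its limit as t \to \infty, namely 1/f_1^2 + c^2/(a^2 + f_2^2 c^2), which has to be compared against the value of s at the six roots of g.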
@@ -371,7 +371,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
                t_min = t;
            }
        }

        // find the optimal x1 and y1 as the points on l1 and l2 closest to the origin
        tmp31->data.db[0] = t_min*t_min*f1;
        tmp31->data.db[1] = t_min;

@@ -383,7 +383,7 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        cvGEMM(tmp33,tmp31,1,0,0,tmp31_2);
        x1 = tmp31_2->data.db[0];
        y1 = tmp31_2->data.db[1];

        tmp31->data.db[0] = f2*pow(c*t_min+d,2);
        tmp31->data.db[1] = -(a*t_min+b)*(c*t_min+d);
        tmp31->data.db[2] = f2*f2*pow(c*t_min+d,2) + pow(a*t_min+b,2);

@@ -394,14 +394,14 @@ cvCorrectMatches(CvMat *F_, CvMat *points1_, CvMat *points2_, CvMat *new_points1
        cvGEMM(tmp33,tmp31,1,0,0,tmp31_2);
        x2 = tmp31_2->data.db[0];
        y2 = tmp31_2->data.db[1];

        // Return the points in the matrix format that the user wants
        points1->data.db[p*2] = x1;
        points1->data.db[p*2+1] = y1;
        points2->data.db[p*2] = x2;
        points2->data.db[p*2+1] = y2;
    }

    if( new_points1 )
        cvConvert( points1, new_points1 );
    if( new_points2 )