a big patch; use special proxy types (Input/OutputArray, Input/OutputArrayOfArrays) for passing in vectors, matrices etc.

This commit is contained in:
Vadim Pisarevsky
2011-04-17 13:14:45 +00:00
parent 335370a7c0
commit abeeb40d46
94 changed files with 10831 additions and 9631 deletions

View File

@@ -1894,49 +1894,43 @@ cvDrawChessboardCorners( CvArr* _image, CvSize pattern_size,
}
}
namespace cv
{
bool findChessboardCorners( const Mat& image, Size patternSize,
vector<Point2f>& corners, int flags )
bool cv::findChessboardCorners( const InputArray& _image, Size patternSize,
OutputArray corners, int flags )
{
int count = patternSize.area()*2;
corners.resize(count);
CvMat _image = image;
bool ok = cvFindChessboardCorners(&_image, patternSize,
(CvPoint2D32f*)&corners[0], &count, flags ) > 0;
if(count >= 0)
corners.resize(count);
vector<Point2f> tmpcorners(count+1);
CvMat c_image = _image.getMat();
bool ok = cvFindChessboardCorners(&c_image, patternSize,
(CvPoint2D32f*)&tmpcorners[0], &count, flags ) > 0;
if( count > 0 )
{
tmpcorners.resize(count);
Mat(tmpcorners).copyTo(corners);
}
else
corners.release();
return ok;
}
void drawChessboardCorners( Mat& image, Size patternSize,
const Mat& corners,
void cv::drawChessboardCorners( InputOutputArray _image, Size patternSize,
const InputArray& _corners,
bool patternWasFound )
{
if( corners.cols == 0 || corners.rows == 0 )
Mat corners = _corners.getMat();
if( !corners.empty() )
return;
CvMat _image = image;
CvMat c_image = _image.getMat();
int nelems = corners.checkVector(2, CV_32F, true);
CV_Assert(nelems >= 0);
cvDrawChessboardCorners( &_image, patternSize, (CvPoint2D32f*)corners.data,
cvDrawChessboardCorners( &c_image, patternSize, (CvPoint2D32f*)corners.data,
nelems, patternWasFound );
}
void drawChessboardCorners( Mat& image, Size patternSize,
const vector<Point2f>& corners,
bool patternWasFound )
{
if( corners.empty() )
return;
CvMat _image = image;
cvDrawChessboardCorners( &_image, patternSize, (CvPoint2D32f*)&corners[0],
(int)corners.size(), patternWasFound );
}
bool findCirclesGrid( const Mat& image, Size patternSize,
vector<Point2f>& centers, int flags )
bool cv::findCirclesGrid( const InputArray& _image, Size patternSize,
OutputArray _centers, int flags )
{
Mat image = _image.getMat();
vector<Point2f> centers;
SimpleBlobDetector::Params params;
if(flags & CALIB_CB_WHITE_CIRCLES)
{
@@ -1957,6 +1951,7 @@ bool findCirclesGrid( const Mat& image, Size patternSize,
{
CirclesGridClusterFinder circlesGridClusterFinder;
circlesGridClusterFinder.findGrid(points, patternSize, centers);
Mat(centers).copyTo(_centers);
return !centers.empty();
}
@@ -2005,10 +2000,10 @@ bool findCirclesGrid( const Mat& image, Size patternSize,
if (i != 0)
{
Mat orgPointsMat;
transform(Mat(centers), orgPointsMat, H.inv());
convertPointsHomogeneous(orgPointsMat, centers);
transform(centers, orgPointsMat, H.inv());
convertPointsFromHomogeneous(orgPointsMat, centers);
}
Mat(centers).copyTo(_centers);
return true;
}
@@ -2020,10 +2015,8 @@ bool findCirclesGrid( const Mat& image, Size patternSize,
H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
}
}
Mat(centers).copyTo(_centers);
return false;
}
}
/* End of file. */

View File

@@ -2753,10 +2753,11 @@ CV_IMPL int cvStereoRectifyUncalibrated(
}
void cv::reprojectImageTo3D( const Mat& disparity,
Mat& _3dImage, const Mat& Q,
void cv::reprojectImageTo3D( const InputArray& _disparity,
OutputArray __3dImage, const InputArray& _Qmat,
bool handleMissingValues, int dtype )
{
Mat disparity = _disparity.getMat(), Q = _Qmat.getMat();
int stype = disparity.type();
CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
@@ -2771,7 +2772,8 @@ void cv::reprojectImageTo3D( const Mat& disparity,
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
}
_3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
__3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
Mat _3dImage = __3dImage.getMat();
const double bigZ = 10000.;
double q[4][4];
@@ -3092,45 +3094,56 @@ cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
namespace cv
{
static void collectCalibrationData( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<vector<Point2f> >& imagePoints2,
Mat& objPtMat, Mat& imgPtMat, Mat* imgPtMat2,
static void collectCalibrationData( const InputArrayOfArrays& objectPoints,
const InputArrayOfArrays& imagePoints1,
const InputArrayOfArrays& imagePoints2,
Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
Mat& npoints )
{
size_t i, j = 0, ni = 0, nimages = objectPoints.size(), total = 0;
CV_Assert(nimages > 0 && nimages == imagePoints.size() &&
(!imgPtMat2 || nimages == imagePoints2.size()));
int nimages = (int)objectPoints.total();
int i, j = 0, ni = 0, total = 0;
CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
(!imgPtMat2 || nimages == (int)imagePoints2.total()));
for( i = 0; i < nimages; i++ )
{
ni = objectPoints[i].size();
CV_Assert(ni == imagePoints[i].size() && (!imgPtMat2 || ni == imagePoints2[i].size()));
ni = objectPoints.getMat(i).checkVector(3, CV_32F);
CV_Assert( ni >= 0 );
total += ni;
}
npoints.create(1, (int)nimages, CV_32S);
objPtMat.create(1, (int)total, DataType<Point3f>::type);
imgPtMat.create(1, (int)total, DataType<Point2f>::type);
objPtMat.create(1, (int)total, CV_32FC3);
imgPtMat1.create(1, (int)total, CV_32FC2);
Point2f* imgPtData2 = 0;
if( imgPtMat2 )
{
imgPtMat2->create(1, (int)total, DataType<Point2f>::type);
imgPtMat2->create(1, (int)total, CV_32FC2);
imgPtData2 = imgPtMat2->ptr<Point2f>();
}
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData = imgPtMat.ptr<Point2f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
for( i = 0; i < nimages; i++, j += ni )
{
ni = objectPoints[i].size();
((int*)npoints.data)[i] = (int)ni;
std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
std::copy(imagePoints[i].begin(), imagePoints[i].end(), imgPtData + j);
if( imgPtMat2 )
std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
Mat objpt = objectPoints.getMat(i);
Mat imgpt1 = imagePoints1.getMat(i);
ni = objpt.checkVector(3, CV_32F);
int ni1 = imgpt1.checkVector(2, CV_32F);
CV_Assert( ni > 0 && ni == ni1 );
npoints.at<int>(i) = ni;
memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );
if( imgPtData2 )
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
}
}
}
@@ -3162,126 +3175,153 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
}
void cv::Rodrigues(const Mat& src, Mat& dst)
void cv::Rodrigues(const InputArray& _src, OutputArray _dst, OutputArray _jacobian)
{
Mat src = _src.getMat();
bool v2m = src.cols == 1 || src.rows == 1;
dst.create(3, v2m ? 3 : 1, src.depth());
CvMat _src = src, _dst = dst;
bool ok = cvRodrigues2(&_src, &_dst, 0) > 0;
_dst.create(3, v2m ? 3 : 1, src.depth());
Mat dst = _dst.getMat();
CvMat _csrc = src, _cdst = dst, _cjacobian;
if( _jacobian.needed() )
{
_jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth());
_cjacobian = _jacobian.getMat();
}
bool ok = cvRodrigues2(&_csrc, &_cdst, _jacobian.needed() ? &_cjacobian : 0) > 0;
if( !ok )
dst = Scalar(0);
}
void cv::Rodrigues(const Mat& src, Mat& dst, Mat& jacobian)
void cv::matMulDeriv( const InputArray& _Amat, const InputArray& _Bmat,
OutputArray _dABdA, OutputArray _dABdB )
{
bool v2m = src.cols == 1 || src.rows == 1;
dst.create(3, v2m ? 3 : 1, src.depth());
jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth());
CvMat _src = src, _dst = dst, _jacobian = jacobian;
bool ok = cvRodrigues2(&_src, &_dst, &_jacobian) > 0;
if( !ok )
dst = Scalar(0);
}
void cv::matMulDeriv( const Mat& A, const Mat& B, Mat& dABdA, Mat& dABdB )
{
dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());
dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());
CvMat matA = A, matB = B, _dABdA = dABdA, _dABdB = dABdB;
cvCalcMatMulDeriv(&matA, &matB, &_dABdA, &_dABdB);
}
void cv::composeRT( const Mat& rvec1, const Mat& tvec1,
const Mat& rvec2, const Mat& tvec2,
Mat& rvec3, Mat& tvec3 )
{
rvec3.create(rvec1.size(), rvec1.type());
tvec3.create(tvec1.size(), tvec1.type());
CvMat _rvec1 = rvec1, _tvec1 = tvec1, _rvec2 = rvec2,
_tvec2 = tvec2, _rvec3 = rvec3, _tvec3 = tvec3;
cvComposeRT(&_rvec1, &_tvec1, &_rvec2, &_tvec2, &_rvec3, &_tvec3, 0, 0, 0, 0, 0, 0, 0, 0);
Mat A = _Amat.getMat(), B = _Bmat.getMat();
_dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());
_dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());
CvMat matA = A, matB = B, c_dABdA = _dABdA.getMat(), c_dABdB = _dABdB.getMat();
cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB);
}
void cv::composeRT( const Mat& rvec1, const Mat& tvec1,
const Mat& rvec2, const Mat& tvec2,
Mat& rvec3, Mat& tvec3,
Mat& dr3dr1, Mat& dr3dt1,
Mat& dr3dr2, Mat& dr3dt2,
Mat& dt3dr1, Mat& dt3dt1,
Mat& dt3dr2, Mat& dt3dt2 )
void cv::composeRT( const InputArray& _rvec1, const InputArray& _tvec1,
const InputArray& _rvec2, const InputArray& _tvec2,
OutputArray _rvec3, OutputArray _tvec3,
OutputArray _dr3dr1, OutputArray _dr3dt1,
OutputArray _dr3dr2, OutputArray _dr3dt2,
OutputArray _dt3dr1, OutputArray _dt3dt1,
OutputArray _dt3dr2, OutputArray _dt3dt2 )
{
Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat();
Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat();
int rtype = rvec1.type();
rvec3.create(rvec1.size(), rtype);
tvec3.create(tvec1.size(), rtype);
dr3dr1.create(3, 3, rtype); dr3dt1.create(3, 3, rtype);
dr3dr2.create(3, 3, rtype); dr3dt2.create(3, 3, rtype);
dt3dr1.create(3, 3, rtype); dt3dt1.create(3, 3, rtype);
dt3dr2.create(3, 3, rtype); dt3dt2.create(3, 3, rtype);
_rvec3.create(rvec1.size(), rtype);
_tvec3.create(tvec1.size(), rtype);
Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();
CvMat _rvec1 = rvec1, _tvec1 = tvec1, _rvec2 = rvec2,
_tvec2 = tvec2, _rvec3 = rvec3, _tvec3 = tvec3;
CvMat _dr3dr1 = dr3dr1, _dr3dt1 = dr3dt1, _dr3dr2 = dr3dr2, _dr3dt2 = dr3dt2;
CvMat _dt3dr1 = dt3dr1, _dt3dt1 = dt3dt1, _dt3dr2 = dt3dr2, _dt3dt2 = dt3dt2;
cvComposeRT(&_rvec1, &_tvec1, &_rvec2, &_tvec2, &_rvec3, &_tvec3,
&_dr3dr1, &_dr3dt1, &_dr3dr2, &_dr3dt2,
&_dt3dr1, &_dt3dt1, &_dt3dr2, &_dt3dt2);
CvMat c_rvec1 = rvec1, c_tvec1 = tvec1, c_rvec2 = rvec2,
c_tvec2 = tvec2, c_rvec3 = rvec3, c_tvec3 = tvec3;
CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
if( _dr3dr1.needed() )
{
_dr3dr1.create(3, 3, rtype);
p_dr3dr1 = &(c_dr3dr1 = _dr3dr1.getMat());
}
if( _dr3dt1.needed() )
{
_dr3dt1.create(3, 3, rtype);
p_dr3dt1 = &(c_dr3dt1 = _dr3dt1.getMat());
}
if( _dr3dr2.needed() )
{
_dr3dr2.create(3, 3, rtype);
p_dr3dr2 = &(c_dr3dr2 = _dr3dr2.getMat());
}
if( _dr3dt2.needed() )
{
_dr3dt2.create(3, 3, rtype);
p_dr3dt2 = &(c_dr3dt2 = _dr3dt2.getMat());
}
if( _dt3dr1.needed() )
{
_dt3dr1.create(3, 3, rtype);
p_dt3dr1 = &(c_dt3dr1 = _dt3dr1.getMat());
}
if( _dt3dt1.needed() )
{
_dt3dt1.create(3, 3, rtype);
p_dt3dt1 = &(c_dt3dt1 = _dt3dt1.getMat());
}
if( _dt3dr2.needed() )
{
_dt3dr2.create(3, 3, rtype);
p_dt3dr2 = &(c_dt3dr2 = _dt3dr2.getMat());
}
if( _dt3dt2.needed() )
{
_dt3dt2.create(3, 3, rtype);
p_dt3dt2 = &(c_dt3dt2 = _dt3dt2.getMat());
}
cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3,
p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2,
p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2);
}
void cv::projectPoints( const Mat& opoints,
const Mat& rvec, const Mat& tvec,
const Mat& cameraMatrix,
const Mat& distCoeffs,
vector<Point2f>& ipoints )
{
CV_Assert(opoints.isContinuous() && opoints.depth() == CV_32F &&
((opoints.rows == 1 && opoints.channels() == 3) ||
opoints.cols*opoints.channels() == 3));
ipoints.resize(opoints.cols*opoints.rows*opoints.channels()/3);
CvMat _objectPoints = opoints, _imagePoints = Mat(ipoints);
CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix,
distCoeffs.data ? &_distCoeffs : 0,
&_imagePoints, 0, 0, 0, 0, 0, 0 );
}
void cv::projectPoints( const Mat& opoints,
const Mat& rvec, const Mat& tvec,
const Mat& cameraMatrix,
const Mat& distCoeffs,
vector<Point2f>& ipoints,
Mat& dpdrot, Mat& dpdt, Mat& dpdf,
Mat& dpdc, Mat& dpddist,
void cv::projectPoints( const InputArray& _opoints,
const InputArray& _rvec,
const InputArray& _tvec,
const InputArray& _cameraMatrix,
const InputArray& _distCoeffs,
OutputArray _ipoints,
OutputArray _jacobian,
double aspectRatio )
{
CV_Assert(opoints.isContinuous() && opoints.depth() == CV_32F &&
((opoints.rows == 1 && opoints.channels() == 3) ||
opoints.cols*opoints.channels() == 3));
Mat opoints = _opoints.getMat();
int npoints = opoints.checkVector(3), depth = opoints.depth();
CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
int npoints = opoints.cols*opoints.rows*opoints.channels()/3;
ipoints.resize(npoints);
dpdrot.create(npoints*2, 3, CV_64F);
dpdt.create(npoints*2, 3, CV_64F);
dpdf.create(npoints*2, 2, CV_64F);
dpdc.create(npoints*2, 2, CV_64F);
dpddist.create(npoints*2, distCoeffs.rows + distCoeffs.cols - 1, CV_64F);
CvMat _objectPoints = opoints, _imagePoints = Mat(ipoints);
CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
CvMat _dpdrot = dpdrot, _dpdt = dpdt, _dpdf = dpdf, _dpdc = dpdc, _dpddist = dpddist;
CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,
&_imagePoints, &_dpdrot, &_dpdt, &_dpdf, &_dpdc, &_dpddist, aspectRatio );
_ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
CvMat imagePoints = _ipoints.getMat();
CvMat objectPoints = opoints;
CvMat cameraMatrix = _cameraMatrix.getMat();
CvMat rvec = _rvec.getMat(), tvec = _tvec.getMat();
CvMat distCoeffs = _distCoeffs.getMat();
int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
if( _jacobian.needed() )
{
_jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
Mat jacobian = _jacobian.getMat();
pdpdrot = &(dpdrot = jacobian.colRange(0, 3));
pdpdt = &(dpdt = jacobian.colRange(3, 6));
pdpdf = &(dpdf = jacobian.colRange(6, 8));
pdpdc = &(dpdc = jacobian.colRange(8, 10));
pdpddist = &(dpddist = jacobian.colRange(10, 10+ndistCoeffs));
}
cvProjectPoints2( &objectPoints, &rvec, &tvec, &cameraMatrix, &distCoeffs,
&imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
}
cv::Mat cv::initCameraMatrix2D( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
cv::Mat cv::initCameraMatrix2D( const InputArrayOfArrays& objectPoints,
const InputArrayOfArrays& imagePoints,
Size imageSize, double aspectRatio )
{
Mat objPt, imgPt, npoints, cameraMatrix(3, 3, CV_64F);
collectCalibrationData( objectPoints, imagePoints, vector<vector<Point2f> >(),
collectCalibrationData( objectPoints, imagePoints, InputArrayOfArrays(),
objPt, imgPt, 0, npoints );
CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints, _cameraMatrix = cameraMatrix;
cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,
@@ -3290,60 +3330,73 @@ cv::Mat cv::initCameraMatrix2D( const vector<vector<Point3f> >& objectPoints,
}
double cv::calibrateCamera( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<Mat>& rvecs, vector<Mat>& tvecs, int flags )
double cv::calibrateCamera( const InputArrayOfArrays& _objectPoints,
const InputArrayOfArrays& _imagePoints,
Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArray _rvecs, OutputArray _tvecs, int flags )
{
int rtype = CV_64F;
Mat cameraMatrix = _cameraMatrix.getMat();
cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
Mat distCoeffs = _distCoeffs.getMat();
distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
if( !(flags & CALIB_RATIONAL_MODEL) )
distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
size_t i, nimages = objectPoints.size();
size_t i, nimages = _objectPoints.total();
CV_Assert( nimages > 0 );
Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
collectCalibrationData( objectPoints, imagePoints, vector<vector<Point2f> >(),
collectCalibrationData( _objectPoints, _imagePoints, InputArrayOfArrays(),
objPt, imgPt, 0, npoints );
CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints;
CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
CvMat _rvecM = rvecM, _tvecM = tvecM;
CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_rvecM = rvecM, c_tvecM = tvecM;
double reprojErr = cvCalibrateCamera2(&_objPt, &_imgPt, &_npoints, imageSize,
&_cameraMatrix, &_distCoeffs, &_rvecM,
&_tvecM, flags );
rvecs.resize(nimages);
tvecs.resize(nimages);
double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
&c_cameraMatrix, &c_distCoeffs, &c_rvecM,
&c_tvecM, flags );
_rvecs.create(nimages, 1, CV_64FC3);
_tvecs.create(nimages, 1, CV_64FC3);
for( i = 0; i < nimages; i++ )
{
rvecM.row((int)i).copyTo(rvecs[i]);
tvecM.row((int)i).copyTo(tvecs[i]);
_rvecs.create(3, 1, CV_64F, i, true);
_tvecs.create(3, 1, CV_64F, i, true);
Mat rv = _rvecs.getMat(i), tv = _tvecs.getMat(i);
memcpy(rv.data, rvecM.ptr<double>(i), 3*sizeof(double));
memcpy(tv.data, tvecM.ptr<double>(i), 3*sizeof(double));
}
cameraMatrix.copyTo(_cameraMatrix);
distCoeffs.copyTo(_distCoeffs);
return reprojErr;
}
void cv::calibrationMatrixValues( const Mat& cameraMatrix, Size imageSize,
void cv::calibrationMatrixValues( const InputArray& _cameraMatrix, Size imageSize,
double apertureWidth, double apertureHeight,
double& fovx, double& fovy, double& focalLength,
Point2d& principalPoint, double& aspectRatio )
{
CvMat _cameraMatrix = cameraMatrix;
cvCalibrationMatrixValues( &_cameraMatrix, imageSize, apertureWidth, apertureHeight,
CvMat c_cameraMatrix = _cameraMatrix.getMat();
cvCalibrationMatrixValues( &c_cameraMatrix, imageSize, apertureWidth, apertureHeight,
&fovx, &fovy, &focalLength, (CvPoint2D64f*)&principalPoint, &aspectRatio );
}
double cv::stereoCalibrate( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints1,
const vector<vector<Point2f> >& imagePoints2,
Mat& cameraMatrix1, Mat& distCoeffs1,
Mat& cameraMatrix2, Mat& distCoeffs2,
Size imageSize, Mat& R, Mat& T,
Mat& E, Mat& F, TermCriteria criteria,
double cv::stereoCalibrate( const InputArrayOfArrays& _objectPoints,
const InputArrayOfArrays& _imagePoints1,
const InputArrayOfArrays& _imagePoints2,
InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
Size imageSize, OutputArray _Rmat, OutputArray _Tmat,
OutputArray _Emat, OutputArray _Fmat, TermCriteria criteria,
int flags )
{
int rtype = CV_64F;
Mat cameraMatrix1 = _cameraMatrix1.getMat();
Mat cameraMatrix2 = _cameraMatrix2.getMat();
Mat distCoeffs1 = _distCoeffs1.getMat();
Mat distCoeffs2 = _distCoeffs2.getMat();
cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype);
cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype);
distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
@@ -3355,176 +3408,210 @@ double cv::stereoCalibrate( const vector<vector<Point3f> >& objectPoints,
distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
}
R.create(3, 3, rtype);
T.create(3, 1, rtype);
E.create(3, 3, rtype);
F.create(3, 3, rtype);
_Rmat.create(3, 3, rtype);
_Tmat.create(3, 1, rtype);
Mat objPt, imgPt, imgPt2, npoints;
collectCalibrationData( objectPoints, imagePoints1, imagePoints2,
collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
objPt, imgPt, &imgPt2, npoints );
CvMat _objPt = objPt, _imgPt = imgPt, _imgPt2 = imgPt2, _npoints = npoints;
CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
CvMat matR = R, matT = T, matE = E, matF = F;
CvMat c_objPt = objPt, c_imgPt = imgPt, c_imgPt2 = imgPt2, c_npoints = npoints;
CvMat c_cameraMatrix1 = cameraMatrix1, c_distCoeffs1 = distCoeffs1;
CvMat c_cameraMatrix2 = cameraMatrix2, c_distCoeffs2 = distCoeffs2;
CvMat c_matR = _Rmat.getMat(), c_matT = _Tmat.getMat(), c_matE, c_matF, *p_matE = 0, *p_matF = 0;
if( _Emat.needed() )
{
_Emat.create(3, 3, rtype);
p_matE = &(c_matE = _Emat.getMat());
}
if( _Fmat.needed() )
{
_Fmat.create(3, 3, rtype);
p_matF = &(c_matF = _Fmat.getMat());
}
return cvStereoCalibrate(&_objPt, &_imgPt, &_imgPt2, &_npoints, &_cameraMatrix1,
&_distCoeffs1, &_cameraMatrix2, &_distCoeffs2, imageSize,
&matR, &matT, &matE, &matF, criteria, flags );
double err = cvStereoCalibrate(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
&c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, imageSize,
&c_matR, &c_matT, p_matE, p_matF, criteria, flags );
cameraMatrix1.copyTo(_cameraMatrix1);
cameraMatrix2.copyTo(_cameraMatrix2);
distCoeffs1.copyTo(_distCoeffs1);
distCoeffs2.copyTo(_distCoeffs2);
return err;
}
void cv::stereoRectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix2, const Mat& distCoeffs2,
Size imageSize, const Mat& R, const Mat& T,
Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
int flags )
{
int rtype = CV_64F;
R1.create(3, 3, rtype);
R2.create(3, 3, rtype);
P1.create(3, 4, rtype);
P2.create(3, 4, rtype);
Q.create(4, 4, rtype);
CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
CvMat matR = R, matT = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, matQ = Q;
cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,
imageSize, &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags );
}
void cv::stereoRectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix2, const Mat& distCoeffs2,
Size imageSize, const Mat& R, const Mat& T,
Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
void cv::stereoRectify( const InputArray& _cameraMatrix1, const InputArray& _distCoeffs1,
const InputArray& _cameraMatrix2, const InputArray& _distCoeffs2,
Size imageSize, const InputArray& _Rmat, const InputArray& _Tmat,
OutputArray _Rmat1, OutputArray _Rmat2,
OutputArray _Pmat1, OutputArray _Pmat2,
OutputArray _Qmat, int flags,
double alpha, Size newImageSize,
Rect* validPixROI1, Rect* validPixROI2,
int flags )
Rect* validPixROI1, Rect* validPixROI2 )
{
CvMat c_cameraMatrix1 = _cameraMatrix1.getMat();
CvMat c_cameraMatrix2 = _cameraMatrix2.getMat();
CvMat c_distCoeffs1 = _distCoeffs1.getMat();
CvMat c_distCoeffs2 = _distCoeffs2.getMat();
CvMat c_R = _Rmat.getMat(), c_T = _Tmat.getMat();
int rtype = CV_64F;
R1.create(3, 3, rtype);
R2.create(3, 3, rtype);
P1.create(3, 4, rtype);
P2.create(3, 4, rtype);
Q.create(4, 4, rtype);
CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
CvMat matR = R, matT = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, matQ = Q;
cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,
imageSize, &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags,
alpha, newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
_Rmat1.create(3, 3, rtype);
_Rmat2.create(3, 3, rtype);
_Pmat1.create(3, 4, rtype);
_Pmat2.create(3, 4, rtype);
CvMat c_R1 = _Rmat1.getMat(), c_R2 = _Rmat2.getMat(), c_P1 = _Pmat1.getMat(), c_P2 = _Pmat2.getMat();
CvMat c_Q, *p_Q = 0;
if( _Qmat.needed() )
{
_Qmat.create(4, 4, rtype);
p_Q = &(c_Q = _Qmat.getMat());
}
cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, &c_distCoeffs1, &c_distCoeffs2,
imageSize, &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
}
bool cv::stereoRectifyUncalibrated( const Mat& points1, const Mat& points2,
const Mat& F, Size imgSize,
Mat& H1, Mat& H2, double threshold )
bool cv::stereoRectifyUncalibrated( const InputArray& _points1, const InputArray& _points2,
const InputArray& _Fmat, Size imgSize,
OutputArray _Hmat1, OutputArray _Hmat2, double threshold )
{
int rtype = CV_64F;
H1.create(3, 3, rtype);
H2.create(3, 3, rtype);
CvMat _pt1 = points1, _pt2 = points2, matF, *pF=0, _H1 = H1, _H2 = H2;
_Hmat1.create(3, 3, rtype);
_Hmat2.create(3, 3, rtype);
Mat F = _Fmat.getMat();
CvMat c_pt1 = _points1.getMat(), c_pt2 = _points2.getMat();
CvMat c_F, *p_F=0, c_H1 = _Hmat1.getMat(), c_H2 = _Hmat2.getMat();
if( F.size() == Size(3, 3) )
pF = &(matF = F);
return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;
p_F = &(c_F = F);
return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, imgSize, &c_H1, &c_H2, threshold) > 0;
}
cv::Mat cv::getOptimalNewCameraMatrix( const Mat& cameraMatrix, const Mat& distCoeffs,
Size imgSize, double alpha, Size newImgSize,
Rect* validPixROI )
cv::Mat cv::getOptimalNewCameraMatrix( const InputArray& _cameraMatrix,
const InputArray& _distCoeffs,
Size imgSize, double alpha, Size newImgSize,
Rect* validPixROI )
{
Mat newCameraMatrix(3, 3, cameraMatrix.type());
CvMat _cameraMatrix = cameraMatrix,
_distCoeffs = distCoeffs,
_newCameraMatrix = newCameraMatrix;
cvGetOptimalNewCameraMatrix(&_cameraMatrix, &_distCoeffs, imgSize,
alpha, &_newCameraMatrix,
CvMat c_cameraMatrix = _cameraMatrix.getMat(), c_distCoeffs = _distCoeffs.getMat();
Mat newCameraMatrix(3, 3, CV_MAT_TYPE(c_cameraMatrix.type));
CvMat c_newCameraMatrix = newCameraMatrix;
cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, imgSize,
alpha, &c_newCameraMatrix,
newImgSize, (CvRect*)validPixROI);
return newCameraMatrix;
}
void cv::RQDecomp3x3( const Mat& M, Mat& R, Mat& Q )
cv::Vec3d cv::RQDecomp3x3( const InputArray& _Mmat,
OutputArray _Rmat,
OutputArray _Qmat,
OutputArray _Qx,
OutputArray _Qy,
OutputArray _Qz )
{
R.create(3, 3, M.type());
Q.create(3, 3, M.type());
CvMat matM = M, matR = R, matQ = Q;
cvRQDecomp3x3(&matM, &matR, &matQ, 0, 0, 0, 0);
}
cv::Vec3d cv::RQDecomp3x3( const Mat& M, Mat& R, Mat& Q,
Mat& Qx, Mat& Qy, Mat& Qz )
{
R.create(3, 3, M.type());
Q.create(3, 3, M.type());
Mat M = _Mmat.getMat();
_Rmat.create(3, 3, M.type());
_Qmat.create(3, 3, M.type());
Vec3d eulerAngles;
CvMat matM = M, matR = R, matQ = Q, _Qx = Qx, _Qy = Qy, _Qz = Qz;
cvRQDecomp3x3(&matM, &matR, &matQ, &_Qx, &_Qy, &_Qz, (CvPoint3D64f*)&eulerAngles[0]);
CvMat matM = M, matR = _Rmat.getMat(), matQ = _Qmat.getMat(), Qx, Qy, Qz, *pQx=0, *pQy=0, *pQz=0;
if( _Qx.needed() )
{
_Qx.create(3, 3, M.type());
pQx = &(Qx = _Qx.getMat());
}
if( _Qy.needed() )
{
_Qy.create(3, 3, M.type());
pQy = &(Qy = _Qy.getMat());
}
if( _Qz.needed() )
{
_Qz.create(3, 3, M.type());
pQz = &(Qz = _Qz.getMat());
}
cvRQDecomp3x3(&matM, &matR, &matQ, pQx, pQy, pQz, (CvPoint3D64f*)&eulerAngles[0]);
return eulerAngles;
}
void cv::decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,
Mat& rotMatrix, Mat& transVect )
void cv::decomposeProjectionMatrix( const InputArray& _projMatrix, OutputArray _cameraMatrix,
OutputArray _rotMatrix, OutputArray _transVect,
OutputArray _rotMatrixX, OutputArray _rotMatrixY,
OutputArray _rotMatrixZ, OutputArray _eulerAngles )
{
Mat projMatrix = _projMatrix.getMat();
int type = projMatrix.type();
cameraMatrix.create(3, 3, type);
rotMatrix.create(3, 3, type);
transVect.create(4, 1, type);
CvMat _projMatrix = projMatrix, _cameraMatrix = cameraMatrix;
CvMat _rotMatrix = rotMatrix, _transVect = transVect;
cvDecomposeProjectionMatrix(&_projMatrix, &_cameraMatrix, &_rotMatrix,
&_transVect, 0, 0, 0, 0);
}
void cv::decomposeProjectionMatrix( const Mat& projMatrix, Mat& cameraMatrix,
Mat& rotMatrix, Mat& transVect,
Mat& rotMatrixX, Mat& rotMatrixY,
Mat& rotMatrixZ, Vec3d& eulerAngles )
{
int type = projMatrix.type();
cameraMatrix.create(3, 3, type);
rotMatrix.create(3, 3, type);
transVect.create(4, 1, type);
rotMatrixX.create(3, 3, type);
rotMatrixY.create(3, 3, type);
rotMatrixZ.create(3, 3, type);
CvMat _projMatrix = projMatrix, _cameraMatrix = cameraMatrix;
CvMat _rotMatrix = rotMatrix, _transVect = transVect;
CvMat _rotMatrixX = rotMatrixX, _rotMatrixY = rotMatrixY;
CvMat _rotMatrixZ = rotMatrixZ;
cvDecomposeProjectionMatrix(&_projMatrix, &_cameraMatrix, &_rotMatrix,
&_transVect, &_rotMatrixX, &_rotMatrixY,
&_rotMatrixZ, (CvPoint3D64f*)&eulerAngles[0]);
_cameraMatrix.create(3, 3, type);
_rotMatrix.create(3, 3, type);
_transVect.create(4, 1, type);
CvMat c_projMatrix = projMatrix, c_cameraMatrix = _cameraMatrix.getMat();
CvMat c_rotMatrix = _rotMatrix.getMat(), c_transVect = _transVect.getMat();
CvMat c_rotMatrixX, *p_rotMatrixX = 0;
CvMat c_rotMatrixY, *p_rotMatrixY = 0;
CvMat c_rotMatrixZ, *p_rotMatrixZ = 0;
CvPoint3D64f *p_eulerAngles = 0;
if( _rotMatrixX.needed() )
{
_rotMatrixX.create(3, 3, type);
p_rotMatrixX = &(c_rotMatrixX = _rotMatrixX.getMat());
}
if( _rotMatrixY.needed() )
{
_rotMatrixY.create(3, 3, type);
p_rotMatrixY = &(c_rotMatrixY = _rotMatrixY.getMat());
}
if( _rotMatrixZ.needed() )
{
_rotMatrixZ.create(3, 3, type);
p_rotMatrixZ = &(c_rotMatrixZ = _rotMatrixZ.getMat());
}
if( _eulerAngles.needed() )
{
_eulerAngles.create(3, 1, CV_64F, -1, true);
p_eulerAngles = (CvPoint3D64f*)_eulerAngles.getMat().data;
}
cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
&c_transVect, p_rotMatrixX, p_rotMatrixY,
p_rotMatrixZ, p_eulerAngles);
}
namespace cv
{
static void adjust3rdMatrix(const vector<vector<Point2f> >& imgpt1_0,
const vector<vector<Point2f> >& imgpt3_0,
static void adjust3rdMatrix(const InputArrayOfArrays& _imgpt1_0,
const InputArrayOfArrays& _imgpt3_0,
const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix3, const Mat& distCoeffs3,
const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
{
size_t n1 = _imgpt1_0.total(), n3 = _imgpt3_0.total();
vector<Point2f> imgpt1, imgpt3;
for( int i = 0; i < (int)std::min(imgpt1_0.size(), imgpt3_0.size()); i++ )
for( int i = 0; i < (int)std::min(n1, n3); i++ )
{
if( !imgpt1_0[i].empty() && !imgpt3_0[i].empty() )
{
std::copy(imgpt1_0[i].begin(), imgpt1_0[i].end(), std::back_inserter(imgpt1));
std::copy(imgpt3_0[i].begin(), imgpt3_0[i].end(), std::back_inserter(imgpt3));
}
Mat pt1 = _imgpt1_0.getMat(i), pt3 = _imgpt3_0.getMat(i);
int ni1 = pt1.checkVector(2, CV_32F), ni3 = pt3.checkVector(2, CV_32F);
CV_Assert( ni1 > 0 && ni1 == ni3 );
const Point2f* pt1data = pt1.ptr<Point2f>();
const Point2f* pt3data = pt3.ptr<Point2f>();
std::copy(pt1data, pt1data + ni1, std::back_inserter(imgpt1));
std::copy(pt3data, pt3data + ni3, std::back_inserter(imgpt3));
}
undistortPoints(Mat(imgpt1), imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
undistortPoints(Mat(imgpt3), imgpt3, cameraMatrix3, distCoeffs3, R3, P3);
undistortPoints(imgpt1, imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
undistortPoints(imgpt3, imgpt3, cameraMatrix3, distCoeffs3, R3, P3);
double y1_ = 0, y2_ = 0, y1y1_ = 0, y1y2_ = 0;
size_t n = imgpt1.size();
@@ -3555,20 +3642,31 @@ static void adjust3rdMatrix(const vector<vector<Point2f> >& imgpt1_0,
}
float cv::rectify3Collinear( const Mat& cameraMatrix1, const Mat& distCoeffs1,
const Mat& cameraMatrix2, const Mat& distCoeffs2,
const Mat& cameraMatrix3, const Mat& distCoeffs3,
const vector<vector<Point2f> >& imgpt1,
const vector<vector<Point2f> >& imgpt3,
Size imageSize, const Mat& R12, const Mat& T12, const Mat& R13, const Mat& T13,
Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q,
float cv::rectify3Collinear( const InputArray& _cameraMatrix1, const InputArray& _distCoeffs1,
const InputArray& _cameraMatrix2, const InputArray& _distCoeffs2,
const InputArray& _cameraMatrix3, const InputArray& _distCoeffs3,
const InputArrayOfArrays& _imgpt1,
const InputArrayOfArrays& _imgpt3,
Size imageSize, const InputArray& _Rmat12, const InputArray& _Tmat12,
const InputArray& _Rmat13, const InputArray& _Tmat13,
OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3,
OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3,
OutputArray _Qmat,
double alpha, Size /*newImgSize*/,
Rect* roi1, Rect* roi2, int flags )
{
// first, rectify the 1-2 stereo pair
stereoRectify( cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
imageSize, R12, T12, R1, R2, P1, P2, Q,
alpha, imageSize, roi1, roi2, flags );
stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2,
imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat,
flags, alpha, imageSize, roi1, roi2 );
Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat();
_Rmat3.create(3, 3, CV_64F);
_Pmat3.create(3, 4, CV_64F);
Mat P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat();
Mat R3 = _Rmat3.getMat(), P3 = _Pmat3.getMat();
// recompute rectification transforms for cameras 1 & 2.
Mat om, r_r, r_r13;
@@ -3608,8 +3706,9 @@ float cv::rectify3Collinear( const Mat& cameraMatrix1, const Mat& distCoeffs1,
P3.at<double>(0,3) *= P3.at<double>(0,0);
P3.at<double>(1,3) *= P3.at<double>(1,1);
if( !imgpt1.empty() && imgpt3.empty() )
adjust3rdMatrix(imgpt1, imgpt3, cameraMatrix1, distCoeffs1, cameraMatrix3, distCoeffs3, R1, R3, P1, P3);
if( !_imgpt1.empty() && _imgpt3.empty() )
adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
_cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);
return (float)((P3.at<double>(idx,3)/P3.at<double>(idx,idx))/
(P2.at<double>(idx,3)/P2.at<double>(idx,idx)));

View File

@@ -280,9 +280,9 @@ void CirclesGridClusterFinder::rectifyPatternPoints(const cv::Size &patternSize,
Mat homography = findHomography(Mat(sortedCorners), Mat(idealPoints), 0);
Mat rectifiedPointsMat;
transform(Mat(patternPoints), rectifiedPointsMat, homography);
transform(patternPoints, rectifiedPointsMat, homography);
rectifiedPatternPoints.clear();
convertPointsHomogeneous(rectifiedPointsMat, rectifiedPatternPoints);
convertPointsFromHomogeneous(rectifiedPointsMat, rectifiedPatternPoints);
}
void CirclesGridClusterFinder::parsePatternPoints(const cv::Size &patternSize, const std::vector<cv::Point2f> &patternPoints, const std::vector<cv::Point2f> &rectifiedPatternPoints, std::vector<cv::Point2f> &centers)
@@ -727,7 +727,7 @@ Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const vector<Point2f>&
Mat dstKeypointsMat;
transform(Mat(srcKeypoints), dstKeypointsMat, H);
vector<Point2f> dstKeypoints;
convertPointsHomogeneous(dstKeypointsMat, dstKeypoints);
convertPointsFromHomogeneous(dstKeypointsMat, dstKeypoints);
warpedKeypoints.clear();
for (size_t i = 0; i < dstKeypoints.size(); i++)
@@ -969,7 +969,7 @@ void CirclesGridFinder::findBasis(const vector<Point2f> &samples, vector<Point2f
Mat centers;
const int clustersCount = 4;
kmeans(Mat(samples).reshape(1, 0), clustersCount, bestLabels, termCriteria, parameters.kmeansAttempts,
KMEANS_RANDOM_CENTERS, &centers);
KMEANS_RANDOM_CENTERS, centers);
assert( centers.type() == CV_32FC1 );
vector<int> basisIndices;

View File

@@ -1044,114 +1044,95 @@ CV_IMPL void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
}
}
namespace cv
cv::Mat cv::findHomography( const InputArray& _points1, const InputArray& _points2,
int method, double ransacReprojThreshold, OutputArray _mask )
{
static Mat _findHomography( const Mat& points1, const Mat& points2,
int method, double ransacReprojThreshold,
vector<uchar>* mask )
{
CV_Assert(points1.isContinuous() && points2.isContinuous() &&
points1.type() == points2.type() &&
((points1.rows == 1 && points1.channels() == 2) ||
points1.cols*points1.channels() == 2) &&
((points2.rows == 1 && points2.channels() == 2) ||
points2.cols*points2.channels() == 2));
Mat points1 = _points1.getMat(), points2 = _points2.getMat();
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
Mat H(3, 3, CV_64F);
CvMat _pt1 = Mat(points1), _pt2 = Mat(points2);
CvMat matH = H, _mask, *pmask = 0;
if( mask )
CvMat _pt1 = points1, _pt2 = points2;
CvMat matH = H, c_mask, *p_mask = 0;
if( _mask.needed() )
{
mask->resize(points1.cols*points1.rows*points1.channels()/2);
pmask = &(_mask = cvMat(1, (int)mask->size(), CV_8U, (void*)&(*mask)[0]));
_mask.create(npoints, 1, CV_8U, -1, true);
p_mask = &(c_mask = _mask.getMat());
}
bool ok = cvFindHomography( &_pt1, &_pt2, &matH, method, ransacReprojThreshold, pmask ) > 0;
bool ok = cvFindHomography( &_pt1, &_pt2, &matH, method, ransacReprojThreshold, p_mask ) > 0;
if( !ok )
H = Scalar(0);
return H;
}
static Mat _findFundamentalMat( const Mat& points1, const Mat& points2,
cv::Mat cv::findFundamentalMat( const InputArray& _points1, const InputArray& _points2,
int method, double param1, double param2,
vector<uchar>* mask )
OutputArray _mask )
{
CV_Assert(points1.checkVector(2) >= 0 && points2.checkVector(2) >= 0 &&
(points1.depth() == CV_32F || points1.depth() == CV_32S) &&
points1.depth() == points2.depth());
Mat points1 = _points1.getMat(), points2 = _points2.getMat();
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
Mat F(3, 3, CV_64F);
CvMat _pt1 = Mat(points1), _pt2 = Mat(points2);
CvMat matF = F, _mask, *pmask = 0;
if( mask )
CvMat _pt1 = points1, _pt2 = points2;
CvMat matF = F, c_mask, *p_mask = 0;
if( _mask.needed() )
{
mask->resize(points1.cols*points1.rows*points1.channels()/2);
pmask = &(_mask = cvMat(1, (int)mask->size(), CV_8U, (void*)&(*mask)[0]));
_mask.create(npoints, 1, CV_8U, -1, true);
p_mask = &(c_mask = _mask.getMat());
}
int n = cvFindFundamentalMat( &_pt1, &_pt2, &matF, method, param1, param2, pmask );
int n = cvFindFundamentalMat( &_pt1, &_pt2, &matF, method, param1, param2, p_mask );
if( n <= 0 )
F = Scalar(0);
return F;
}
void cv::computeCorrespondEpilines( const InputArray& _points, int whichImage,
const InputArray& _Fmat, OutputArray _lines )
{
Mat points = _points.getMat();
int npoints = points.checkVector(2);
CV_Assert( npoints >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S));
}
cv::Mat cv::findHomography( const Mat& srcPoints, const Mat& dstPoints,
vector<uchar>& mask, int method,
double ransacReprojThreshold )
{
return _findHomography(srcPoints, dstPoints, method, ransacReprojThreshold, &mask);
_lines.create(npoints, 1, CV_32FC3, -1, true);
CvMat c_points = points, c_lines = _lines.getMat(), c_F = _Fmat.getMat();
cvComputeCorrespondEpilines(&c_points, whichImage, &c_F, &c_lines);
}
cv::Mat cv::findHomography( const Mat& srcPoints, const Mat& dstPoints,
int method, double ransacReprojThreshold )
void cv::convertPointsFromHomogeneous( const InputArray& _src, OutputArray _dst )
{
return _findHomography(srcPoints, dstPoints, method, ransacReprojThreshold, 0);
}
Mat src = _src.getMat();
int npoints = src.checkVector(3), cn = 3;
if( npoints < 0 )
{
npoints = src.checkVector(4);
if( npoints >= 0 )
cn = 4;
}
CV_Assert( npoints >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
cv::Mat cv::findFundamentalMat( const Mat& points1, const Mat& points2,
vector<uchar>& mask, int method, double param1, double param2 )
{
return _findFundamentalMat( points1, points2, method, param1, param2, &mask );
_dst.create(npoints, 1, CV_MAKETYPE(CV_32F, cn-1));
CvMat c_src = src, c_dst = _dst.getMat();
cvConvertPointsHomogeneous(&c_src, &c_dst);
}
cv::Mat cv::findFundamentalMat( const Mat& points1, const Mat& points2,
int method, double param1, double param2 )
void cv::convertPointsToHomogeneous( const InputArray& _src, OutputArray _dst )
{
return _findFundamentalMat( points1, points2, method, param1, param2, 0 );
}
void cv::computeCorrespondEpilines( const Mat& points, int whichImage,
const Mat& F, vector<Vec3f>& lines )
{
CV_Assert(points.checkVector(2) >= 0 &&
(points.depth() == CV_32F || points.depth() == CV_32S));
Mat src = _src.getMat();
int npoints = src.checkVector(2), cn = 2;
if( npoints < 0 )
{
npoints = src.checkVector(3);
if( npoints >= 0 )
cn = 3;
}
CV_Assert( npoints >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
lines.resize(points.cols*points.rows*points.channels()/2);
CvMat _points = points, _lines = Mat(lines), matF = F;
cvComputeCorrespondEpilines(&_points, whichImage, &matF, &_lines);
}
void cv::convertPointsHomogeneous( const Mat& src, vector<Point3f>& dst )
{
int srccn = src.checkVector(2) >= 0 ? 2 : src.checkVector(4) >= 0 ? 4 : -1;
CV_Assert( srccn > 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
dst.resize(src.cols*src.rows*src.channels()/srccn);
CvMat _src = src, _dst = Mat(dst);
cvConvertPointsHomogeneous(&_src, &_dst);
}
void cv::convertPointsHomogeneous( const Mat& src, vector<Point2f>& dst )
{
CV_Assert(src.checkVector(3) >= 0 &&
(src.depth() == CV_32F || src.depth() == CV_32S));
dst.resize(src.cols*src.rows*src.channels()/3);
CvMat _src = Mat(src), _dst = Mat(dst);
cvConvertPointsHomogeneous(&_src, &_dst);
_dst.create(npoints, 1, CV_MAKETYPE(CV_32F, cn+1));
CvMat c_src = src, c_dst = _dst.getMat();
cvConvertPointsHomogeneous(&c_src, &c_dst);
}
/* End of file. */

View File

@@ -452,34 +452,34 @@ bool cv::Affine3DEstimator::checkSubset( const CvMat* ms1, int count )
return j == i;
}
int cv::estimateAffine3D(const Mat& from, const Mat& to, Mat& out, vector<uchar>& outliers, double param1, double param2)
int cv::estimateAffine3D(const InputArray& _from, const InputArray& _to,
OutputArray _out, OutputArray _outliers,
double param1, double param2)
{
int count = from.cols*from.rows*from.channels()/3;
Mat from = _from.getMat(), to = _to.getMat();
int count = from.checkVector(3, CV_32F);
CV_Assert( count >= 4 && from.isContinuous() && to.isContinuous() &&
from.depth() == CV_32F && to.depth() == CV_32F &&
((from.rows == 1 && from.channels() == 3) || from.cols*from.channels() == 3) &&
((to.rows == 1 && to.channels() == 3) || to.cols*to.channels() == 3) &&
count == to.cols*to.rows*to.channels()/3);
CV_Assert( count >= 0 && to.checkVector(3, CV_32F) == count );
out.create(3, 4, CV_64F);
outliers.resize(count);
fill(outliers.begin(), outliers.end(), (uchar)1);
_out.create(3, 4, CV_64F);
Mat out = _out.getMat();
_outliers.create(count, 1, CV_8U, -1, true);
Mat outliers = _outliers.getMat();
outliers = Scalar::all(1);
vector<Point3d> dFrom;
vector<Point3d> dTo;
copy(from.ptr<Point3f>(), from.ptr<Point3f>() + count, back_inserter(dFrom));
copy(to.ptr<Point3f>(), to.ptr<Point3f>() + count, back_inserter(dTo));
Mat dFrom, dTo;
from.convertTo(dFrom, CV_64F);
to.convertTo(dTo, CV_64F);
CvMat F3x4 = out;
CvMat mask = cvMat( 1, count, CV_8U, &outliers[0] );
CvMat m1 = cvMat( 1, count, CV_64FC3, &dFrom[0] );
CvMat m2 = cvMat( 1, count, CV_64FC3, &dTo[0] );
CvMat mask = outliers;
CvMat m1 = dFrom;
CvMat m2 = dTo;
const double epsilon = numeric_limits<double>::epsilon();
param1 = param1 <= 0 ? 3 : param1;
param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;
return Affine3DEstimator().runRANSAC(&m1,& m2, &F3x4, &mask, param1, param2 );
return Affine3DEstimator().runRANSAC(&m1, &m2, &F3x4, &mask, param1, param2 );
}

View File

@@ -223,9 +223,15 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
return 1;
}
}
}
bool find4QuadCornerSubpix(const Mat& img, std::vector<Point2f>& corners, Size region_size)
bool cv::find4QuadCornerSubpix(const InputArray& _img, InputOutputArray _corners, Size region_size)
{
Mat img = _img.getMat(), cornersM = _corners.getMat();
int ncorners = cornersM.checkVector(2, CV_32F);
CV_Assert( ncorners >= 0 );
Point2f* corners = cornersM.ptr<Point2f>();
const int nbins = 256;
float ranges[] = {0, 256};
const float* _ranges = ranges;
@@ -238,7 +244,7 @@ bool find4QuadCornerSubpix(const Mat& img, std::vector<Point2f>& corners, Size r
Mat black_comp, white_comp;
for(size_t i = 0; i < corners.size(); i++)
for(int i = 0; i < ncorners; i++)
{
int channels = 0;
Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
@@ -362,5 +368,3 @@ bool find4QuadCornerSubpix(const Mat& img, std::vector<Point2f>& corners, Size r
return true;
}
}; // namespace std

View File

@@ -1,324 +1,329 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
void cv::solvePnP( const Mat& opoints, const Mat& ipoints,
const Mat& cameraMatrix, const Mat& distCoeffs,
Mat& rvec, Mat& tvec, bool useExtrinsicGuess )
void cv::solvePnP( const InputArray& _opoints, const InputArray& _ipoints,
const InputArray& _cameraMatrix, const InputArray& _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess )
{
CV_Assert(opoints.isContinuous() && opoints.depth() == CV_32F &&
((opoints.rows == 1 && opoints.channels() == 3) ||
opoints.cols*opoints.channels() == 3) &&
ipoints.isContinuous() && ipoints.depth() == CV_32F &&
((ipoints.rows == 1 && ipoints.channels() == 2) ||
ipoints.cols*ipoints.channels() == 2));
rvec.create(3, 1, CV_64F);
tvec.create(3, 1, CV_64F);
CvMat _objectPoints = opoints, _imagePoints = ipoints;
CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
CvMat _rvec = rvec, _tvec = tvec;
cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints, &_cameraMatrix,
distCoeffs.data ? &_distCoeffs : 0,
&_rvec, &_tvec, useExtrinsicGuess );
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = opoints.checkVector(3, CV_32F);
CV_Assert( npoints >= 0 && npoints == ipoints.checkVector(2, CV_32F) );
_rvec.create(3, 1, CV_64F);
_tvec.create(3, 1, CV_64F);
CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
CvMat c_cameraMatrix = _cameraMatrix.getMat(), c_distCoeffs = _distCoeffs.getMat();
CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
}
namespace cv
{
namespace pnpransac
{
const int MIN_POINTS_COUNT = 4;
void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
namespace pnpransac
{
modif_points.create(1, points.cols, CV_32FC3);
Mat R(3, 3, CV_64FC1);
Rodrigues(rvec, R);
Mat transformation(3, 4, CV_64F);
Mat r = transformation.colRange(0, 2);
R.copyTo(r);
Mat t = transformation.colRange(3, 4);
tvec.copyTo(t);
transform(points, modif_points, transformation);
}
class Mutex
{
public:
Mutex() {}
void lock()
const int MIN_POINTS_COUNT = 4;
void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
{
#ifdef HAVE_TBB
slock.acquire(resultsMutex);
#endif
modif_points.create(1, points.cols, CV_32FC3);
Mat R(3, 3, CV_64FC1);
Rodrigues(rvec, R);
Mat transformation(3, 4, CV_64F);
Mat r = transformation.colRange(0, 2);
R.copyTo(r);
Mat t = transformation.colRange(3, 4);
tvec.copyTo(t);
transform(points, modif_points, transformation);
}
void unlock()
class Mutex
{
#ifdef HAVE_TBB
slock.release();
#endif
}
private:
#ifdef HAVE_TBB
tbb::mutex resultsMutex;
tbb::mutex::scoped_lock slock;
#endif
};
struct CameraParameters
{
void init(Mat _intrinsics, Mat _distCoeffs)
{
_intrinsics.copyTo(intrinsics);
_distCoeffs.copyTo(distortion);
}
Mat intrinsics;
Mat distortion;
};
struct Parameters
{
int iterationsCount;
float reprojectionError;
int minInliersCount;
bool useExtrinsicGuess;
CameraParameters camera;
};
void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
{
Mat modelObjectPoints(1, MIN_POINTS_COUNT, CV_32FC3), modelImagePoints(1, MIN_POINTS_COUNT, CV_32FC2);
for (size_t i = 0, colIndex = 0; i < pointsMask.size(); i++)
{
if (pointsMask[i])
{
Mat colModelImagePoints = modelImagePoints(Rect(colIndex, 0, 1, 1));
imagePoints.col(i).copyTo(colModelImagePoints);
Mat colModelObjectPoints = modelObjectPoints(Rect(colIndex, 0, 1, 1));
objectPoints.col(i).copyTo(colModelObjectPoints);
colIndex = colIndex+1;
}
}
//filter same 3d points, hang in solvePnP
double eps = 1e-10;
int num_same_points = 0;
for (int i = 0; i < MIN_POINTS_COUNT; i++)
for (int j = i + 1; j < MIN_POINTS_COUNT; j++)
{
if (norm(modelObjectPoints.at<Vec3f>(0, i) - modelObjectPoints.at<Vec3f>(0, j)) < eps)
num_same_points++;
}
if (num_same_points > 0)
return;
Mat localRvec, localTvec;
rvecInit.copyTo(localRvec);
tvecInit.copyTo(localTvec);
solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, params.useExtrinsicGuess);
vector<Point2f> projected_points;
projected_points.resize(objectPoints.cols);
projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
Mat rotatedPoints;
project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
vector<int> localInliers;
for (size_t i = 0; i < objectPoints.cols; i++)
{
Point2f p(imagePoints.at<Vec2f>(0, i)[0], imagePoints.at<Vec2f>(0, i)[1]);
if ((norm(p - projected_points[i]) < params.reprojectionError)
&& (rotatedPoints.at<Vec3f>(0, i)[2] > 0)) //hack
{
localInliers.push_back(i);
}
}
if (localInliers.size() > inliers.size())
{
resultsMutex.lock();
inliers.clear();
inliers.resize(localInliers.size());
memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
resultsMutex.unlock();
}
}
class PnPSolver
{
public:
void operator()( const BlockedRange& r ) const
{
vector<char> pointsMask(objectPoints.cols, 0);
memset(&pointsMask[0], 1, MIN_POINTS_COUNT );
for( size_t i=r.begin(); i!=r.end(); ++i )
public:
Mutex() {}
void lock()
{
generateVar(pointsMask);
pnpTask(pointsMask, objectPoints, imagePoints, parameters,
inliers, rvec, tvec, initRvec, initTvec, syncMutex);
if ((int)inliers.size() > parameters.minInliersCount)
{
#ifdef HAVE_TBB
tbb::task::self().cancel_group_execution();
#else
break;
#endif
}
#ifdef HAVE_TBB
slock.acquire(resultsMutex);
#endif
}
void unlock()
{
#ifdef HAVE_TBB
slock.release();
#endif
}
private:
#ifdef HAVE_TBB
tbb::mutex resultsMutex;
tbb::mutex::scoped_lock slock;
#endif
};
struct CameraParameters
{
void init(Mat _intrinsics, Mat _distCoeffs)
{
_intrinsics.copyTo(intrinsics);
_distCoeffs.copyTo(distortion);
}
Mat intrinsics;
Mat distortion;
};
struct Parameters
{
int iterationsCount;
float reprojectionError;
int minInliersCount;
bool useExtrinsicGuess;
CameraParameters camera;
};
void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
{
Mat modelObjectPoints(1, MIN_POINTS_COUNT, CV_32FC3), modelImagePoints(1, MIN_POINTS_COUNT, CV_32FC2);
for (size_t i = 0, colIndex = 0; i < pointsMask.size(); i++)
{
if (pointsMask[i])
{
Mat colModelImagePoints = modelImagePoints(Rect(colIndex, 0, 1, 1));
imagePoints.col(i).copyTo(colModelImagePoints);
Mat colModelObjectPoints = modelObjectPoints(Rect(colIndex, 0, 1, 1));
objectPoints.col(i).copyTo(colModelObjectPoints);
colIndex = colIndex+1;
}
}
//filter same 3d points, hang in solvePnP
double eps = 1e-10;
int num_same_points = 0;
for (int i = 0; i < MIN_POINTS_COUNT; i++)
for (int j = i + 1; j < MIN_POINTS_COUNT; j++)
{
if (norm(modelObjectPoints.at<Vec3f>(0, i) - modelObjectPoints.at<Vec3f>(0, j)) < eps)
num_same_points++;
}
if (num_same_points > 0)
return;
Mat localRvec, localTvec;
rvecInit.copyTo(localRvec);
tvecInit.copyTo(localTvec);
solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, params.useExtrinsicGuess);
vector<Point2f> projected_points;
projected_points.resize(objectPoints.cols);
projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
Mat rotatedPoints;
project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
vector<int> localInliers;
for (int i = 0; i < objectPoints.cols; i++)
{
Point2f p(imagePoints.at<Vec2f>(0, i)[0], imagePoints.at<Vec2f>(0, i)[1]);
if ((norm(p - projected_points[i]) < params.reprojectionError)
&& (rotatedPoints.at<Vec3f>(0, i)[2] > 0)) //hack
{
localInliers.push_back(i);
}
}
if (localInliers.size() > inliers.size())
{
resultsMutex.lock();
inliers.clear();
inliers.resize(localInliers.size());
memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
resultsMutex.unlock();
}
}
PnPSolver(const Mat& objectPoints, const Mat& imagePoints, const Parameters& parameters,
Mat& rvec, Mat& tvec, vector<int>& inliers):
objectPoints(objectPoints), imagePoints(imagePoints), parameters(parameters),
rvec(rvec), tvec(tvec), inliers(inliers)
class PnPSolver
{
rvec.copyTo(initRvec);
tvec.copyTo(initTvec);
}
private:
const Mat& objectPoints;
const Mat& imagePoints;
const Parameters& parameters;
vector<int>& inliers;
Mat &rvec, &tvec;
Mat initRvec, initTvec;
static RNG generator;
static Mutex syncMutex;
void generateVar(vector<char>& mask) const
{
size_t size = mask.size();
for (size_t i = 0; i < size; i++)
{
int i1 = generator.uniform(0, size);
int i2 = generator.uniform(0, size);
char curr = mask[i1];
mask[i1] = mask[i2];
mask[i2] = curr;
}
}
};
Mutex PnPSolver::syncMutex;
RNG PnPSolver::generator;
}
}
void cv::solvePnPRansac(const Mat& opoints, const Mat& ipoints,
const Mat& cameraMatrix, const Mat& distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess,
int iterationsCount, float reprojectionError, int minInliersCount, vector<int>* inliers)
{
CV_Assert(opoints.isContinuous());
CV_Assert(opoints.depth() == CV_32F);
CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
CV_Assert(ipoints.isContinuous());
CV_Assert(ipoints.depth() == CV_32F);
CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
rvec.create(3, 1, CV_64FC1);
tvec.create(3, 1, CV_64FC1);
Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
if (minInliersCount <= 0)
minInliersCount = objectPoints.cols;
cv::pnpransac::Parameters params;
params.iterationsCount = iterationsCount;
params.minInliersCount = minInliersCount;
params.reprojectionError = reprojectionError;
params.useExtrinsicGuess = useExtrinsicGuess;
params.camera.init(cameraMatrix, distCoeffs);
vector<int> localInliers;
Mat localRvec, localTvec;
rvec.copyTo(localRvec);
tvec.copyTo(localTvec);
if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
{
parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
localRvec, localTvec, localInliers));
}
if (localInliers.size() >= pnpransac::MIN_POINTS_COUNT)
{
size_t pointsCount = localInliers.size();
Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
int index;
for (size_t i = 0; i < localInliers.size(); i++)
{
index = localInliers[i];
Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
imagePoints.col(index).copyTo(colInlierImagePoints);
Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
objectPoints.col(index).copyTo(colInlierObjectPoints);
public:
void operator()( const BlockedRange& r ) const
{
vector<char> pointsMask(objectPoints.cols, 0);
memset(&pointsMask[0], 1, MIN_POINTS_COUNT );
for( int i=r.begin(); i!=r.end(); ++i )
{
generateVar(pointsMask);
pnpTask(pointsMask, objectPoints, imagePoints, parameters,
inliers, rvec, tvec, initRvec, initTvec, syncMutex);
if ((int)inliers.size() > parameters.minInliersCount)
{
#ifdef HAVE_TBB
tbb::task::self().cancel_group_execution();
#else
break;
#endif
}
}
}
PnPSolver(const Mat& objectPoints, const Mat& imagePoints, const Parameters& parameters,
Mat& rvec, Mat& tvec, vector<int>& inliers):
objectPoints(objectPoints), imagePoints(imagePoints), parameters(parameters),
rvec(rvec), tvec(tvec), inliers(inliers)
{
rvec.copyTo(initRvec);
tvec.copyTo(initTvec);
}
private:
const Mat& objectPoints;
const Mat& imagePoints;
const Parameters& parameters;
Mat &rvec, &tvec;
vector<int>& inliers;
Mat initRvec, initTvec;
static RNG generator;
static Mutex syncMutex;
void generateVar(vector<char>& mask) const
{
size_t size = mask.size();
for (size_t i = 0; i < size; i++)
{
int i1 = generator.uniform(0, size);
int i2 = generator.uniform(0, size);
char curr = mask[i1];
mask[i1] = mask[i2];
mask[i2] = curr;
}
}
};
Mutex PnPSolver::syncMutex;
RNG PnPSolver::generator;
}
solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true);
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
if (inliers)
*inliers = localInliers;
}
else
{
tvec.setTo(Scalar(0));
Mat R = Mat::ones(3, 3, CV_64F);
Rodrigues(R, rvec);
}
return;
}
// Robust pose estimation: runs RANSAC over minimal 4-point subsets
// (pnpransac::MIN_POINTS_COUNT == 4, defined earlier in this file),
// then refines the pose on the consensus set with a final solvePnP call.
// _opoints: 3D object points (1xN 3-channel or Nx3, CV_32F).
// _ipoints: corresponding 2D image points (1xN 2-channel or Nx2, CV_32F).
// _rvec/_tvec: output rotation (Rodrigues) and translation, CV_64F 3x1.
// _inliers: optional output vector of inlier indices.
void cv::solvePnPRansac(const InputArray& _opoints, const InputArray& _ipoints,
const InputArray& _cameraMatrix, const InputArray& _distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
int iterationsCount, float reprojectionError, int minInliersCount,
OutputArray _inliers)
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
// Accept either a 1xN multi-channel row or an Nx3 / Nx2 single-channel layout.
CV_Assert(opoints.isContinuous());
CV_Assert(opoints.depth() == CV_32F);
CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
CV_Assert(ipoints.isContinuous());
CV_Assert(ipoints.depth() == CV_32F);
CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
_rvec.create(3, 1, CV_64FC1);
_tvec.create(3, 1, CV_64FC1);
Mat rvec = _rvec.getMat();
Mat tvec = _tvec.getMat();
// Normalize both point sets to a single row of 3-/2-channel elements.
Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
// Non-positive threshold means "require all points to be inliers".
if (minInliersCount <= 0)
minInliersCount = objectPoints.cols;
cv::pnpransac::Parameters params;
params.iterationsCount = iterationsCount;
params.minInliersCount = minInliersCount;
params.reprojectionError = reprojectionError;
params.useExtrinsicGuess = useExtrinsicGuess;
params.camera.init(cameraMatrix, distCoeffs);
vector<int> localInliers;
Mat localRvec, localTvec;
// Seed the working pose with the caller-provided guess (used when
// useExtrinsicGuess is set inside the per-iteration solvePnP calls).
rvec.copyTo(localRvec);
tvec.copyTo(localTvec);
if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
{
// Run the RANSAC iterations in parallel; PnPSolver keeps the best
// consensus set/pose under its internal mutex.
parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
localRvec, localTvec, localInliers));
}
if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
{
// Gather the inlier correspondences into compact single-row matrices.
size_t pointsCount = localInliers.size();
Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
int index;
for (size_t i = 0; i < localInliers.size(); i++)
{
index = localInliers[i];
Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
imagePoints.col(index).copyTo(colInlierImagePoints);
Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
objectPoints.col(index).copyTo(colInlierObjectPoints);
}
// Final refinement on all inliers, starting from the RANSAC pose.
solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true);
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
if (_inliers.needed())
Mat(localInliers).copyTo(_inliers);
}
else
{
// Degenerate case (too few points / no consensus): return the
// identity rotation (zero Rodrigues vector) and zero translation.
tvec.setTo(Scalar(0));
Mat R = Mat::eye(3, 3, CV_64F);
Rodrigues(R, rvec);
if( _inliers.needed() )
_inliers.release();
}
return;
}

View File

@@ -920,10 +920,13 @@ void StereoBM::init(int _preset, int _ndisparities, int _SADWindowSize)
state->SADWindowSize = _SADWindowSize;
}
void StereoBM::operator()( const Mat& left, const Mat& right, Mat& disparity, int disptype )
void StereoBM::operator()( const InputArray& _left, const InputArray& _right,
OutputArray _disparity, int disptype )
{
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( disptype == CV_16S || disptype == CV_32F );
disparity.create(left.size(), disptype);
_disparity.create(left.size(), disptype);
Mat disparity = _disparity.getMat();
findStereoCorrespondenceBM(left, right, disparity, state);
}

View File

@@ -819,8 +819,49 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
typedef cv::Point_<short> Point2s;
void filterSpeckles( Mat& img, double _newval, int maxSpeckleSize, double _maxDiff, Mat& _buf )
void StereoSGBM::operator ()( const InputArray& _left, const InputArray& _right,
OutputArray _disp )
{
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == DataType<PixType>::depth );
_disp.create( left.size(), CV_16S );
Mat disp = _disp.getMat();
computeDisparitySGBM( left, right, disp, *this, buffer );
medianBlur(disp, disp, 3);
if( speckleWindowSize > 0 )
filterSpeckles(disp, (minDisparity - 1)*DISP_SCALE, speckleWindowSize, DISP_SCALE*speckleRange, buffer);
}
// Compute the rectangle where the disparity map is fully valid: intersect the
// two rectified ROIs (the right one shifted by the disparity search range) and
// shrink the result by half the SAD aggregation window on every side.
Rect getValidDisparityROI( Rect roi1, Rect roi2,
                           int minDisparity,
                           int numberOfDisparities,
                           int SADWindowSize )
{
    const int border = SADWindowSize/2;
    const int minD = minDisparity;
    const int maxD = minDisparity + numberOfDisparities - 1;

    Rect valid;
    valid.x = max(roi1.x, roi2.x + maxD) + border;
    valid.y = max(roi1.y, roi2.y) + border;
    valid.width  = min(roi1.x + roi1.width,  roi2.x + roi2.width - minD) - border - valid.x;
    valid.height = min(roi1.y + roi1.height, roi2.y + roi2.height)       - border - valid.y;

    // An empty rectangle is returned when the constraints leave no valid area.
    if( valid.width <= 0 || valid.height <= 0 )
        valid = Rect();
    return valid;
}
}
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_16SC1 );
int newVal = cvRound(_newval);
@@ -916,44 +957,11 @@ void filterSpeckles( Mat& img, double _newval, int maxSpeckleSize, double _maxDi
}
}
}
// Semi-global block matching entry point (Mat-based overload).
// NOTE(review): this appears to be the pre-refactor overload superseded by the
// InputArray/OutputArray version in this same chunk — confirm it should remain.
void StereoSGBM::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
// Inputs must match in size and type, with the depth computeDisparitySGBM expects.
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == DataType<PixType>::depth );
// SGBM always produces fixed-point CV_16S disparities.
disp.create( left.size(), CV_16S );
computeDisparitySGBM( left, right, disp, *this, buffer );
// Post-processing: median smoothing, then optional speckle suppression.
medianBlur(disp, disp, 3);
if( speckleWindowSize > 0 )
filterSpeckles(disp, (minDisparity - 1)*DISP_SCALE, speckleWindowSize, DISP_SCALE*speckleRange, buffer);
}
// Compute the rectangle where the disparity map is fully valid.
// NOTE(review): identical to the getValidDisparityROI defined earlier in this
// chunk — looks like leftover of the refactor; confirm one copy should be removed.
Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity,
int numberOfDisparities,
int SADWindowSize )
{
// Half the SAD aggregation window: the matcher cannot fill this border.
int SW2 = SADWindowSize/2;
// Disparity search range [minD, maxD].
int minD = minDisparity, maxD = minDisparity + numberOfDisparities - 1;
// Intersect roi1 with roi2 shifted by the disparity range, minus the border.
int xmin = max(roi1.x, roi2.x + maxD) + SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int ymin = max(roi1.y, roi2.y) + SW2;
int ymax = min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;
Rect r(xmin, ymin, xmax - xmin, ymax - ymin);
// Empty Rect signals that no fully-valid region exists.
return r.width > 0 && r.height > 0 ? r : Rect();
}
void validateDisparity( Mat& disp, const Mat& cost, int minDisparity, int numberOfDisparities, int disp12MaxDiff )
void cv::validateDisparity( InputOutputArray _disp, const InputArray& _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
Mat disp = _disp.getMat(), cost = _cost.getMat();
int cols = disp.cols, rows = disp.rows;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities;
int x, minX1 = max(maxD, 0), maxX1 = cols + min(minD, 0);
@@ -1030,8 +1038,6 @@ void validateDisparity( Mat& disp, const Mat& cost, int minDisparity, int number
}
}
}
}
CvRect cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize )