Removed trailing whitespace, reduced the number of size_t-to-int conversion warnings (under MSVC 2010 x64), and added handling for launching the samples without parameters (they should no longer terminate abnormally when no parameters are supplied).
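Below is a minimal standalone sketch (not taken from the sample itself; the program name, option names, and the usage() helper are made up for illustration) of the two patterns this commit applies throughout the sample: an early argc check so a launch without parameters prints usage instead of reading past argv, and explicit (int) casts where size_t values meet int indices or printf("%d"), which is the kind of size_t-to-int conversion warning MSVC 2010 x64 reports.

// Hypothetical example, not part of build3dmodel.cpp.
#include <cstdio>
#include <vector>

static void usage()
{
    printf("Usage: sample -i <intrinsics_filename> -m <model_name>\n");
}

int main(int argc, char** argv)
{
    if( argc < 3 )              // launched without parameters: print help, exit cleanly
    {
        usage();
        return -1;
    }

    std::vector<const char*> args(argv + 1, argv + argc);

    // An int loop counter with an explicit (int) cast of size() avoids the
    // signed/unsigned comparison and size_t-to-int conversion warnings on x64,
    // where size_t is 64-bit and int is 32-bit.
    for( int i = 0; i < (int)args.size(); i++ )
        printf("arg %d: %s\n", i, args[i]);

    printf("total args = %d\n", (int)args.size()); // cast matches the %d specifier
    return 0;
}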
@@ -13,14 +13,15 @@
using namespace cv;
using namespace std;

void myhelp()
void help()
{
    printf("\nSigh: This program is not complete/will be replaced. \n"
        "So: Use this just to see hints of how to use things like Rodrigues\n"
        " conversions, finding the fundamental matrix, using descriptor\n"
        " finding and matching in features2d and using camera parameters\n"
        );
    printf("\nSigh: This program is not complete/will be replaced. \n"
        "So: Use this just to see hints of how to use things like Rodrigues\n"
        " conversions, finding the fundamental matrix, using descriptor\n"
        " finding and matching in features2d and using camera parameters\n"
        "Usage: build3dmodel -i <intrinsics_filename>\n"
        "\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n");
    return;
}

@@ -33,12 +34,12 @@ static bool readCameraMatrix(const string& filename,
    fs["image_height"] >> calibratedImageSize.height;
    fs["distortion_coefficients"] >> distCoeffs;
    fs["camera_matrix"] >> cameraMatrix;

    if( distCoeffs.type() != CV_64F )
        distCoeffs = Mat_<double>(distCoeffs);
    if( cameraMatrix.type() != CV_64F )
        cameraMatrix = Mat_<double>(cameraMatrix);

    return true;
}

@@ -154,19 +155,19 @@ static void findConstrainedCorrespondences(const Mat& _F,
{
    float F[9]={0};
    int dsize = descriptors1.cols;

    Mat Fhdr = Mat(3, 3, CV_32F, F);
    _F.convertTo(Fhdr, CV_32F);
    matches.clear();

    for( size_t i = 0; i < keypoints1.size(); i++ )
    for( int i = 0; i < (int)keypoints1.size(); i++ )
    {
        Point2f p1 = keypoints1[i].pt;
        double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
        int bestIdx1 = -1, bestIdx2 = -1;
        const float* d1 = descriptors1.ptr<float>(i);

        for( size_t j = 0; j < keypoints2.size(); j++ )
        for( int j = 0; j < (int)keypoints2.size(); j++ )
        {
            Point2f p2 = keypoints2[j].pt;
            double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +

@@ -177,7 +178,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
            const float* d2 = descriptors2.ptr<float>(j);
            double dist = 0;
            int k = 0;

            for( ; k <= dsize - 8; k += 8 )
            {
                float t0 = d1[k] - d2[k], t1 = d1[k+1] - d2[k+1];

@@ -186,11 +187,11 @@ static void findConstrainedCorrespondences(const Mat& _F,
                float t6 = d1[k+6] - d2[k+6], t7 = d1[k+7] - d2[k+7];
                dist += t0*t0 + t1*t1 + t2*t2 + t3*t3 +
                        t4*t4 + t5*t5 + t6*t6 + t7*t7;

                if( dist >= bestDist2 )
                    break;
            }

            if( dist < bestDist2 )
            {
                for( ; k < dsize; k++ )

@@ -198,7 +199,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
                    float t = d1[k] - d2[k];
                    dist += t*t;
                }

                if( dist < bestDist1 )
                {
                    bestDist2 = bestDist1;

@@ -213,7 +214,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
                }
            }
        }

        if( bestIdx1 >= 0 && bestDist1 < bestDist2*ratio )
        {
            Point2f p2 = keypoints1[bestIdx1].pt;

@@ -224,21 +225,21 @@ static void findConstrainedCorrespondences(const Mat& _F,
                continue;
            double threshold = bestDist1/ratio;
            const float* d22 = descriptors2.ptr<float>(bestIdx1);
            size_t i1 = 0;
            for( ; i1 < keypoints1.size(); i1++ )
            int i1 = 0;
            for( ; i1 < (int)keypoints1.size(); i1++ )
            {
                if( i1 == i )
                    continue;
                Point2f p1 = keypoints1[i1].pt;
                const float* d11 = descriptors1.ptr<float>(i1);
                double dist = 0;

                e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
                    p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
                    F[6]*p1.x + F[7]*p1.y + F[8];
                if( fabs(e) > eps )
                    continue;

                for( int k = 0; k < dsize; k++ )
                {
                    float t = d11[k] - d22[k];

@@ -246,7 +247,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
                    if( dist >= threshold )
                        break;
                }

                if( dist < threshold )
                    break;
            }

@@ -334,7 +335,7 @@ void triangulatePoint_test(void)
    randu(tvec1, Scalar::all(-10), Scalar::all(10));
    randu(rvec2, Scalar::all(-10), Scalar::all(10));
    randu(tvec2, Scalar::all(-10), Scalar::all(10));

    randu(objptmat, Scalar::all(-10), Scalar::all(10));
    double eps = 1e-2;
    randu(deltamat1, Scalar::all(-eps), Scalar::all(eps));

@@ -343,10 +344,10 @@ void triangulatePoint_test(void)
    Mat_<float> cameraMatrix(3,3);
    double fx = 1000., fy = 1010., cx = 400.5, cy = 300.5;
    cameraMatrix << fx, 0, cx, 0, fy, cy, 0, 0, 1;

    projectPoints(Mat(objpt)+Mat(delta1), rvec1, tvec1, cameraMatrix, Mat(), imgpt1);
    projectPoints(Mat(objpt)+Mat(delta2), rvec2, tvec2, cameraMatrix, Mat(), imgpt2);

    vector<Point3f> objptt(n);
    vector<Point2f> pts(2);
    vector<Mat> Rv(2), tv(2);

@@ -390,10 +391,10 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
                          PointModel& model )
{
    int progressBarSize = 10;

    const double Feps = 5;
    const double DescriptorRatio = 0.7;

    vector<vector<KeyPoint> > allkeypoints;
    vector<int> dstart;
    vector<float> alldescriptorsVec;

@@ -402,23 +403,23 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
    int descriptorSize = 0;
    Mat descriptorbuf;
    Set2i pairs, keypointsIdxMap;

    model.points.clear();
    model.didx.clear();

    dstart.push_back(0);

    size_t nimages = imageList.size();
    size_t nimagePairs = (nimages - 1)*nimages/2 - nimages;

    printf("\nComputing descriptors ");

    // 1. find all the keypoints and all the descriptors
    for( size_t i = 0; i < nimages; i++ )
    {
        Mat img = imread(imageList[i], 1), gray;
        cvtColor(img, gray, CV_BGR2GRAY);

        vector<KeyPoint> keypoints;
        detector->detect(gray, keypoints);
        descriptorExtractor->compute(gray, keypoints, descriptorbuf);

@@ -426,7 +427,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
        for( size_t k = 0; k < keypoints.size(); k++ )
            keypoints[k].pt += roiofs;
        allkeypoints.push_back(keypoints);

        Mat buf = descriptorbuf;
        if( !buf.isContinuous() || buf.type() != CV_32F )
        {

@@ -434,41 +435,41 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
            descriptorbuf.convertTo(buf, CV_32F);
        }
        descriptorSize = buf.cols;

        size_t prev = alldescriptorsVec.size();
        size_t delta = buf.rows*buf.cols;
        alldescriptorsVec.resize(prev + delta);
        std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
                  alldescriptorsVec.begin() + prev);
        dstart.push_back(dstart.back() + keypoints.size());
        dstart.push_back(dstart.back() + (int)keypoints.size());

        Mat R, t;
        unpackPose(poseList[i], R, t);
        Rs.push_back(R);
        ts.push_back(t);

        if( (i+1)*progressBarSize/nimages > i*progressBarSize/nimages )
        {
            putchar('.');
            fflush(stdout);
        }
    }

    Mat alldescriptors(alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
    Mat alldescriptors((int)alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
                       &alldescriptorsVec[0]);

    printf("\nOk. total images = %d. total keypoints = %d\n",
           (int)nimages, alldescriptors.rows);

    printf("\nFinding correspondences ");

    int pairsFound = 0;

    vector<Point2f> pts_k(2);
    vector<Mat> Rs_k(2), ts_k(2);
    //namedWindow("img1", 1);
    //namedWindow("img2", 1);

    // 2. find pairwise correspondences
    for( size_t i = 0; i < nimages; i++ )
        for( size_t j = i+1; j < nimages; j++ )

@@ -477,47 +478,47 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
            const vector<KeyPoint>& keypoints2 = allkeypoints[j];
            Mat descriptors1 = alldescriptors.rowRange(dstart[i], dstart[i+1]);
            Mat descriptors2 = alldescriptors.rowRange(dstart[j], dstart[j+1]);

            Mat F = getFundamentalMat(Rs[i], ts[i], Rs[j], ts[j], cameraMatrix);

            findConstrainedCorrespondences( F, keypoints1, keypoints2,
                                            descriptors1, descriptors2,
                                            pairwiseMatches, Feps, DescriptorRatio );

            //pairsFound += (int)pairwiseMatches.size();

            //Mat img1 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
            //Mat img2 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)j), 1);

            //double avg_err = 0;
            for( size_t k = 0; k < pairwiseMatches.size(); k++ )
            {
                int i1 = pairwiseMatches[k][0], i2 = pairwiseMatches[k][1];

                pts_k[0] = keypoints1[i1].pt;
                pts_k[1] = keypoints2[i2].pt;
                Rs_k[0] = Rs[i]; Rs_k[1] = Rs[j];
                ts_k[0] = ts[i]; ts_k[1] = ts[j];
                Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);

                vector<Point3f> objpts;
                objpts.push_back(objpt);
                vector<Point2f> imgpts1, imgpts2;
                projectPoints(Mat(objpts), Rs_k[0], ts_k[0], cameraMatrix, Mat(), imgpts1);
                projectPoints(Mat(objpts), Rs_k[1], ts_k[1], cameraMatrix, Mat(), imgpts2);

                double e1 = norm(imgpts1[0] - keypoints1[i1].pt);
                double e2 = norm(imgpts2[0] - keypoints2[i2].pt);
                if( e1 + e2 > 5 )
                    continue;

                pairsFound++;

                //model.points.push_back(objpt);
                pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
                pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
                keypointsIdxMap[Pair2i(i,i1)] = 1;
                keypointsIdxMap[Pair2i(j,i2)] = 1;
                keypointsIdxMap[Pair2i((int)i,i1)] = 1;
                keypointsIdxMap[Pair2i((int)j,i2)] = 1;
                //CV_Assert(e1 < 5 && e2 < 5);
                //Scalar color(rand()%256,rand()%256, rand()%256);
                //circle(img1, keypoints1[i1].pt, 2, color, -1, CV_AA);

@@ -527,41 +528,41 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
                //imshow("img1", img1);
                //imshow("img2", img2);
                //waitKey();

            if( (i+1)*progressBarSize/nimagePairs > i*progressBarSize/nimagePairs )
            {
                putchar('.');
                fflush(stdout);
            }
        }

    printf("\nOk. Total pairs = %d\n", pairsFound );

    // 3. build the keypoint clusters
    vector<Pair2i> keypointsIdx;
    Set2i::iterator kpidx_it = keypointsIdxMap.begin(), kpidx_end = keypointsIdxMap.end();

    for( ; kpidx_it != kpidx_end; ++kpidx_it )
        keypointsIdx.push_back(kpidx_it->first);

    printf("\nClustering correspondences ");

    vector<int> labels;
    int nclasses = partition( keypointsIdx, labels, EqKeypoints(&dstart, &pairs) );

    printf("\nOk. Total classes (i.e. 3d points) = %d\n", nclasses );

    model.descriptors.create(keypointsIdx.size(), descriptorSize, CV_32F);
    model.descriptors.create((int)keypointsIdx.size(), descriptorSize, CV_32F);
    model.didx.resize(nclasses);
    model.points.resize(nclasses);

    vector<vector<Pair2i> > clusters(nclasses);
    for( size_t i = 0; i < keypointsIdx.size(); i++ )
        clusters[labels[i]].push_back(keypointsIdx[i]);

    // 4. now compute 3D points corresponding to each cluster and fill in the model data
    printf("\nComputing 3D coordinates ");

    int globalDIdx = 0;
    for( int k = 0; k < nclasses; k++ )
    {

@@ -575,7 +576,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
            int imgidx = clusters[k][i].first, ptidx = clusters[k][i].second;
            Mat dstrow = model.descriptors.row(globalDIdx);
            alldescriptors.row(dstart[imgidx] + ptidx).copyTo(dstrow);

            model.didx[k][i] = globalDIdx++;
            pts_k[i] = allkeypoints[imgidx][ptidx].pt;
            Rs_k[i] = Rs[imgidx];

@@ -583,7 +584,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
        }
        Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);
        model.points[k] = objpt;

        if( (i+1)*progressBarSize/nclasses > i*progressBarSize/nclasses )
        {
            putchar('.');

@@ -600,10 +601,10 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
    {
        img = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
        projectPoints(Mat(model.points), Rs[i], ts[i], cameraMatrix, Mat(), imagePoints);

        for( int k = 0; k < (int)imagePoints.size(); k++ )
            circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, CV_AA, 0);

        imshow("Test", img);
        int c = waitKey();
        if( c == 'q' || c == 'Q' )

@@ -614,75 +615,73 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,

int main(int argc, char** argv)
{
    triangulatePoint_test();

    const char* help = "Usage: build3dmodel -i <intrinsics_filename>\n"
        "\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n";

    if(argc < 3)
    {
        puts(help);
        myhelp();
        return 0;
    }
    const char* intrinsicsFilename = 0;
    const char* modelName = 0;
    const char* modelName = 0;
    const char* detectorName = "SURF";
    const char* descriptorExtractorName = "SURF";

    vector<Point3f> modelBox;
    vector<string> imageList;
    vector<Rect> roiList;
    vector<Vec6f> poseList;

    vector<string> imageList;
    vector<Rect> roiList;
    vector<Vec6f> poseList;

    if(argc < 3)
    {
        help();
        return -1;
    }

    for( int i = 1; i < argc; i++ )
    {
        if( strcmp(argv[i], "-i") == 0 )
            intrinsicsFilename = argv[++i];
        else if( strcmp(argv[i], "-m") == 0 )
            modelName = argv[++i];
        else if( strcmp(argv[i], "-d") == 0 )
            intrinsicsFilename = argv[++i];
        else if( strcmp(argv[i], "-m") == 0 )
            modelName = argv[++i];
        else if( strcmp(argv[i], "-d") == 0 )
            detectorName = argv[++i];
        else if( strcmp(argv[i], "-de") == 0 )
            descriptorExtractorName = argv[++i];
        else
        {
            printf("Incorrect option\n");
            puts(help);
            return 0;
        }
        else
        {
            help();
            printf("Incorrect option\n");
            return -1;
        }
    }

    if( !intrinsicsFilename || !modelName )
    {
        printf("Some of the required parameters are missing\n");
        puts(help);
        return 0;
    }

    if( !intrinsicsFilename || !modelName )
    {
        printf("Some of the required parameters are missing\n");
        help();
        return -1;
    }

    triangulatePoint_test();

    Mat cameraMatrix, distCoeffs;
    Size calibratedImageSize;
    readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize);

    Ptr<FeatureDetector> detector = FeatureDetector::create(detectorName);
    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create(descriptorExtractorName);

    string modelIndexFilename = format("%s_segm/frame_index.yml", modelName);
    if(!readModelViews( modelIndexFilename, modelBox, imageList, roiList, poseList))
    {
        printf("Can not read the model. Check the parameters and the working directory\n");
        puts(help);
        return 0;
    }

        help();
        return -1;
    }

    PointModel model;
    model.name = modelName;
    build3dmodel( detector, descriptorExtractor, modelBox,
                  imageList, roiList, poseList, cameraMatrix, model );
    string outputModelName = format("%s_model.yml.gz", modelName);

    printf("\nDone! Now saving the model ...\n");
    writeModel(outputModelName, modelName, model);

    return 0;
}