Removed trailing backspaces, reduced the number of size_t-to-int conversion warnings (under MSVC2010 x64), and added handling for samples launched without parameters (there should be no abnormal termination when no parameters are supplied).
parent 092beae2d5
commit 6e38b6aaed
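The diff below applies two recurring patterns across the library and the samples. A minimal sketch of both, for illustration only (it is not part of the commit; the file names and the keypoints vector are invented for the example):

// Illustrative sketch only -- not taken from this commit's diff.
// Pattern 1: fall back to default parameters instead of aborting when a
//            sample is launched without command-line arguments.
// Pattern 2: cast size_t values to int explicitly where an int is expected,
//            which silences the size_t-to-int conversion warnings (C4267)
//            that MSVC2010 emits for x64 builds.
#include <cstdio>
#include <string>
#include <vector>

int main(int argc, char** argv)
{
    // Pattern 1: defaults are used when no parameters are supplied.
    const char* object_filename = argc > 1 ? argv[1] : "box.png";
    const char* scene_filename  = argc > 2 ? argv[2] : "box_in_scene.png";
    std::printf("object: %s  scene: %s\n", object_filename, scene_filename);

    // Pattern 2: size_t loop index, cast to int at the point of use.
    std::vector<std::string> keypoints(3, "kp");
    for( size_t i = 0; i < keypoints.size(); i++ )
    {
        int class_id = (int)i;   // explicit cast avoids the x64 conversion warning
        std::printf("%s class_id=%d\n", keypoints[i].c_str(), class_id);
    }
    return 0;
}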
@@ -3187,7 +3187,7 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
     return distCoeffs;
 }

-}
+} // namespace cv


 void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
@@ -3361,7 +3361,8 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
     if( !(flags & CALIB_RATIONAL_MODEL) )
         distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);

-    size_t i, nimages = _objectPoints.total();
+    int i;
+    size_t nimages = _objectPoints.total();
     CV_Assert( nimages > 0 );
     Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
     collectCalibrationData( _objectPoints, _imagePoints, noArray(),
@@ -3373,10 +3374,10 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
     double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
                                           &c_cameraMatrix, &c_distCoeffs, &c_rvecM,
                                           &c_tvecM, flags );
-    _rvecs.create(nimages, 1, CV_64FC3);
-    _tvecs.create(nimages, 1, CV_64FC3);
+    _rvecs.create((int)nimages, 1, CV_64FC3);
+    _tvecs.create((int)nimages, 1, CV_64FC3);

-    for( i = 0; i < nimages; i++ )
+    for( i = 0; i < (int)nimages; i++ )
     {
         _rvecs.create(3, 1, CV_64F, i, true);
         _tvecs.create(3, 1, CV_64F, i, true);
@@ -376,13 +376,13 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
         channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
         // Use class_id member to get indices into initial keypoints vector
         for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
-            channelKeypoints[ci][ki].class_id = ki;
+            channelKeypoints[ci][ki].class_id = (int)ki;

         descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
         idxs[ci].resize( channelKeypoints[ci].size() );
         for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
         {
-            idxs[ci][ki] = ki;
+            idxs[ci][ki] = (int)ki;
         }
         std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
     }
@@ -127,7 +127,7 @@ HarrisResponse::HarrisResponse(const cv::Mat& image, double k) :
     dX_offsets_.resize(7 * 9);
     dY_offsets_.resize(7 * 9);
     std::vector<int>::iterator dX_offsets = dX_offsets_.begin(), dY_offsets = dY_offsets_.begin();
-    unsigned int image_step = image.step1();
+    unsigned int image_step = (unsigned int)image.step1();
     for (size_t y = 0; y <= 6 * image_step; y += image_step)
     {
         int dX_offset = y + 2, dY_offset = y + 2 * image_step;
@@ -20,7 +20,7 @@ void help()
         "see facedetect.cmd for one call:\n"
         "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n"
         "Hit any key to quit.\n"
-        "Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
+        "Using OpenCV version " << CV_VERSION << "\n" << endl;
 }

 void detectAndDraw( Mat& img,
@@ -130,6 +130,7 @@ int main( int argc, const char** argv )
         }

         waitKey(0);
+
_cleanup_:
         cvReleaseCapture( &capture );
     }
@@ -20,10 +20,9 @@ void help()
     "This program demonstrated the use of the SURF Detector and Descriptor using\n"
     "either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
     "on planar objects.\n"
-    "Call:\n"
-    "./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
-    );
-
+    "Usage:\n"
+    "./find_obj <object_filename> <scene_filename>, default is box.png and box_in_scene.png\n\n");
+    return;
 }

 // define whether to use approximate nearest-neighbor search
@@ -214,8 +213,19 @@ int main(int argc, char** argv)
     const char* object_filename = argc == 3 ? argv[1] : "box.png";
     const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";

-    CvMemStorage* storage = cvCreateMemStorage(0);
     help();

+    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
+    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
+    if( !object || !image )
+    {
+        fprintf( stderr, "Can not load %s and/or %s\n",
+            object_filename, scene_filename );
+        exit(-1);
+    }
+
+    CvMemStorage* storage = cvCreateMemStorage(0);
+
     cvNamedWindow("Object", 1);
     cvNamedWindow("Object Correspond", 1);
+
@@ -232,30 +242,24 @@ int main(int argc, char** argv)
         {{255,255,255}}
     };

-    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
-    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
-    if( !object || !image )
-    {
-        fprintf( stderr, "Can not load %s and/or %s\n"
-            "Usage: find_obj [<object_filename> <scene_filename>]\n",
-            object_filename, scene_filename );
-        exit(-1);
-    }
     IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
     cvCvtColor( object, object_color, CV_GRAY2BGR );

-    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
-    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
+    CvSeq* objectKeypoints = 0, *objectDescriptors = 0;
+    CvSeq* imageKeypoints = 0, *imageDescriptors = 0;
     int i;
     CvSURFParams params = cvSURFParams(500, 1);

     double tt = (double)cvGetTickCount();
     cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
     printf("Object Descriptors: %d\n", objectDescriptors->total);
+
     cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
     printf("Image Descriptors: %d\n", imageDescriptors->total);
     tt = (double)cvGetTickCount() - tt;
+
     printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));
+
     CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
     CvPoint dst_corners[4];
     IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
@@ -16,10 +16,13 @@ void help()
     "Format:" << endl <<
     " classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
     " or" << endl <<
-    " classifier_file(to read) test_image"
-    "Using OpenCV version %s\n" << CV_VERSION << "\n"
-    << endl;
+    " classifier_file(to read) test_image" << "\n" << endl <<
+    "Using OpenCV version " << CV_VERSION << "\n" << endl;
+    return;
 }
+
+
 /*
  * Generates random perspective transform of image
  */
@@ -131,7 +134,7 @@ void testCalonderClassifier( const string& classifierFilename, const string& img
     Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
     for( size_t mi = 0; mi < matches.size(); mi++ )
     {
-        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>(mi,0)) < 4 ) // inlier
+        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
     }

@@ -12,38 +12,41 @@ using namespace cv;
 void help()
 {
     printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
-            "descriptor classifier"
+            "descriptor classifier\n"
             "Usage:\n"
-            "./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
-            "\n");
+            "./find_obj_ferns <object_filename> <scene_filename>, default: box.png and box_in_scene.png\n\n");
+    return;
 }


 int main(int argc, char** argv)
 {
+    int i;
+
     const char* object_filename = argc > 1 ? argv[1] : "box.png";
     const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
-    int i;
     help();
-    cvNamedWindow("Object", 1);
-    cvNamedWindow("Image", 1);
-    cvNamedWindow("Object Correspondence", 1);

     Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
-    Mat image;
+    Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );

-    double imgscale = 1;
-
-    Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
-    resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
-
-
-    if( !object.data || !image.data )
+    if( !object.data || !scene.data )
     {
-        fprintf( stderr, "Can not load %s and/or %s\n"
-                 "Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
+        fprintf( stderr, "Can not load %s and/or %s\n",
                  object_filename, scene_filename );
         exit(-1);
     }

+    double imgscale = 1;
+    Mat image;
+
+    resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
+
+    cvNamedWindow("Object", 1);
+    cvNamedWindow("Image", 1);
+    cvNamedWindow("Object Correspondence", 1);
+
     Size patchSize(32, 32);
     LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
     ldetector.setVerbose(true);
@@ -139,10 +142,12 @@ int main(int argc, char** argv)
         circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
         circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
     }

     imwrite("correspond.png", correspond );
     imshow( "Object", objectColor );
     imshow( "Image", imageColor );
+
     waitKey(0);
+
     return 0;
 }
@@ -13,14 +13,15 @@
 using namespace cv;
 using namespace std;

-void myhelp()
+void help()
 {
     printf("\nSigh: This program is not complete/will be replaced. \n"
         "So: Use this just to see hints of how to use things like Rodrigues\n"
         " conversions, finding the fundamental matrix, using descriptor\n"
         " finding and matching in features2d and using camera parameters\n"
-        );
+        "Usage: build3dmodel -i <intrinsics_filename>\n"
+        "\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n");
+    return;
 }


@@ -159,14 +160,14 @@ static void findConstrainedCorrespondences(const Mat& _F,
     _F.convertTo(Fhdr, CV_32F);
     matches.clear();

-    for( size_t i = 0; i < keypoints1.size(); i++ )
+    for( int i = 0; i < (int)keypoints1.size(); i++ )
     {
         Point2f p1 = keypoints1[i].pt;
         double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
         int bestIdx1 = -1, bestIdx2 = -1;
         const float* d1 = descriptors1.ptr<float>(i);

-        for( size_t j = 0; j < keypoints2.size(); j++ )
+        for( int j = 0; j < (int)keypoints2.size(); j++ )
         {
             Point2f p2 = keypoints2[j].pt;
             double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
@@ -224,8 +225,8 @@ static void findConstrainedCorrespondences(const Mat& _F,
                 continue;
             double threshold = bestDist1/ratio;
             const float* d22 = descriptors2.ptr<float>(bestIdx1);
-            size_t i1 = 0;
-            for( ; i1 < keypoints1.size(); i1++ )
+            int i1 = 0;
+            for( ; i1 < (int)keypoints1.size(); i1++ )
             {
                 if( i1 == i )
                     continue;
@@ -440,7 +441,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
         alldescriptorsVec.resize(prev + delta);
         std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
                   alldescriptorsVec.begin() + prev);
-        dstart.push_back(dstart.back() + keypoints.size());
+        dstart.push_back(dstart.back() + (int)keypoints.size());

         Mat R, t;
         unpackPose(poseList[i], R, t);
@@ -454,7 +455,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
         }
     }

-    Mat alldescriptors(alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
+    Mat alldescriptors((int)alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
                        &alldescriptorsVec[0]);

     printf("\nOk. total images = %d. total keypoints = %d\n",
@@ -516,8 +517,8 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
                 //model.points.push_back(objpt);
                 pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
                 pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
-                keypointsIdxMap[Pair2i(i,i1)] = 1;
-                keypointsIdxMap[Pair2i(j,i2)] = 1;
+                keypointsIdxMap[Pair2i((int)i,i1)] = 1;
+                keypointsIdxMap[Pair2i((int)j,i2)] = 1;
                 //CV_Assert(e1 < 5 && e2 < 5);
                 //Scalar color(rand()%256,rand()%256, rand()%256);
                 //circle(img1, keypoints1[i1].pt, 2, color, -1, CV_AA);
@@ -551,7 +552,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,

     printf("\nOk. Total classes (i.e. 3d points) = %d\n", nclasses );

-    model.descriptors.create(keypointsIdx.size(), descriptorSize, CV_32F);
+    model.descriptors.create((int)keypointsIdx.size(), descriptorSize, CV_32F);
     model.didx.resize(nclasses);
     model.points.resize(nclasses);

@@ -614,26 +615,22 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,

 int main(int argc, char** argv)
 {
-    triangulatePoint_test();
-
-    const char* help = "Usage: build3dmodel -i <intrinsics_filename>\n"
-        "\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n";
-
-    if(argc < 3)
-    {
-        puts(help);
-        myhelp();
-        return 0;
-    }
     const char* intrinsicsFilename = 0;
     const char* modelName = 0;
     const char* detectorName = "SURF";
     const char* descriptorExtractorName = "SURF";

     vector<Point3f> modelBox;
     vector<string> imageList;
     vector<Rect> roiList;
     vector<Vec6f> poseList;

+    if(argc < 3)
+    {
+        help();
+        return -1;
+    }
+
     for( int i = 1; i < argc; i++ )
     {
         if( strcmp(argv[i], "-i") == 0 )
@@ -646,19 +643,21 @@ int main(int argc, char** argv)
             descriptorExtractorName = argv[++i];
         else
         {
+            help();
             printf("Incorrect option\n");
-            puts(help);
-            return 0;
+            return -1;
         }
     }

     if( !intrinsicsFilename || !modelName )
     {
         printf("Some of the required parameters are missing\n");
-        puts(help);
-        return 0;
+        help();
+        return -1;
     }

+    triangulatePoint_test();
+
     Mat cameraMatrix, distCoeffs;
     Size calibratedImageSize;
     readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize);
@@ -670,8 +669,8 @@
     if(!readModelViews( modelIndexFilename, modelBox, imageList, roiList, poseList))
     {
         printf("Can not read the model. Check the parameters and the working directory\n");
-        puts(help);
-        return 0;
+        help();
+        return -1;
     }

     PointModel model;
@@ -12,18 +12,22 @@ void help()
     cout <<
     "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
     "edge template and a query edge image.\n"
-    "Call:\n"
-    "./chamfer [<image edge map> <template edge map>]\n"
-    "By default\n"
-    "the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
+    "Usage:\n"
+    "./chamfer <image edge map> <template edge map>,"
+    " By default the inputs are logo_in_clutter.png logo.png\n" << endl;
+
+    return;
 }
+
+
 int main( int argc, char** argv )
 {
-    if( argc != 1 && argc != 3 )
+    if( argc != 3 )
     {
         help();
         return 0;
     }
+
     Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
     Mat cimg;
     cvtColor(img, cimg, CV_GRAY2BGR);
@@ -41,7 +45,7 @@ int main( int argc, char** argv )
     int best = chamerMatching( img, tpl, results, costs );
     if( best < 0 )
     {
-        cout << "not found;\n";
+        cout << "matching not found\n";
         return 0;
     }

@@ -52,7 +56,10 @@ int main( int argc, char** argv )
         if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
             cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
     }
+
     imshow("result", cimg);
+
     waitKey();
+
     return 0;
 }
@@ -9,8 +9,8 @@ using namespace std;
 void help()
 {
     cout << "\nThis program demonstrates line finding with the Hough transform.\n"
-            "Call:\n"
-            "./houghlines [image_len -- Default is pic1.png\n" << endl;
+            "Usage:\n"
+            "./houghlines <image_name>, Default is pic1.png\n" << endl;
 }

 int main(int argc, char** argv)
@@ -20,10 +20,11 @@ int main(int argc, char** argv)
     Mat src = imread(filename, 0);
     if(src.empty())
     {
-        cout << "can not open " << filename << endl;
-        cout << "Usage: houghlines <image_name>" << endl;
-    }
         help();
+        cout << "can not open " << filename << endl;
+        return -1;
+    }
+
     Mat dst, cdst;
     Canny(src, dst, 50, 200, 3);
     cvtColor(dst, cdst, CV_GRAY2BGR);
@@ -57,6 +58,7 @@ int main(int argc, char** argv)
     imshow("detected lines", cdst);

     waitKey();
+
     return 0;
 }
