Doxygen tutorials: cpp done
@@ -4,10 +4,10 @@ AKAZE local features matching {#tutorial_akaze_matching}

Introduction
------------

In this tutorial we will learn how to use AKAZE @cite ANB13 local features to detect and match keypoints on
two images.

We will find keypoints on a pair of images with a given homography matrix, match them, and count the
number of inliers (i.e. matches that fit into the given homography).

You can find the expanded version of this example here:
@@ -18,7 +18,7 @@ Data

We are going to use images 1 and 3 from the *Graffiti* sequence of the Oxford dataset.



Homography is given by a 3 by 3 matrix:
@code{.none}
@@ -35,92 +35,92 @@ You can find the images (*graf1.png*, *graf3.png*) and homography (*H1to3p.xml*)

### Explanation

-# **Load images and homography**
   @code{.cpp}
   Mat img1 = imread("graf1.png", IMREAD_GRAYSCALE);
   Mat img2 = imread("graf3.png", IMREAD_GRAYSCALE);

   Mat homography;
   FileStorage fs("H1to3p.xml", FileStorage::READ);
   fs.getFirstTopLevelNode() >> homography;
   @endcode
   We are loading grayscale images here. The homography is stored in the XML file created with FileStorage.
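   For reference, such an XML file can be produced with FileStorage as well. A minimal writer sketch (the node name *H13* is an illustrative assumption; *getFirstTopLevelNode()* is used on the reading side precisely so the name does not matter):
   @code{.cpp}
   // Hypothetical sketch: store a 3x3 double homography in XML.
   Mat H = Mat::eye(3, 3, CV_64F);        // replace with the real homography
   FileStorage out("H1to3p.xml", FileStorage::WRITE);
   out << "H13" << H;                     // "H13" is an arbitrary node name
   out.release();
   @endcode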

-# **Detect keypoints and compute descriptors using AKAZE**
   @code{.cpp}
   vector<KeyPoint> kpts1, kpts2;
   Mat desc1, desc2;

   AKAZE akaze;
   akaze(img1, noArray(), kpts1, desc1);
   akaze(img2, noArray(), kpts2, desc2);
   @endcode
   We create an AKAZE object and use its *operator()* functionality. Since we don't need the *mask*
   parameter, *noArray()* is used.

-# **Use brute-force matcher to find 2-nn matches**
   @code{.cpp}
   BFMatcher matcher(NORM_HAMMING);
   vector< vector<DMatch> > nn_matches;
   matcher.knnMatch(desc1, desc2, nn_matches, 2);
   @endcode
   We use the Hamming distance, because AKAZE uses a binary descriptor by default.
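   To get a feeling for what the matcher computes, the Hamming distance between two individual descriptors can also be evaluated directly with *norm()*; a small sketch:
   @code{.cpp}
   // Hamming distance = number of differing bits between two binary descriptors.
   double d = norm(desc1.row(0), desc2.row(0), NORM_HAMMING);
   cout << "Distance between the first descriptors: " << d << endl;
   @endcode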

-# **Use 2-nn matches to find correct keypoint matches**
   @code{.cpp}
   for(size_t i = 0; i < nn_matches.size(); i++) {
       DMatch first = nn_matches[i][0];
       float dist1 = nn_matches[i][0].distance;
       float dist2 = nn_matches[i][1].distance;

       if(dist1 < nn_match_ratio * dist2) {
           matched1.push_back(kpts1[first.queryIdx]);
           matched2.push_back(kpts2[first.trainIdx]);
       }
   }
   @endcode
   If the closest match is *ratio* closer than the second closest one, then the match is correct.
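   The loop relies on the ratio constant and the output containers being declared beforehand; a minimal sketch, with 0.8f as a typical value for this ratio test:
   @code{.cpp}
   const float nn_match_ratio = 0.8f;   // nearest-neighbour matching ratio
   vector<KeyPoint> matched1, matched2; // filled by the ratio-test loop
   @endcode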

-# **Check if our matches fit in the homography model**
   @code{.cpp}
   for(int i = 0; i < matched1.size(); i++) {
       Mat col = Mat::ones(3, 1, CV_64F);
       col.at<double>(0) = matched1[i].pt.x;
       col.at<double>(1) = matched1[i].pt.y;

       col = homography * col;
       col /= col.at<double>(2);
       float dist = sqrt( pow(col.at<double>(0) - matched2[i].pt.x, 2) +
                          pow(col.at<double>(1) - matched2[i].pt.y, 2));

       if(dist < inlier_threshold) {
           int new_i = inliers1.size();
           inliers1.push_back(matched1[i]);
           inliers2.push_back(matched2[i]);
           good_matches.push_back(DMatch(new_i, new_i, 0));
       }
   }
   @endcode
   If the distance from the first keypoint's projection to the second keypoint is less than the
   threshold, then it fits into the homography model.

   We create a new set of matches for the inliers, because it is required by the drawing function.
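   Again a few declarations are assumed; a sketch, with 2.5 pixels as a typical reprojection threshold:
   @code{.cpp}
   const float inlier_threshold = 2.5f; // distance threshold (pixels) to accept an inlier
   vector<KeyPoint> inliers1, inliers2; // filled by the homography check
   vector<DMatch> good_matches;         // inlier matches, re-indexed for drawMatches
   @endcode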

-# **Output results**
   @code{.cpp}
   Mat res;
   drawMatches(img1, inliers1, img2, inliers2, good_matches, res);
   imwrite("res.png", res);
   ...
   @endcode
   Here we save the resulting image and print some statistics.
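   The elided statistics part could look like the following sketch, which derives exactly the numbers shown in the Results section:
   @code{.cpp}
   double inlier_ratio = inliers1.size() * 1.0 / matched1.size();
   cout << "A-KAZE Matching Results" << endl;
   cout << "Keypoints 1:  " << kpts1.size()    << endl;
   cout << "Keypoints 2:  " << kpts2.size()    << endl;
   cout << "Matches:      " << matched1.size() << endl;
   cout << "Inliers:      " << inliers1.size() << endl;
   cout << "Inlier Ratio: " << inlier_ratio    << endl;
   @endcode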

### Results

Found matches
-------------



A-KAZE Matching Results
-----------------------
@@ -152,8 +152,9 @@ A-KAZE Matching Results
--------------------------

@code{.none}
Keypoints 1    2943
Keypoints 2    3511
Matches        447
Inliers        308
Inlier Ratio   0.689038
@endcode
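
The inlier ratio is just the last two numbers divided: 308 / 447 ≈ 0.689.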
@@ -11,16 +11,17 @@ The algorithm is as follows:

- Detect and describe keypoints on the first frame, manually set object boundaries
- For every next frame (a sketch of this loop follows below):
    -# Detect and describe keypoints
    -# Match them using bruteforce matcher
    -# Estimate homography transformation using RANSAC
    -# Filter inliers from all the matches
    -# Apply homography transformation to the bounding box to find the object
    -# Draw bounding box and inliers, compute inlier ratio as evaluation metric


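A minimal sketch of that per-frame loop, assuming a *Tracker* object that wraps the steps above (the names *tracker*, *stats*, and *writer* are illustrative):
@code{.cpp}
// Hypothetical driver: read frames, process each one, write the result out.
VideoCapture cap("blais.mp4");
VideoWriter writer; // assumed opened with the output path and frame size
Mat frame;
while(cap.read(frame)) {
    Mat out = tracker.process(frame, stats); // detect, match, RANSAC, draw
    writer << out;                           // append annotated frame
}
@endcode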
### Data

To do the tracking we need a video and the object position on the first frame.
@@ -31,14 +32,16 @@ To run the code you have to specify input and output video path and object bound

@code{.none}
./planar_tracking blais.mp4 result.avi blais_bb.xml.gz
@endcode

### Source Code

@includelineno cpp/tutorial_code/features2D/AKAZE_tracking/planar_tracking.cpp

### Explanation

### Tracker class

This class implements the algorithm described above using the given feature detector and descriptor
matcher.
@@ -63,62 +66,60 @@ matcher.

- **Processing frames**

    -# Locate keypoints and compute descriptors
       @code{.cpp}
       (*detector)(frame, noArray(), kp, desc);
       @endcode
       To find matches between frames we have to locate the keypoints first.

       In this tutorial the detectors are set up to find about 1000 keypoints on each frame (see the sketch below).
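       How a detector is steered towards a fixed keypoint budget is not shown in this hunk. A sketch of one way to do it with the pre-3.0 style API this tutorial uses (the exact mechanism is an assumption):
       @code{.cpp}
       // ORB accepts the keypoint budget directly in its constructor.
       Ptr<Feature2D> detector = new ORB(1000);
       // AKAZE has no such budget parameter; its detection threshold has to be
       // adapted from frame to frame to keep the keypoint count near 1000.
       @endcode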

    -# Use 2-nn matcher to find correspondences
       @code{.cpp}
       matcher->knnMatch(first_desc, desc, matches, 2);
       for(unsigned i = 0; i < matches.size(); i++) {
           if(matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
               matched1.push_back(first_kp[matches[i][0].queryIdx]);
               matched2.push_back(      kp[matches[i][0].trainIdx]);
           }
       }
       @endcode
       If the closest match is *nn_match_ratio* closer than the second closest one, then it's a
       match.

    -# Use *RANSAC* to estimate homography transformation
       @code{.cpp}
       homography = findHomography(Points(matched1), Points(matched2),
                                   RANSAC, ransac_thresh, inlier_mask);
       @endcode
       If there are at least 4 matches we can use random sample consensus to estimate the image
       transformation.
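       *findHomography* expects *vector<Point2f>* input and needs at least 4 correspondences, hence the *Points* helper and a size guard. A sketch of both (the helper body is an assumption consistent with its use above):
       @code{.cpp}
       // Convert keypoints to plain 2D points for findHomography.
       vector<Point2f> Points(const vector<KeyPoint>& keypoints)
       {
           vector<Point2f> res;
           for(unsigned i = 0; i < keypoints.size(); i++)
               res.push_back(keypoints[i].pt);
           return res;
       }

       // Only estimate when the homography is actually determined.
       if(matched1.size() >= 4) {
           homography = findHomography(Points(matched1), Points(matched2),
                                       RANSAC, ransac_thresh, inlier_mask);
       }
       @endcode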

    -# Save the inliers
       @code{.cpp}
       for(unsigned i = 0; i < matched1.size(); i++) {
           if(inlier_mask.at<uchar>(i)) {
               int new_i = static_cast<int>(inliers1.size());
               inliers1.push_back(matched1[i]);
               inliers2.push_back(matched2[i]);
               inlier_matches.push_back(DMatch(new_i, new_i, 0));
           }
       }
       @endcode
       Since *findHomography* computes the inliers we only have to save the chosen points and
       matches.

    -# Project object bounding box
       @code{.cpp}
       perspectiveTransform(object_bb, new_bb, homography);
       @endcode
       If there is a reasonable number of inliers we can use the estimated transformation to locate
       the object.
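       What counts as "reasonable" is a design choice; a sketch of such a validity check (*bb_min_inliers* is an assumed name for the minimum inlier count):
       @code{.cpp}
       // Accept the pose only if RANSAC succeeded and enough inliers survived.
       if(!homography.empty() && (int)inliers1.size() >= bb_min_inliers) {
           perspectiveTransform(object_bb, new_bb, homography);
           // draw new_bb and the inliers on the output frame here
       }
       // otherwise: tracking is lost for this frame, draw no bounding box
       @endcode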

### Results

You can watch the resulting [video on youtube](http://www.youtube.com/watch?v=LWY-w8AGGhE).
@@ -129,6 +130,7 @@ Inliers 410

Inlier ratio 0.58
Keypoints 1117
@endcode

*ORB* statistics:
@code{.none}
Matches 504
@@ -87,4 +87,4 @@ Result

Here is the result after applying the BruteForce matcher between the two original images:


@@ -79,10 +79,10 @@ Explanation

Result
------

-# Here is the result of the feature detection applied to the first image:

    

-# And here is the result for the second image:

    
@@ -130,10 +130,10 @@ Explanation

Result
------

-# Here is the result of the feature detection applied to the first image:

    

-# Additionally, we get as console output the keypoints filtered:

    
@@ -134,8 +134,8 @@ Explanation

Result
------

-# And here is the result for the detected object (highlighted in green)

    
@@ -122,9 +122,9 @@ Explanation

Result
------



Here is the result:


@@ -30,7 +30,7 @@ Explanation

Result
------



@@ -111,5 +111,5 @@ Explanation

Result
------


@@ -201,9 +201,9 @@ Result

The original image:



The detected corners are surrounded by a small black circle:
