From 781931a67152767224789b52f447bc598ba6530b Mon Sep 17 00:00:00 2001 From: berak Date: Tue, 20 Oct 2015 11:57:29 +0200 Subject: [PATCH] update python features2d tutorials --- .../py_feature2d/py_brief/py_brief.markdown | 8 +++++--- .../py_feature2d/py_fast/py_fast.markdown | 2 +- .../py_feature_homography.markdown | 4 ++-- doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown | 4 ++-- .../py_sift_intro/py_sift_intro.markdown | 6 +++--- .../py_surf_intro/py_surf_intro.markdown | 12 ++++++------ 6 files changed, 19 insertions(+), 17 deletions(-) diff --git a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown index f1fc1e0ec..e2c0ed428 100644 --- a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown +++ b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown @@ -48,6 +48,8 @@ BRIEF in OpenCV Below code shows the computation of BRIEF descriptors with the help of CenSurE detector. (CenSurE detector is called STAR detector in OpenCV) + +note: you need [opencv contrib](https://github.com/Itseez/opencv_contrib) to use this. 
@code{.py} import numpy as np import cv2 @@ -55,11 +57,11 @@ from matplotlib import pyplot as plt img = cv2.imread('simple.jpg',0) -# Initiate STAR detector -star = cv2.FeatureDetector_create("STAR") +# Initiate STAR detector +star = cv2.xfeatures2d.StarDetector_create() # Initiate BRIEF extractor -brief = cv2.DescriptorExtractor_create("BRIEF") +brief = cv2.BriefDescriptorExtractor_create() # find the keypoints with STAR kp = star.detect(img,None) diff --git a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown index aa45c325d..1534c57ac 100644 --- a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown +++ b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown @@ -101,7 +101,7 @@ from matplotlib import pyplot as plt img = cv2.imread('simple.jpg',0) # Initiate FAST object with default values -fast = cv2.FastFeatureDetector() +fast = cv2.FastFeatureDetector_create() # find and draw the keypoints kp = fast.detect(img,None) diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 4f5a3234d..4f5efa4a8 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -44,7 +44,7 @@ img1 = cv2.imread('box.png',0) # queryImage img2 = cv2.imread('box_in_scene.png',0) # trainImage # Initiate SIFT detector -sift = cv2.SIFT() +sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) @@ -78,7 +78,7 @@ if len(good)>MIN_MATCH_COUNT: M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() - h,w = img1.shape + h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) diff --git 
a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown index 8130f39ab..c1917eb1a 100644 --- a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown +++ b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown @@ -69,8 +69,8 @@ from matplotlib import pyplot as plt img = cv2.imread('simple.jpg',0) -# Initiate STAR detector -orb = cv2.ORB() +# Initiate ORB detector +orb = cv2.ORB_create() # find the keypoints with ORB kp = orb.detect(img,None) diff --git a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown index 05226d58e..b12505adc 100644 --- a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown @@ -104,7 +104,7 @@ greater than 0.8, they are rejected. It eliminaters around 90% of false matches So this is a summary of SIFT algorithm. For more details and understanding, reading the original paper is highly recommended. Remember one thing, this algorithm is patented. So this algorithm is -included in Non-free module in OpenCV. +included in [the opencv contrib repo](https://github.com/Itseez/opencv_contrib) SIFT in OpenCV -------------- @@ -119,7 +119,7 @@ import numpy as np img = cv2.imread('home.jpg') gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -sift = cv2.SIFT() +sift = cv2.xfeatures2d.SIFT_create() kp = sift.detect(gray,None) img=cv2.drawKeypoints(gray,kp) @@ -151,7 +151,7 @@ Now to calculate the descriptor, OpenCV provides two methods. 
We will see the second method: @code{.py} -sift = cv2.SIFT() +sift = cv2.xfeatures2d.SIFT_create() kp, des = sift.detectAndCompute(gray,None) @endcode Here kp will be a list of keypoints and des is a numpy array of shape diff --git a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown index 21998fede..7d5bd93fe 100644 --- a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown @@ -80,7 +80,7 @@ examples are shown in Python terminal since it is just same as SIFT only. # Create SURF object. You can specify params here or later. # Here I set Hessian Threshold to 400 ->>> surf = cv2.SURF(400) +>>> surf = cv2.xfeatures2d.SURF_create(400) # Find keypoints and descriptors directly >>> kp, des = surf.detectAndCompute(img,None) @@ -92,12 +92,12 @@ examples are shown in Python terminal since it is just same as SIFT only. While matching, we may need all those features, but not now. So we increase the Hessian Threshold. @code{.py} # Check present Hessian threshold ->>> print surf.hessianThreshold +>>> print surf.getHessianThreshold() 400.0 # We set it to some 50000. Remember, it is just for representing in picture. # In actual cases, it is better to have a value 300-500 ->>> surf.hessianThreshold = 50000 +>>> surf.setHessianThreshold(50000) # Again compute keypoints and check its number. >>> kp, des = surf.detectAndCompute(img,None) @@ -119,10 +119,10 @@ on wings of butterfly. You can test it with other images. Now I want to apply U-SURF, so that it won't find the orientation. 
@code{.py} # Check upright flag, if it False, set it to True ->>> print surf.upright +>>> print surf.getUpright() False ->>> surf.upright = True +>>> surf.setUpright(True) # Recompute the feature points and draw it >>> kp = surf.detect(img,None) @@ -143,7 +143,7 @@ Finally we check the descriptor size and change it to 128 if it is only 64-dim. 64 # That means flag, "extended" is False. ->>> surf.extended +>>> surf.getExtended() False # So we make it to True to get 128-dim descriptors.