Merge pull request #2278 from KonstantinMatskevich:ocl_toinputarray

Andrey Pavlenko 2014-02-06 12:55:04 +04:00 committed by OpenCV Buildbot
commit b6675ee2a8
23 changed files with 217 additions and 170 deletions

View File

@ -34,7 +34,7 @@ circle
----------
Draws a circle.
.. ocv:function:: void circle( Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void circle( InputOutputArray img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:pyfunction:: cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> img
@ -83,9 +83,9 @@ ellipse
-----------
Draws a simple or thick elliptic arc or fills an ellipse sector.
.. ocv:function:: void ellipse( Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void ellipse( InputOutputArray img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void ellipse( Mat& img, const RotatedRect& box, const Scalar& color, int thickness=1, int lineType=LINE_8 )
.. ocv:function:: void ellipse( InputOutputArray img, const RotatedRect& box, const Scalar& color, int thickness=1, int lineType=LINE_8 )
.. ocv:pyfunction:: cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> img
@ -331,7 +331,7 @@ line
--------
Draws a line segment connecting two points.
.. ocv:function:: void line( Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void line( InputOutputArray img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:pyfunction:: cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img
@ -417,7 +417,7 @@ rectangle
-------------
Draws a simple, thick, or filled up-right rectangle.
.. ocv:function:: void rectangle( Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void rectangle( InputOutputArray img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
.. ocv:function:: void rectangle( Mat& img, Rect rec, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 )
@ -570,7 +570,7 @@ putText
-----------
Draws a text string.
.. ocv:function:: void putText( Mat& img, const String& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=LINE_8, bool bottomLeftOrigin=false )
.. ocv:function:: void putText( InputOutputArray img, const String& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=LINE_8, bool bottomLeftOrigin=false )
.. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> None
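
The reason these drawing signatures widen from Mat& to InputOutputArray is that the same calls can then target other array types, notably cv::UMat for the T-API. A minimal sketch under that assumption (sizes, colors and the main() wrapper are illustrative; at this revision the drawing API is declared in the core module):

    #include <opencv2/core.hpp>

    int main()
    {
        // A plain Mat works exactly as before.
        cv::Mat canvas(480, 640, CV_8UC3, cv::Scalar::all(0));
        cv::line(canvas, cv::Point(10, 10), cv::Point(200, 200), cv::Scalar(0, 255, 0), 2);
        cv::circle(canvas, cv::Point(320, 240), 50, cv::Scalar(255, 0, 0), -1);

        // With InputOutputArray the same calls also accept a UMat
        // (assuming a build where the T-API/UMat is available).
        cv::UMat ucanvas(480, 640, CV_8UC3);
        ucanvas.setTo(cv::Scalar::all(0));
        cv::rectangle(ucanvas, cv::Point(30, 30), cv::Point(100, 80), cv::Scalar(0, 0, 255));
        cv::putText(ucanvas, "hello", cv::Point(40, 120), cv::FONT_HERSHEY_SIMPLEX, 1.0,
                    cv::Scalar::all(255));
        return 0;
    }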

View File

@ -507,11 +507,11 @@ CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev
CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);
//! draws the line segment (pt1, pt2) in the image
CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
CV_EXPORTS_W void line(InputOutputArray img, Point pt1, Point pt2, const Scalar& color,
int thickness = 1, int lineType = LINE_8, int shift = 0);
//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2,
CV_EXPORTS_W void rectangle(InputOutputArray img, Point pt1, Point pt2,
const Scalar& color, int thickness = 1,
int lineType = LINE_8, int shift = 0);
@ -521,18 +521,18 @@ CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
int lineType = LINE_8, int shift = 0);
//! draws the circle outline or a solid circle in the image
CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius,
CV_EXPORTS_W void circle(InputOutputArray img, Point center, int radius,
const Scalar& color, int thickness = 1,
int lineType = LINE_8, int shift = 0);
//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes,
CV_EXPORTS_W void ellipse(InputOutputArray img, Point center, Size axes,
double angle, double startAngle, double endAngle,
const Scalar& color, int thickness = 1,
int lineType = LINE_8, int shift = 0);
//! draws a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color,
CV_EXPORTS_W void ellipse(InputOutputArray img, const RotatedRect& box, const Scalar& color,
int thickness = 1, int lineType = LINE_8);
//! draws a filled convex polygon in the image
@ -582,7 +582,7 @@ CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,
CV_OUT std::vector<Point>& pts );
//! renders text string in the image
CV_EXPORTS_W void putText( Mat& img, const String& text, Point org,
CV_EXPORTS_W void putText( InputOutputArray img, const String& text, Point org,
int fontFace, double fontScale, Scalar color,
int thickness = 1, int lineType = LINE_8,
bool bottomLeftOrigin = false );

View File

@ -1568,9 +1568,11 @@ PolyLine( Mat& img, const Point* v, int count, bool is_closed,
* External functions *
\****************************************************************************************/
void line( Mat& img, Point pt1, Point pt2, const Scalar& color,
void line( InputOutputArray _img, Point pt1, Point pt2, const Scalar& color,
int thickness, int line_type, int shift )
{
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
line_type = 8;
@ -1582,10 +1584,12 @@ void line( Mat& img, Point pt1, Point pt2, const Scalar& color,
ThickLine( img, pt1, pt2, buf, thickness, line_type, 3, shift );
}
void rectangle( Mat& img, Point pt1, Point pt2,
void rectangle( InputOutputArray _img, Point pt1, Point pt2,
const Scalar& color, int thickness,
int lineType, int shift )
{
Mat img = _img.getMat();
if( lineType == CV_AA && img.depth() != CV_8U )
lineType = 8;
@ -1622,9 +1626,11 @@ void rectangle( Mat& img, Rect rec,
}
void circle( Mat& img, Point center, int radius,
void circle( InputOutputArray _img, Point center, int radius,
const Scalar& color, int thickness, int line_type, int shift )
{
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
line_type = 8;
@ -1647,10 +1653,12 @@ void circle( Mat& img, Point center, int radius,
}
void ellipse( Mat& img, Point center, Size axes,
void ellipse( InputOutputArray _img, Point center, Size axes,
double angle, double start_angle, double end_angle,
const Scalar& color, int thickness, int line_type, int shift )
{
Mat img = _img.getMat();
if( line_type == CV_AA && img.depth() != CV_8U )
line_type = 8;
@ -1672,9 +1680,11 @@ void ellipse( Mat& img, Point center, Size axes,
_end_angle, buf, thickness, line_type );
}
void ellipse(Mat& img, const RotatedRect& box, const Scalar& color,
void ellipse(InputOutputArray _img, const RotatedRect& box, const Scalar& color,
int thickness, int lineType)
{
Mat img = _img.getMat();
if( lineType == CV_AA && img.depth() != CV_8U )
lineType = 8;
@ -1918,11 +1928,12 @@ static const int* getFontData(int fontFace)
}
void putText( Mat& img, const String& text, Point org,
void putText( InputOutputArray _img, const String& text, Point org,
int fontFace, double fontScale, Scalar color,
int thickness, int line_type, bool bottomLeftOrigin )
{
Mat img = _img.getMat();
const int* ascii = getFontData(fontFace);
double buf[4];
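
Each hunk in this file follows the same conversion pattern: the parameter becomes InputOutputArray, a Mat header is obtained with getMat() at the top of the function, and the original body is left untouched. A hedged sketch of that pattern applied to a hypothetical helper (drawCross is not part of the patch):

    #include <opencv2/core.hpp>

    // Hypothetical helper illustrating the wrapping pattern used throughout drawing.cpp.
    void drawCross(cv::InputOutputArray _img, cv::Point center, int armLength, const cv::Scalar& color)
    {
        cv::Mat img = _img.getMat();   // header over the caller's buffer (Mat, UMat, ROI, ...)
        CV_Assert(!img.empty());
        cv::line(img, center - cv::Point(armLength, 0), center + cv::Point(armLength, 0), color);
        cv::line(img, center - cv::Point(0, armLength), center + cv::Point(0, armLength), color);
    }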

View File

@ -25,10 +25,10 @@ Abstract base class for computing descriptors for image keypoints. ::
public:
virtual ~DescriptorExtractor();
void compute( const Mat& image, vector<KeyPoint>& keypoints,
Mat& descriptors ) const;
void compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints,
vector<Mat>& descriptors ) const;
void compute( InputArray image, vector<KeyPoint>& keypoints,
OutputArray descriptors ) const;
void compute( InputArrayOfArrays images, vector<vector<KeyPoint> >& keypoints,
OutputArrayOfArrays descriptors ) const;
virtual void read( const FileNode& );
virtual void write( FileStorage& ) const;
@ -57,9 +57,9 @@ DescriptorExtractor::compute
--------------------------------
Computes the descriptors for a set of keypoints detected in an image (first variant) or image set (second variant).
.. ocv:function:: void DescriptorExtractor::compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const
.. ocv:function:: void DescriptorExtractor::compute( InputArray image, vector<KeyPoint>& keypoints, OutputArray descriptors ) const
.. ocv:function:: void DescriptorExtractor::compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, vector<Mat>& descriptors ) const
.. ocv:function:: void DescriptorExtractor::compute( InputArrayOfArrays images, vector<vector<KeyPoint> >& keypoints, OutputArrayOfArrays descriptors ) const
.. ocv:pyfunction:: cv2.DescriptorExtractor_create.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
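
Call sites do not change with the switch to InputArray/OutputArray; a minimal usage sketch of the compute call documented above (the "ORB" algorithm name is only an example of a registered detector/extractor):

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    void computeDescriptorsExample(const cv::Mat& image)   // e.g. an 8-bit grayscale image
    {
        cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("ORB");
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("ORB");

        std::vector<cv::KeyPoint> keypoints;
        detector->detect(image, keypoints);

        cv::Mat descriptors;                  // any OutputArray-compatible target is accepted
        extractor->compute(image, keypoints, descriptors);
    }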

View File

@ -29,7 +29,7 @@ Abstract interface for extracting and matching a keypoint descriptor. There are
GenericDescriptorMatcher();
virtual ~GenericDescriptorMatcher();
virtual void add( const vector<Mat>& images,
virtual void add( InputArrayOfArrays images,
vector<vector<KeyPoint> >& keypoints );
const vector<Mat>& getTrainImages() const;
@ -40,36 +40,36 @@ Abstract interface for extracting and matching a keypoint descriptor. There are
virtual bool isMaskSupported() = 0;
void classify( const Mat& queryImage,
void classify( InputArray queryImage,
vector<KeyPoint>& queryKeypoints,
const Mat& trainImage,
InputArray trainImage,
vector<KeyPoint>& trainKeypoints ) const;
void classify( const Mat& queryImage,
void classify( InputArray queryImage,
vector<KeyPoint>& queryKeypoints );
/*
* Group of methods to match keypoints from an image pair.
*/
void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
void match( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<DMatch>& matches, const Mat& mask=Mat() ) const;
void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
void knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const;
void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
void radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
InputArray trainImage, vector<KeyPoint>& trainKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const;
/*
* Group of methods to match keypoints from one image to an image set.
*/
void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
void match( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() );
void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
void knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, int k,
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
void radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
@ -89,7 +89,7 @@ GenericDescriptorMatcher::add
---------------------------------
Adds images and their keypoints to the training collection stored in the class instance.
.. ocv:function:: void GenericDescriptorMatcher::add( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints )
.. ocv:function:: void GenericDescriptorMatcher::add( InputArrayOfArrays images, vector<vector<KeyPoint> >& keypoints )
:param images: Image collection.
@ -142,9 +142,9 @@ GenericDescriptorMatcher::classify
--------------------------------------
Classifies keypoints from a query set.
.. ocv:function:: void GenericDescriptorMatcher::classify( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, const Mat& trainImage, vector<KeyPoint>& trainKeypoints ) const
.. ocv:function:: void GenericDescriptorMatcher::classify( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints ) const
.. ocv:function:: void GenericDescriptorMatcher::classify( const Mat& queryImage, vector<KeyPoint>& queryKeypoints )
.. ocv:function:: void GenericDescriptorMatcher::classify( InputArray queryImage, vector<KeyPoint>& queryKeypoints )
:param queryImage: Query image.
@ -170,9 +170,9 @@ GenericDescriptorMatcher::match
-----------------------------------
Finds the best match in the training set for each keypoint from the query set.
.. ocv:function:: void GenericDescriptorMatcher::match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, const Mat& trainImage, vector<KeyPoint>& trainKeypoints, vector<DMatch>& matches, const Mat& mask=Mat() ) const
.. ocv:function:: void GenericDescriptorMatcher::match(InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<DMatch>& matches, const Mat& mask=Mat() ) const
.. ocv:function:: void GenericDescriptorMatcher::match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() )
.. ocv:function:: void GenericDescriptorMatcher::match( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() )
:param queryImage: Query image.
@ -196,9 +196,9 @@ GenericDescriptorMatcher::knnMatch
--------------------------------------
Finds the ``k`` best matches for each query keypoint.
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, const Mat& trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, int k, const Mat& mask=Mat(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, int k, const Mat& mask=Mat(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, int k, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
.. ocv:function:: void GenericDescriptorMatcher::knnMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, int k, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
The methods are extended variants of ``GenericDescriptorMatch::match``. The parameters are similar, and the semantics is similar to ``DescriptorMatcher::knnMatch``. But this class does not require explicitly computed keypoint descriptors.
@ -208,9 +208,9 @@ GenericDescriptorMatcher::radiusMatch
-----------------------------------------
For each query keypoint, finds the training keypoints not farther than the specified distance.
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, const Mat& trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, float maxDistance, const Mat& mask=Mat(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, InputArray trainImage, vector<KeyPoint>& trainKeypoints, vector<vector<DMatch> >& matches, float maxDistance, const Mat& mask=Mat(), bool compactResult=false ) const
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, float maxDistance, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
.. ocv:function:: void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, vector<KeyPoint>& queryKeypoints, vector<vector<DMatch> >& matches, float maxDistance, const vector<Mat>& masks=vector<Mat>(), bool compactResult=false )
The methods are similar to ``DescriptorMatcher::radius``. But this class does not require explicitly computed keypoint descriptors.
@ -254,7 +254,7 @@ Class used for matching descriptors that can be described as vectors in a finite
VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
virtual ~VectorDescriptorMatcher();
virtual void add( const vector<Mat>& imgCollection,
virtual void add( InputArrayOfArrays imgCollection,
vector<vector<KeyPoint> >& pointCollection );
virtual void clear();
virtual void train();
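
A hedged usage sketch for the matching entry points documented above, using the concrete VectorDescriptorMatcher shown just above (the "ORB"/"BruteForce-Hamming" names are placeholders for any compatible extractor/matcher pair):

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    void matchExample(const cv::Mat& queryImage, const cv::Mat& trainImage)
    {
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("ORB");
        cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
        cv::VectorDescriptorMatcher vdm(extractor, matcher);

        cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("ORB");
        std::vector<cv::KeyPoint> queryKeypoints, trainKeypoints;
        detector->detect(queryImage, queryKeypoints);
        detector->detect(trainImage, trainKeypoints);

        std::vector<cv::DMatch> matches;
        vdm.match(queryImage, queryKeypoints, trainImage, trainKeypoints, matches);
    }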

View File

@ -7,9 +7,9 @@ drawMatches
---------------
Draws the found matches of keypoints from two images.
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( InputArray img1, const vector<KeyPoint>& keypoints1, InputArray img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, InputOutputArray outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawMatches( InputArray img1, const vector<KeyPoint>& keypoints1, InputArray img2, const vector<KeyPoint>& keypoints2, const vector<vector<DMatch> >& matches1to2, InputOutputArray outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:pyfunction:: cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches1to2[, outImg[, matchColor[, singlePointColor[, matchesMask[, flags]]]]]) -> outImg
@ -69,7 +69,7 @@ drawKeypoints
-----------------
Draws keypoints.
.. ocv:function:: void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:function:: void drawKeypoints( InputArray image, const vector<KeyPoint>& keypoints, InputOutputArray outImage, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
.. ocv:pyfunction:: cv2.drawKeypoints(image, keypoints[, outImage[, color[, flags]]]) -> outImage
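
A short sketch of the two calls documented above; since outImg/outImage are now InputOutputArray, the caller simply passes an empty Mat (or another array type) to be allocated:

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    void drawExample(const cv::Mat& img1, const std::vector<cv::KeyPoint>& kpts1,
                     const cv::Mat& img2, const std::vector<cv::KeyPoint>& kpts2,
                     const std::vector<cv::DMatch>& matches1to2)
    {
        cv::Mat outKeypoints, outMatches;
        cv::drawKeypoints(img1, kpts1, outKeypoints, cv::Scalar::all(-1),
                          cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
        cv::drawMatches(img1, kpts1, img2, kpts2, matches1to2, outMatches);
    }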

View File

@ -129,11 +129,11 @@ The class declaration is the following: ::
void setVocabulary( const Mat& vocabulary );
const Mat& getVocabulary() const;
void compute( const Mat& image, vector<KeyPoint>& keypoints,
Mat& imgDescriptor,
void compute( InputArray image, vector<KeyPoint>& keypoints,
OutputArray imgDescriptor,
vector<vector<int> >* pointIdxsOfClusters=0,
Mat* descriptors=0 );
void compute( const Mat& descriptors, Mat& imgDescriptor,
void compute( InputArray descriptors, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
int descriptorSize() const;
int descriptorType() const;
@ -180,8 +180,8 @@ BOWImgDescriptorExtractor::compute
--------------------------------------
Computes an image descriptor using the set visual vocabulary.
.. ocv:function:: void BOWImgDescriptorExtractor::compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& imgDescriptor, vector<vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 )
.. ocv:function:: void BOWImgDescriptorExtractor::compute( const Mat& keypointDescriptors, Mat& imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters=0 )
.. ocv:function:: void BOWImgDescriptorExtractor::compute( InputArray image, vector<KeyPoint>& keypoints, OutputArray imgDescriptor, vector<vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 )
.. ocv:function:: void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputArray imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters=0 )
:param image: Image, for which the descriptor is computed.
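
A hedged usage sketch for the compute overloads shown above; the extractor and matcher names are placeholders, and in practice their descriptor type has to agree with the vocabulary (built beforehand, e.g. with BOWKMeansTrainer):

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    void bowExample(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& vocabulary)
    {
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("ORB");
        cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

        cv::BOWImgDescriptorExtractor bow(extractor, matcher);
        bow.setVocabulary(vocabulary);

        cv::Mat imgDescriptor;                        // normalized histogram over the vocabulary
        bow.compute(image, keypoints, imgDescriptor); // first overload: descriptors computed internally
    }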

View File

@ -156,7 +156,7 @@ public:
* keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
* descriptors Computed descriptors. Row i is the descriptor for keypoint i.
*/
CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const;
CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
/*
* Compute the descriptors for a keypoints collection detected in image collection.
@ -165,7 +165,7 @@ public:
* Keypoints for which a descriptor cannot be computed are removed.
* descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i].
*/
void compute( const std::vector<Mat>& images, std::vector<std::vector<KeyPoint> >& keypoints, std::vector<Mat>& descriptors ) const;
void compute( InputArrayOfArrays images, std::vector<std::vector<KeyPoint> >& keypoints, OutputArrayOfArrays descriptors ) const;
CV_WRAP virtual int descriptorSize() const = 0;
CV_WRAP virtual int descriptorType() const = 0;
@ -176,7 +176,7 @@ public:
CV_WRAP static Ptr<DescriptorExtractor> create( const String& descriptorExtractorType );
protected:
virtual void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const = 0;
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const = 0;
/*
* Remove keypoints within borderPixels of an image edge.
@ -207,7 +207,7 @@ public:
OutputArray descriptors,
bool useProvidedKeypoints=false ) const = 0;
CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const;
CV_WRAP void compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
// Create feature detector and descriptor extractor by name.
CV_WRAP static Ptr<Feature2D> create( const String& name );
@ -252,7 +252,7 @@ public:
protected:
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
void computeKeypointsNoOrientation(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
@ -337,7 +337,7 @@ public:
protected:
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
CV_PROP_RW int nfeatures;
@ -403,9 +403,9 @@ public:
};
protected:
virtual void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
void buildPattern();
uchar meanIntensity( const Mat& image, const Mat& integral, const float kp_x, const float kp_y,
uchar meanIntensity( InputArray image, InputArray integral, const float kp_x, const float kp_y,
const unsigned int scale, const unsigned int rot, const unsigned int point ) const;
bool orientationNormalized; //true if the orientation is normalized, false otherwise
@ -609,7 +609,7 @@ protected:
};
virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
virtual void findBlobs(const Mat &image, const Mat &binaryImage, std::vector<Center> &centers) const;
virtual void findBlobs(InputArray image, InputArray binaryImage, std::vector<Center> &centers) const;
Params params;
AlgorithmInfo* info() const;
@ -850,7 +850,7 @@ public:
virtual bool empty() const;
protected:
virtual void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
Ptr<DescriptorExtractor> descriptorExtractor;
};
@ -879,9 +879,9 @@ public:
AlgorithmInfo* info() const;
protected:
virtual void computeImpl(const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const;
virtual void computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const;
typedef void(*PixelTestFn)(const Mat&, const std::vector<KeyPoint>&, Mat&);
typedef void(*PixelTestFn)(InputArray, const std::vector<KeyPoint>&, OutputArray);
int bytes_;
PixelTestFn test_fn_;
@ -1214,7 +1214,7 @@ public:
* If inheritor class need perform such prefiltering the method add() must be overloaded.
* In the other class methods programmer has access to the train keypoints by a constant link.
*/
virtual void add( const std::vector<Mat>& images,
virtual void add( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints );
const std::vector<Mat>& getTrainImages() const;
@ -1243,10 +1243,10 @@ public:
* trainKeypoints Keypoints from the train image
*/
// Classify keypoints from query image under one train image.
void classify( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const;
void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const;
// Classify keypoints from query image under train image collection.
void classify( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints );
void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints );
/*
* Group of methods to match keypoints from image pair.
@ -1254,32 +1254,32 @@ public:
* train() method is called here.
*/
// Find one best match for each query descriptor (if mask is empty).
void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<DMatch>& matches, const Mat& mask=Mat() ) const;
// Find k best matches for each query keypoint (in increasing order of distances).
// compactResult is used when mask is not empty. If compactResult is false matches
// vector will have the same size as queryDescriptors rows.
// If compactResult is true matches vector will not contain matches for fully masked out query descriptors.
void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const Mat& mask=Mat(), bool compactResult=false ) const;
// Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances).
void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const Mat& mask=Mat(), bool compactResult=false ) const;
/*
* Group of methods to match keypoints from one image to image set.
* See description of similar methods for matching image pair above.
*/
void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<DMatch>& matches, const std::vector<Mat>& masks=std::vector<Mat>() );
void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void radiusMatch(InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
@ -1303,10 +1303,10 @@ protected:
// In fact the matching is implemented only by the following two methods. These methods suppose
// that the class object has been trained already. Public match methods call these methods
// after calling train().
virtual void knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks, bool compactResult ) = 0;
virtual void radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult ) = 0;
/*
@ -1365,7 +1365,7 @@ public:
VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
virtual ~VectorDescriptorMatcher();
virtual void add( const std::vector<Mat>& imgCollection,
virtual void add( InputArrayOfArrays imgCollection,
std::vector<std::vector<KeyPoint> >& pointCollection );
virtual void clear();
@ -1381,10 +1381,10 @@ public:
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
virtual void knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks, bool compactResult );
virtual void radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult );
@ -1411,19 +1411,19 @@ struct CV_EXPORTS DrawMatchesFlags
};
// Draw keypoints.
CV_EXPORTS_W void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, CV_OUT Mat& outImage,
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
// Draws matches of keypoints from two images on output image.
CV_EXPORTS_W void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, CV_OUT Mat& outImg,
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, CV_OUT Mat& outImg,
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );
@ -1518,9 +1518,9 @@ public:
void setVocabulary( const Mat& vocabulary );
const Mat& getVocabulary() const;
void compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& imgDescriptor,
void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
void compute( const Mat& keypointDescriptors, Mat& imgDescriptor,
void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
// compute() is not constant because DescriptorMatcher::match is not constant
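
Outside code that subclasses DescriptorExtractor has to adopt the new computeImpl signature as well. A hypothetical minimal subclass, only to show the override shape and the create()/setTo() allocation pattern used by the patch:

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>

    // Hypothetical extractor that emits a constant descriptor per keypoint.
    class ConstantDescriptorExtractor : public cv::DescriptorExtractor
    {
    public:
        int descriptorSize() const { return 8; }
        int descriptorType() const { return CV_8U; }

    protected:
        void computeImpl(cv::InputArray _image, std::vector<cv::KeyPoint>& keypoints,
                         cv::OutputArray _descriptors) const
        {
            cv::Mat image = _image.getMat();       // same pattern as the library code
            (void)image;                           // a real extractor would sample around each keypoint
            _descriptors.create((int)keypoints.size(), descriptorSize(), descriptorType());
            _descriptors.setTo(cv::Scalar::all(1));
        }
    };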

View File

@ -140,7 +140,7 @@ const Mat& BOWImgDescriptorExtractor::getVocabulary() const
return vocabulary;
}
void BOWImgDescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& imgDescriptor,
void BOWImgDescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* descriptors )
{
imgDescriptor.release();
@ -170,7 +170,7 @@ int BOWImgDescriptorExtractor::descriptorType() const
return CV_32FC1;
}
void BOWImgDescriptorExtractor::compute( const Mat& keypointDescriptors, Mat& imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters )
void BOWImgDescriptorExtractor::compute( InputArray keypointDescriptors, OutputArray _imgDescriptor, std::vector<std::vector<int> >* pointIdxsOfClusters )
{
CV_Assert( vocabulary.empty() != false );
@ -187,7 +187,11 @@ void BOWImgDescriptorExtractor::compute( const Mat& keypointDescriptors, Mat& im
pointIdxsOfClusters->resize(clusterCount);
}
imgDescriptor = Mat( 1, clusterCount, descriptorType(), Scalar::all(0.0) );
_imgDescriptor.create(1, clusterCount, descriptorType());
_imgDescriptor.setTo(Scalar::all(0));
Mat imgDescriptor = _imgDescriptor.getMat();
float *dptr = (float*)imgDescriptor.data;
for( size_t i = 0; i < matches.size(); i++ )
{
@ -201,7 +205,7 @@ void BOWImgDescriptorExtractor::compute( const Mat& keypointDescriptors, Mat& im
}
// Normalize image descriptor.
imgDescriptor /= keypointDescriptors.rows;
imgDescriptor /= keypointDescriptors.size().height;
}
}
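
Two side effects of the new signatures show up in this hunk: the output is allocated through create()/setTo() before a Mat header is taken, and properties of the input are read through InputArray accessors rather than Mat members, hence keypointDescriptors.size().height replacing keypointDescriptors.rows. A small sketch of that accessor style (the function itself is illustrative, not from the patch):

    #include <opencv2/core.hpp>

    // Illustrative only: scale each element of src by 1/row-count, reading the
    // row count through the InputArray interface (InputArray has no .rows member).
    void scaleByRowCount(cv::InputArray _src, cv::OutputArray _dst)
    {
        CV_Assert(!_src.empty());
        int rows = _src.size().height;
        _dst.create(_src.size(), CV_32F);
        cv::Mat src = _src.getMat(), dst = _dst.getMat();
        src.convertTo(dst, CV_32F, 1.0 / rows);
    }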

View File

@ -163,8 +163,9 @@ void SimpleBlobDetector::write( cv::FileStorage& fs ) const
params.write(fs);
}
void SimpleBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage, std::vector<Center> &centers) const
void SimpleBlobDetector::findBlobs(InputArray _image, InputArray _binaryImage, std::vector<Center> &centers) const
{
Mat image = _image.getMat(), binaryImage = _binaryImage.getMat();
(void)image;
centers.clear();

View File

@ -61,8 +61,9 @@ inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests16(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@ -71,8 +72,9 @@ static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints,
}
}
static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests32(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@ -82,8 +84,9 @@ static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints,
}
}
static void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
static void pixelTests64(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
{
Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
for (int i = 0; i < (int)keypoints.size(); ++i)
{
uchar* desc = descriptors.ptr(i);
@ -155,12 +158,12 @@ void BriefDescriptorExtractor::write( FileStorage& fs) const
fs << "descriptorSize" << bytes_;
}
void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
// Construct integral image for fast smoothing (box filter)
Mat sum;
Mat grayImage = image;
Mat grayImage = image.getMat();
if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
///TODO allow the user to pass in a precomputed integral image
@ -173,7 +176,8 @@ void BriefDescriptorExtractor::computeImpl(const Mat& image, std::vector<KeyPoin
//Remove keypoints very close to the border
KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
descriptors = Mat::zeros((int)keypoints.size(), bytes_, CV_8U);
descriptors.create((int)keypoints.size(), bytes_, CV_8U);
descriptors.setTo(Scalar::all(0));
test_fn_(sum, keypoints, descriptors);
}

View File

@ -757,7 +757,7 @@ BRISK::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArra
}
void
BRISK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
BRISK::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}

View File

@ -54,7 +54,7 @@ namespace cv
DescriptorExtractor::~DescriptorExtractor()
{}
void DescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
if( image.empty() || keypoints.empty() )
{
@ -68,8 +68,11 @@ void DescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keyp
computeImpl( image, keypoints, descriptors );
}
void DescriptorExtractor::compute( const std::vector<Mat>& imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, std::vector<Mat>& descCollection ) const
void DescriptorExtractor::compute( InputArrayOfArrays _imageCollection, std::vector<std::vector<KeyPoint> >& pointCollection, OutputArrayOfArrays _descCollection ) const
{
std::vector<Mat> imageCollection, descCollection;
_imageCollection.getMatVector(imageCollection);
_descCollection.getMatVector(descCollection);
CV_Assert( imageCollection.size() == pointCollection.size() );
descCollection.resize( imageCollection.size() );
for( size_t i = 0; i < imageCollection.size(); i++ )
@ -106,7 +109,7 @@ Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExt
}
CV_WRAP void Feature2D::compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const
CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
DescriptorExtractor::compute(image, keypoints, descriptors);
}
@ -157,8 +160,9 @@ struct KP_LessThan
const std::vector<KeyPoint>* kp;
};
void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
void OpponentColorDescriptorExtractor::computeImpl( InputArray _bgrImage, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
Mat bgrImage = _bgrImage.getMat();
std::vector<Mat> opponentChannels;
convertBGRImageToOpponentColorSpace( bgrImage, opponentChannels );
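
For the collection overloads, InputArrayOfArrays is unpacked into a std::vector<Mat> with getMatVector(), as the new compute() earlier in this file does. A hedged sketch of that unpacking in a hypothetical function:

    #include <vector>
    #include <opencv2/core.hpp>

    // Illustrative only: count the total number of pixels across an image collection.
    size_t totalPixels(cv::InputArrayOfArrays _images)
    {
        std::vector<cv::Mat> images;
        _images.getMatVector(images);          // unpacks the collection into Mat headers
        size_t total = 0;
        for (size_t i = 0; i < images.size(); i++)
            total += images[i].total();
        return total;
    }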

View File

@ -50,7 +50,7 @@ namespace cv
/*
* Functions to draw keypoints and matches.
*/
static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& color, int flags )
static inline void _drawKeypoint( InputOutputArray img, const KeyPoint& p, const Scalar& color, int flags )
{
CV_Assert( !img.empty() );
Point center( cvRound(p.pt.x * draw_multiplier), cvRound(p.pt.y * draw_multiplier) );
@ -88,7 +88,7 @@ static inline void _drawKeypoint( Mat& img, const KeyPoint& p, const Scalar& col
}
}
void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, Mat& outImage,
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
const Scalar& _color, int flags )
{
if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
@ -120,25 +120,29 @@ void drawKeypoints( const Mat& image, const std::vector<KeyPoint>& keypoints, Ma
}
}
static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
Mat& outImg, Mat& outImg1, Mat& outImg2,
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
const Scalar& singlePointColor, int flags )
{
Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
Mat outImg;
Size img1size = img1.size(), img2size = img2.size();
Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
{
outImg = _outImg.getMat();
if( size.width > outImg.cols || size.height > outImg.rows )
CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
}
else
{
outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
_outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
outImg = _outImg.getMat();
outImg = Scalar::all(0);
outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
if( img1.type() == CV_8U )
cvtColor( img1, outImg1, COLOR_GRAY2BGR );
@ -154,15 +158,15 @@ static void _prepareImgAndDrawKeypoints( const Mat& img1, const std::vector<KeyP
// draw keypoints
if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
{
Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
}
}
static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
static inline void _drawMatch( InputOutputArray outImg, InputOutputArray outImg1, InputOutputArray outImg2 ,
const KeyPoint& kp1, const KeyPoint& kp2, const Scalar& matchColor, int flags )
{
RNG& rng = theRNG();
@ -174,7 +178,7 @@ static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
Point2f pt1 = kp1.pt,
pt2 = kp2.pt,
dpt2 = Point2f( std::min(pt2.x+outImg1.cols, float(outImg.cols-1)), pt2.y );
dpt2 = Point2f( std::min(pt2.x+outImg1.size().width, float(outImg.size().width-1)), pt2.y );
line( outImg,
Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
@ -182,9 +186,9 @@ static inline void _drawMatch( Mat& outImg, Mat& outImg1, Mat& outImg2 ,
color, 1, LINE_AA, draw_shift_bits );
}
void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, Mat& outImg,
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<char>& matchesMask, int flags )
{
@ -211,9 +215,9 @@ void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
}
}
void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
const Mat& img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, Mat& outImg,
void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
InputArray img2, const std::vector<KeyPoint>& keypoints2,
const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
const Scalar& matchColor, const Scalar& singlePointColor,
const std::vector<std::vector<char> >& matchesMask, int flags )
{

View File

@ -229,9 +229,9 @@ void FREAK::buildPattern()
}
}
void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
void FREAK::computeImpl( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const
{
Mat image = _image.getMat();
if( image.empty() )
return;
if( keypoints.empty() )
@ -297,7 +297,9 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
if( !extAll )
{
// extract the best comparisons only
descriptors = cv::Mat::zeros((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
_descriptors.create((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
#if CV_SSE2
__m128i* ptr= (__m128i*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
#else
@ -415,7 +417,9 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
}
else // extract all possible comparisons for selection
{
descriptors = cv::Mat::zeros((int)keypoints.size(), 128, CV_8U);
_descriptors.create((int)keypoints.size(), 128, CV_8U);
_descriptors.setTo(Scalar::all(0));
Mat descriptors = _descriptors.getMat();
std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
for( size_t k = keypoints.size(); k--; )
@ -474,13 +478,14 @@ void FREAK::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat
}
// simply take average on a square patch, not even gaussian approx
uchar FREAK::meanIntensity( const cv::Mat& image, const cv::Mat& integral,
uchar FREAK::meanIntensity( InputArray _image, InputArray _integral,
const float kp_x,
const float kp_y,
const unsigned int scale,
const unsigned int rot,
const unsigned int point) const
{
Mat image = _image.getMat(), integral = _integral.getMat();
// get point position in image
const PatternPoint& FreakPoint = patternLookup[scale*FREAK_NB_ORIENTATION*FREAK_NB_POINTS + rot*FREAK_NB_POINTS + point];
const float xf = FreakPoint.x+kp_x;

View File

@ -1617,9 +1617,11 @@ GenericDescriptorMatcher::GenericDescriptorMatcher()
GenericDescriptorMatcher::~GenericDescriptorMatcher()
{}
void GenericDescriptorMatcher::add( const std::vector<Mat>& images,
void GenericDescriptorMatcher::add( InputArrayOfArrays _images,
std::vector<std::vector<KeyPoint> >& keypoints )
{
std::vector<Mat> images;
_images.getMatVector(images);
CV_Assert( !images.empty() );
CV_Assert( images.size() == keypoints.size() );
@ -1651,8 +1653,8 @@ void GenericDescriptorMatcher::clear()
void GenericDescriptorMatcher::train()
{}
void GenericDescriptorMatcher::classify( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const
void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const
{
std::vector<DMatch> matches;
match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
@ -1662,7 +1664,7 @@ void GenericDescriptorMatcher::classify( const Mat& queryImage, std::vector<KeyP
queryKeypoints[matches[i].queryIdx].class_id = trainKeypoints[matches[i].trainIdx].class_id;
}
void GenericDescriptorMatcher::classify( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints )
void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints )
{
std::vector<DMatch> matches;
match( queryImage, queryKeypoints, matches );
@ -1672,10 +1674,11 @@ void GenericDescriptorMatcher::classify( const Mat& queryImage, std::vector<KeyP
queryKeypoints[matches[i].queryIdx].class_id = trainPointCollection.getKeyPoint( matches[i].trainIdx, matches[i].trainIdx ).class_id;
}
void GenericDescriptorMatcher::match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<DMatch>& matches, const Mat& mask ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
@ -1683,10 +1686,11 @@ void GenericDescriptorMatcher::match( const Mat& queryImage, std::vector<KeyPoin
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn, const Mat& mask, bool compactResult ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
@ -1694,11 +1698,12 @@ void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, std::vector<KeyP
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const Mat& mask, bool compactResult ) const
{
Mat trainImage = _trainImage.getMat();
Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
@ -1706,7 +1711,7 @@ void GenericDescriptorMatcher::radiusMatch( const Mat& queryImage, std::vector<K
vecTrainPoints[0].swap( trainKeypoints );
}
void GenericDescriptorMatcher::match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<DMatch>& matches, const std::vector<Mat>& masks )
{
std::vector<std::vector<DMatch> > knnMatches;
@ -1714,7 +1719,7 @@ void GenericDescriptorMatcher::match( const Mat& queryImage, std::vector<KeyPoin
convertMatches( knnMatches, matches );
}
void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
const std::vector<Mat>& masks, bool compactResult )
{
@ -1730,7 +1735,7 @@ void GenericDescriptorMatcher::knnMatch( const Mat& queryImage, std::vector<KeyP
knnMatchImpl( queryImage, queryKeypoints, matches, knn, masks, compactResult );
}
void GenericDescriptorMatcher::radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult )
{
@ -1792,10 +1797,11 @@ VectorDescriptorMatcher::VectorDescriptorMatcher( const Ptr<DescriptorExtractor>
VectorDescriptorMatcher::~VectorDescriptorMatcher()
{}
void VectorDescriptorMatcher::add( const std::vector<Mat>& imgCollection,
void VectorDescriptorMatcher::add( InputArrayOfArrays _imgCollection,
std::vector<std::vector<KeyPoint> >& pointCollection )
{
std::vector<Mat> descriptors;
std::vector<Mat> imgCollection, descriptors;
_imgCollection.getMatVector(imgCollection);
extractor->compute( imgCollection, pointCollection, descriptors );
matcher->add( descriptors );
@ -1820,7 +1826,7 @@ bool VectorDescriptorMatcher::isMaskSupported()
return matcher->isMaskSupported();
}
void VectorDescriptorMatcher::knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void VectorDescriptorMatcher::knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
const std::vector<Mat>& masks, bool compactResult )
{
@ -1829,7 +1835,7 @@ void VectorDescriptorMatcher::knnMatchImpl( const Mat& queryImage, std::vector<K
matcher->knnMatch( queryDescriptors, matches, knn, masks, compactResult );
}
void VectorDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void VectorDescriptorMatcher::radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult )
{

View File

@ -948,7 +948,7 @@ void ORB::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputA
(*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
}
void ORB::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void ORB::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}

View File

@ -2672,10 +2672,10 @@ protected:
// The minimum distance to each training patch with all its affine poses is found over all scales.
// The class ID of a match is returned for each keypoint. The distance is calculated over PCA components
// loaded with DescriptorOneWay::Initialize, kd tree is used for finding minimum distances.
virtual void knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks, bool compactResult );
virtual void radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult );
@ -2735,10 +2735,10 @@ public:
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
virtual void knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int k,
const std::vector<Mat>& masks, bool compactResult );
virtual void radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& masks, bool compactResult );
@ -2770,7 +2770,7 @@ public:
virtual bool empty() const;
protected:
virtual void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
RTreeClassifier classifier_;
static const int BORDER_SIZE = 16;
@ -2783,15 +2783,17 @@ CalonderDescriptorExtractor<T>::CalonderDescriptorExtractor(const String& classi
}
template<typename T>
void CalonderDescriptorExtractor<T>::computeImpl( const Mat& image,
void CalonderDescriptorExtractor<T>::computeImpl( InputArray _image,
std::vector<KeyPoint>& keypoints,
Mat& descriptors) const
OutputArray _descriptors) const
{
Mat image = _image.getMat(), descriptors;
// Cannot compute descriptors for keypoints on the image border.
KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE);
/// @todo Check 16-byte aligned
descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
_descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
descriptors = _descriptors.getMat();
int patchSize = RandomizedTree::PATCH_SIZE;
int offset = patchSize / 2;

View File

@ -2232,10 +2232,11 @@ namespace cv{
return false;
}
void OneWayDescriptorMatcher::knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void OneWayDescriptorMatcher::knnMatchImpl( InputArray _queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
const std::vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
Mat queryImage = _queryImage.getMat();
train();
CV_Assert( knn == 1 ); // knn > 1 unsupported because of bug in OneWayDescriptorBase for this case
@ -2251,10 +2252,12 @@ namespace cv{
}
}
void OneWayDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void OneWayDescriptorMatcher::radiusMatchImpl( InputArray _queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
Mat queryImage = _queryImage.getMat();
train();
matches.resize( queryKeypoints.size() );

View File

@ -1297,10 +1297,12 @@ void FernDescriptorMatcher::calcBestProbAndMatchIdx( const Mat& image, const Poi
}
}
void FernDescriptorMatcher::knnMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void FernDescriptorMatcher::knnMatchImpl( InputArray _queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, int knn,
const std::vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
Mat queryImage = _queryImage.getMat();
train();
matches.resize( queryKeypoints.size() );
@ -1333,10 +1335,11 @@ void FernDescriptorMatcher::knnMatchImpl( const Mat& queryImage, std::vector<Key
}
}
void FernDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
void FernDescriptorMatcher::radiusMatchImpl( InputArray _queryImage, std::vector<KeyPoint>& queryKeypoints,
std::vector<std::vector<DMatch> >& matches, float maxDistance,
const std::vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
Mat queryImage = _queryImage.getMat();
train();
matches.resize( queryKeypoints.size() );
std::vector<float> signature( (size_t)classifier->getClassCount() );

View File

@ -88,7 +88,7 @@ public:
protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
@ -143,7 +143,7 @@ public:
protected:
void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
void computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
};
typedef SURF SurfFeatureDetector;

View File

@ -823,7 +823,7 @@ void SIFT::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, Input
(*this)(image.getMat(), mask.getMat(), keypoints, noArray());
}
void SIFT::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void SIFT::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}

View File

@ -1016,7 +1016,7 @@ void SURF::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, Input
(*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
}
void SURF::computeImpl( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptors) const
void SURF::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
}