added docs for GPU Filter Engine

Vladislav Vinogradov 2011-01-13 13:48:58 +00:00
parent 1a94186195
commit 349e0ece93
8 changed files with 93311 additions and 90233 deletions


@ -556,322 +556,4 @@ Returns block descriptors computed for the whole image.
\end{description}}
\end{description}
\fi

doc/gpu_features2d.tex Normal file

@ -0,0 +1,317 @@
\section{Feature detection and description}
\cvclass{gpu::SURFParams\_GPU}
Various SURF algorithm parameters.
\begin{lstlisting}
struct SURFParams_GPU
{
SURFParams_GPU() :
threshold(0.1f),
nOctaves(4),
nIntervals(4),
initialScale(2.f),
l1(3.f/1.5f),
l2(5.f/1.5f),
l3(3.f/1.5f),
l4(1.f/1.5f),
edgeScale(0.81f),
initialStep(1),
extended(true),
featuresRatio(0.01f)
{
}
//! The interest operator threshold
float threshold;
//! The number of octaves to process
int nOctaves;
//! The number of intervals in each octave
int nIntervals;
//! The scale associated with the first interval of the first octave
float initialScale;
//! mask parameter l_1
float l1;
//! mask parameter l_2
float l2;
//! mask parameter l_3
float l3;
//! mask parameter l_4
float l4;
//! The amount to scale the edge rejection mask
float edgeScale;
//! The initial sampling step in pixels.
int initialStep;
//! True to compute extended (128-element) descriptors, false for 64-element descriptors
bool extended;
//! max features = featuresRatio * img.size().area()
float featuresRatio;
};
\end{lstlisting}
\cvclass{gpu::SURF\_GPU}
Class for extracting Speeded Up Robust Features from an image.
\begin{lstlisting}
class SURF_GPU : public SURFParams_GPU
{
public:
//! returns the descriptor size in floats (64 or 128)
int descriptorSize() const;
//! upload host keypoints to device memory
static void uploadKeypoints(const vector<KeyPoint>& keypoints,
GpuMat& keypointsGPU);
//! download keypoints from device to host memory
static void downloadKeypoints(const GpuMat& keypointsGPU,
vector<KeyPoint>& keypoints);
//! download descriptors from device to host memory
static void downloadDescriptors(const GpuMat& descriptorsGPU,
vector<float>& descriptors);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
GpuMat& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
void operator()(const GpuMat& img, const GpuMat& mask,
std::vector<KeyPoint>& keypoints,
std::vector<float>& descriptors,
bool useProvidedKeypoints = false,
bool calcOrientation = true);
GpuMat sum;
GpuMat sumf;
GpuMat mask1;
GpuMat maskSum;
GpuMat hessianBuffer;
GpuMat maxPosBuffer;
GpuMat featuresBuffer;
};
\end{lstlisting}
The class \texttt{SURF\_GPU} implements the Speeded Up Robust Features descriptor. There is a fast multi-scale Hessian keypoint detector that can be used to find the keypoints (which is the default option), but the descriptors can also be computed for user-specified keypoints. Only 8-bit grayscale images are supported.
The class \texttt{SURF\_GPU} can store results in GPU and CPU memory and provides static functions to convert results between the CPU and GPU versions (\texttt{uploadKeypoints}, \texttt{downloadKeypoints}, \texttt{downloadDescriptors}). CPU results have the same format as \hyperref[cv.class.SURF]{cv::SURF} results. GPU results are stored in \texttt{GpuMat}: the \texttt{keypoints} matrix is a one-row matrix of type \texttt{CV\_32FC6} that contains six float values per feature (\texttt{x, y, size, response, angle, octave}), and the \texttt{descriptors} matrix is an \texttt{nFeatures} x \texttt{descriptorSize} matrix of type \texttt{CV\_32FC1}.
The class \texttt{SURF\_GPU} uses some internal buffers and provides access to them. All buffers can be safely released between function calls.
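For illustration, a minimal usage sketch is shown below. The image file name is arbitrary, and it is assumed here that an empty \texttt{GpuMat} can be passed as the mask when no mask is needed.
\begin{lstlisting}
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // Load an 8-bit grayscale image and upload it to the GPU
    cv::gpu::GpuMat img(cv::imread("scene.png", 0));

    cv::gpu::SURF_GPU surf;

    // Detect keypoints and compute descriptors on the GPU
    // (an empty GpuMat is assumed to mean "no mask")
    cv::gpu::GpuMat keypointsGPU, descriptorsGPU;
    surf(img, cv::gpu::GpuMat(), keypointsGPU, descriptorsGPU);

    // Download the results to the CPU in cv::SURF-compatible format
    std::vector<cv::KeyPoint> keypoints;
    std::vector<float> descriptors;
    surf.downloadKeypoints(keypointsGPU, keypoints);
    surf.downloadDescriptors(descriptorsGPU, descriptors);
    return 0;
}
\end{lstlisting}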
\cvclass{gpu::BruteForceMatcher\_GPU}
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches between descriptor sets.
\begin{lstlisting}
template<class Distance>
class BruteForceMatcher_GPU
{
public:
// Add descriptors to train descriptor collection.
void add(const std::vector<GpuMat>& descCollection);
// Get train descriptors collection.
const std::vector<GpuMat>& getTrainDescriptors() const;
// Clear train descriptors collection.
void clear();
// Return true if there are no train descriptors in the collection.
bool empty() const;
// Return true if the matcher supports masks in the match methods.
bool isMaskSupported() const;
void matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
GpuMat& trainIdx, GpuMat& distance,
const GpuMat& mask = GpuMat());
static void matchDownload(const GpuMat& trainIdx,
const GpuMat& distance, std::vector<DMatch>& matches);
void match(const GpuMat& queryDescs, const GpuMat& trainDescs,
std::vector<DMatch>& matches, const GpuMat& mask = GpuMat());
void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
const vector<GpuMat>& masks = std::vector<GpuMat>());
void matchCollection(const GpuMat& queryDescs,
const GpuMat& trainCollection,
GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
const GpuMat& maskCollection);
static void matchDownload(const GpuMat& trainIdx, GpuMat& imgIdx,
const GpuMat& distance, std::vector<DMatch>& matches);
void match(const GpuMat& queryDescs, std::vector<DMatch>& matches,
const std::vector<GpuMat>& masks = std::vector<GpuMat>());
void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
const GpuMat& mask = GpuMat());
static void knnMatchDownload(const GpuMat& trainIdx,
const GpuMat& distance, std::vector< std::vector<DMatch> >& matches,
bool compactResult = false);
void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
std::vector< std::vector<DMatch> >& matches, int k,
const GpuMat& mask = GpuMat(), bool compactResult = false);
void knnMatch(const GpuMat& queryDescs,
std::vector< std::vector<DMatch> >& matches, int knn,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
bool compactResult = false );
void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
GpuMat& trainIdx, GpuMat& nMatches, GpuMat& distance,
float maxDistance, const GpuMat& mask = GpuMat());
static void radiusMatchDownload(const GpuMat& trainIdx,
const GpuMat& nMatches, const GpuMat& distance,
std::vector< std::vector<DMatch> >& matches,
bool compactResult = false);
void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
std::vector< std::vector<DMatch> >& matches, float maxDistance,
const GpuMat& mask = GpuMat(), bool compactResult = false);
void radiusMatch(const GpuMat& queryDescs,
std::vector< std::vector<DMatch> >& matches, float maxDistance,
const std::vector<GpuMat>& masks = std::vector<GpuMat>(),
bool compactResult = false);
private:
std::vector<GpuMat> trainDescCollection;
};
\end{lstlisting}
The class \texttt{BruteForceMatcher\_GPU} has an interface similar to the class \hyperref[cv.class.DescriptorMatcher]{cv::DescriptorMatcher}. It has two groups of match methods: for matching descriptors of one image against another image, or against an image set. In addition, all methods have two variants: one stores the results in GPU memory and the other in CPU memory. \texttt{BruteForceMatcher\_GPU} is templated on the distance metric, like \hyperref[cv.class.BruteForceMatcher]{cv::BruteForceMatcher}, but supports only the \texttt{L1} and \texttt{L2} distance types.
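As an illustrative sketch, assuming that \texttt{queryDescsGPU} and \texttt{trainDescsGPU} are \texttt{CV\_32FC1} descriptor matrices already uploaded to GPU memory (for example, produced by \texttt{SURF\_GPU}), and that the \texttt{cv::L2<float>} functor of the CPU \hyperref[cv.class.BruteForceMatcher]{cv::BruteForceMatcher} is used as the distance type:
\begin{lstlisting}
cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;

// One best match per query descriptor; results are returned on the CPU
std::vector<cv::DMatch> matches;
matcher.match(queryDescsGPU, trainDescsGPU, matches);
\end{lstlisting}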
\cvfunc{gpu::BruteForceMatcher\_GPU::match}\label{cppfunc.gpu.BruteForceMatcher.match}
Finds the best match for each descriptor from a query set with the train descriptors. This function is equivalent to \cvCppCross{DescriptorMatcher::match}.
\cvdefCpp{
void match(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par std::vector<DMatch>\& matches, \par const GpuMat\& mask = GpuMat());
}
\cvdefCpp{
void match(const GpuMat\& queryDescs, \par std::vector<DMatch>\& matches, \par const std::vector<GpuMat>\& masks = std::vector<GpuMat>());
}
\cvfunc{gpu::BruteForceMatcher\_GPU::matchSingle}\label{cppfunc.gpu.BruteForceMatcher.matchSingle}
Finds the best match for each query descriptor. Results are stored in GPU memory.
\cvdefCpp{
void matchSingle(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par GpuMat\& trainIdx, \par GpuMat\& distance, \par const GpuMat\& mask = GpuMat());
}
\begin{description}
\cvarg{queryDescs} {Query set of descriptors.}
\cvarg{trainDescs} {Train set of descriptors. It is not added to the train descriptors collection stored in the class object.}
\cvarg{trainIdx} {One-row \texttt{CV\_32SC1} matrix. It contains the best train index for each query. If a query descriptor is masked out in \texttt{mask}, it contains -1.}
\cvarg{distance} {One-row \texttt{CV\_32FC1} matrix. It contains the best distance for each query. If a query descriptor is masked out in \texttt{mask}, it contains \texttt{FLT\_MAX}.}
\cvarg{mask}{Mask specifying permissible matches between input query and train matrices of descriptors.}
\end{description}
\cvfunc{gpu::BruteForceMatcher\_GPU::matchCollection}\label{cppfunc.gpu.BruteForceMatcher.matchCollection}
Finds the best match for each query descriptor from the train collection. Results are stored in GPU memory.
\cvdefCpp{
void matchCollection(const GpuMat\& queryDescs, \par const GpuMat\& trainCollection, \par GpuMat\& trainIdx, \par GpuMat\& imgIdx, \par GpuMat\& distance, \par const GpuMat\& maskCollection);
}
\begin{description}
\cvarg{queryDescs} {Query set of descriptors.}
\cvarg{trainCollection} {\texttt{GpuMat} containing the train collection. It can be obtained from the train descriptors collection that was set via the \texttt{add} method, using \hyperref[cppfunc.gpu.BruteForceMatcher.makeGpuCollection]{makeGpuCollection}, or it can contain a user-defined collection. It must be a one-row matrix in which each element is a \texttt{DevMem2D} pointing to one train descriptors matrix (the matrix must have \texttt{CV\_32FC1} type).}
\cvarg{trainIdx} {One-row \texttt{CV\_32SC1} matrix. It contains the best train index for each query. If a query descriptor is masked out in \texttt{mask}, it contains -1.}
\cvarg{imgIdx} {One-row \texttt{CV\_32SC1} matrix. It contains the image index of the best match for each query. If a query descriptor is masked out in \texttt{mask}, it contains -1.}
\cvarg{distance} {One-row \texttt{CV\_32FC1} matrix. It contains the best distance for each query. If a query descriptor is masked out in \texttt{mask}, it contains \texttt{FLT\_MAX}.}
\cvarg{maskCollection}{\texttt{GpuMat} containing the set of masks. It can be obtained from \texttt{std::vector<GpuMat>} using \hyperref[cppfunc.gpu.BruteForceMatcher.makeGpuCollection]{makeGpuCollection}, or it can contain a user-defined mask set. It must be an empty matrix or a one-row matrix in which each element is a \texttt{PtrStep} pointing to one mask (the mask must have \texttt{CV\_8UC1} type).}
\end{description}
\cvfunc{gpu::BruteForceMatcher\_GPU::makeGpuCollection}\label{cppfunc.gpu.BruteForceMatcher.makeGpuCollection}
Makes a GPU collection of train descriptors and masks in a format suitable for the \hyperref[cppfunc.gpu.BruteForceMatcher.matchCollection]{matchCollection} function.
\cvdefCpp{
void makeGpuCollection(GpuMat\& trainCollection, \par GpuMat\& maskCollection, \par const vector<GpuMat>\& masks = std::vector<GpuMat>());
}
\cvfunc{gpu::BruteForceMatcher\_GPU::matchDownload}\label{cppfunc.gpu.BruteForceMatcher.matchDownload}
Downloads the \texttt{trainIdx}, \texttt{imgIdx} and \texttt{distance} matrices obtained by \hyperref[cppfunc.gpu.BruteForceMatcher.matchSingle]{matchSingle} or \hyperref[cppfunc.gpu.BruteForceMatcher.matchCollection]{matchCollection} into a CPU vector of \hyperref[cv.class.DMatch]{cv::DMatch}.
\cvdefCpp{
static void matchDownload(const GpuMat\& trainIdx, \par const GpuMat\& distance, \par std::vector<DMatch>\& matches);
}
\cvdefCpp{
static void matchDownload(const GpuMat\& trainIdx, \par GpuMat\& imgIdx, \par const GpuMat\& distance, \par std::vector<DMatch>\& matches);
}
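A sketch of the GPU-memory workflow, under the same assumptions about \texttt{queryDescsGPU} and \texttt{trainDescsGPU} as above:
\begin{lstlisting}
cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;

// Keep the intermediate results in GPU memory...
cv::gpu::GpuMat trainIdx, distance;
matcher.matchSingle(queryDescsGPU, trainDescsGPU, trainIdx, distance);

// ...and convert them to cv::DMatch only when they are needed on the CPU
std::vector<cv::DMatch> matches;
matcher.matchDownload(trainIdx, distance, matches);
\end{lstlisting}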
\cvfunc{gpu::BruteForceMatcher\_GPU::knnMatch}\label{cppfunc.gpu.BruteForceMatcher.knnMatch}
Finds the k best matches for each descriptor from a query set with the train descriptors. The found k (or fewer, if not possible) matches are returned in increasing order of distance. This function is equivalent to \cvCppCross{DescriptorMatcher::knnMatch}.
\cvdefCpp{
void knnMatch(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par std::vector< std::vector<DMatch> >\& matches, \par int k, \par const GpuMat\& mask = GpuMat(), \par bool compactResult = false);
}
\cvdefCpp{
void knnMatch(const GpuMat\& queryDescs, \par std::vector< std::vector<DMatch> >\& matches, \par int k, \par const std::vector<GpuMat>\& masks = std::vector<GpuMat>(), \par bool compactResult = false );
}
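For example, a ratio test on the two best matches can be written as follows (the 0.8 threshold and the descriptor variables are illustrative assumptions):
\begin{lstlisting}
cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;

std::vector< std::vector<cv::DMatch> > knnMatches;
matcher.knnMatch(queryDescsGPU, trainDescsGPU, knnMatches, 2);

std::vector<cv::DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); ++i)
{
    // Keep a match only if it is clearly better than the second best one
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}
\end{lstlisting}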
\cvfunc{gpu::BruteForceMatcher\_GPU::knnMatch}\label{cppfunc.gpu.BruteForceMatcher.knnMatchSingle}
Finds the k best matches for each descriptor from a query set with the train descriptors. The found k (or fewer, if not possible) matches are returned in increasing order of distance. Results are stored in GPU memory.
\cvdefCpp{
void knnMatch(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par GpuMat\& trainIdx, \par GpuMat\& distance, \par GpuMat\& allDist, \par int k, \par const GpuMat\& mask = GpuMat());
}
\begin{description}
\cvarg{queryDescs} {Query set of descriptors.}
\cvarg{trainDescs} {Train set of descriptors. It is not added to the train descriptors collection stored in the class object.}
\cvarg{trainIdx} {\texttt{nQuery} x \texttt{k} matrix of type \texttt{CV\_32SC1}. \texttt{trainIdx.at<int>(queryIdx, i)} contains the index of the i-th best train descriptor. If a query descriptor is masked out in \texttt{mask}, it contains -1.}
\cvarg{distance} {\texttt{nQuery} x \texttt{k} matrix of type \texttt{CV\_32FC1}. It contains the distance between each query descriptor and its i-th best train descriptor. If a query descriptor is masked out in \texttt{mask}, it contains \texttt{FLT\_MAX}.}
\cvarg{allDist} {Buffer to store all distances between the query and train descriptors. It has size \texttt{nQuery} x \texttt{nTrain} and type \texttt{CV\_32F}. \texttt{allDist.at<float>(queryIdx, trainIdx)} contains \texttt{FLT\_MAX} if \texttt{trainIdx} is one of the k best; otherwise it contains the distance between the \texttt{queryIdx} and \texttt{trainIdx} descriptors.}
\cvarg{k}{Number of best matches to find per query descriptor (or fewer, if not possible).}
\cvarg{mask}{Mask specifying permissible matches between input query and train matrices of descriptors.}
\end{description}
\cvfunc{gpu::BruteForceMatcher\_GPU::knnMatchDownload}\label{cppfunc.gpu.BruteForceMatcher.knnMatchDownload}
Downloads the \texttt{trainIdx} and \texttt{distance} matrices obtained by \hyperref[cppfunc.gpu.BruteForceMatcher.knnMatchSingle]{knnMatch} into a CPU vector of \hyperref[cv.class.DMatch]{cv::DMatch}. If \texttt{compactResult} is true, the \texttt{matches} vector does not contain matches for fully masked-out query descriptors.
\cvdefCpp{
static void knnMatchDownload(const GpuMat\& trainIdx, \par const GpuMat\& distance, \par std::vector< std::vector<DMatch> >\& matches, \par bool compactResult = false);
}
\cvfunc{gpu::BruteForceMatcher\_GPU::radiusMatch}\label{cppfunc.gpu.BruteForceMatcher.radiusMatch}
Finds the best matches for each query descriptor that have a distance less than the given threshold. The found matches are returned in increasing order of distance. This function is equivalent to \cvCppCross{DescriptorMatcher::radiusMatch}. It works only on devices with Compute Capability \texttt{>=} 1.1.
\cvdefCpp{
void radiusMatch(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par std::vector< std::vector<DMatch> >\& matches, \par float maxDistance, \par const GpuMat\& mask = GpuMat(), \par bool compactResult = false);
}
\cvdefCpp{
void radiusMatch(const GpuMat\& queryDescs, \par std::vector< std::vector<DMatch> >\& matches, \par float maxDistance, \par const std::vector<GpuMat>\& masks = std::vector<GpuMat>(), \par bool compactResult = false);
}
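For illustration (the 0.25 distance threshold is arbitrary and depends on the descriptor scale; the descriptor variables are the same assumptions as above, and the function requires Compute Capability 1.1 or higher):
\begin{lstlisting}
cv::gpu::BruteForceMatcher_GPU< cv::L2<float> > matcher;

// All train descriptors within maxDistance of each query descriptor
std::vector< std::vector<cv::DMatch> > matches;
matcher.radiusMatch(queryDescsGPU, trainDescsGPU, matches, 0.25f);
\end{lstlisting}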
\cvfunc{gpu::BruteForceMatcher\_GPU::radiusMatch}\label{cppfunc.gpu.BruteForceMatcher.radiusMatchSingle}
Finds the best matches for each query descriptor that have a distance less than the given threshold. Results are stored in GPU memory and are not sorted in increasing order of distance. It works only on devices with Compute Capability \texttt{>=} 1.1.
\cvdefCpp{
void radiusMatch(const GpuMat\& queryDescs, \par const GpuMat\& trainDescs, \par GpuMat\& trainIdx, \par GpuMat\& nMatches, \par GpuMat\& distance, \par float maxDistance, \par const GpuMat\& mask = GpuMat());
}
\begin{description}
\cvarg{queryDescs} {Query set of descriptors.}
\cvarg{trainDescs} {Train set of descriptors. It is not added to the train descriptors collection stored in the class object.}
\cvarg{trainIdx} {\texttt{trainIdx.at<int>(queryIdx, i)} contains the i-th train index \newline(\texttt{i < min(nMatches.at<unsigned int>(0, queryIdx), trainIdx.cols)}). If \texttt{trainIdx} is empty, it is created with size \texttt{nQuery} x \texttt{nTrain}. It can also be allocated by the user; in that case it must have \texttt{nQuery} rows and \texttt{CV\_32SC1} type. The number of columns can be less than \texttt{nTrain}, but then the matcher may not find all matches because it does not have enough memory to store the results.}
\cvarg{nMatches} {\texttt{nMatches.at<unsigned int>(0, queryIdx)} contains the number of matches found for \texttt{queryIdx}. Note that \texttt{nMatches} can be greater than \texttt{trainIdx.cols}; this means that the matcher did not find all matches because it did not have enough memory to store them.}
\cvarg{distance} {\texttt{distance.at<float>(queryIdx, i)} contains the i-th distance \newline(\texttt{i < min(nMatches.at<unsigned int>(0, queryIdx), trainIdx.cols)}). If \texttt{trainIdx} is empty, \texttt{distance} is created with size \texttt{nQuery} x \texttt{nTrain}. Otherwise it must also be allocated by the user; it must have the same size as \texttt{trainIdx} and \texttt{CV\_32FC1} type.}
\cvarg{maxDistance}{Distance threshold for the matches.}
\cvarg{mask}{Mask specifying permissible matches between input query and train matrices of descriptors.}
\end{description}
\cvfunc{gpu::BruteForceMatcher\_GPU::radiusMatchDownload}\label{cppfunc.gpu.BruteForceMatcher.radiusMatchDownload}
Downloads the \texttt{trainIdx}, \texttt{nMatches} and \texttt{distance} matrices obtained by \hyperref[cppfunc.gpu.BruteForceMatcher.radiusMatchSingle]{radiusMatch} into a CPU vector of \hyperref[cv.class.DMatch]{cv::DMatch}. If \texttt{compactResult} is true, the \texttt{matches} vector does not contain matches for fully masked-out query descriptors.
\cvdefCpp{
static void radiusMatchDownload(const GpuMat\& trainIdx, \par const GpuMat\& nMatches, \par const GpuMat\& distance, \par std::vector< std::vector<DMatch> >\& matches, \par bool compactResult = false);
}

doc/gpu_image_filtering.tex Normal file

@ -0,0 +1,283 @@
\section{Image Filtering}
Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images.
See also: \hyperref[section.cpp.cpu.ImageFiltering]{Image Filtering}
\cvclass{gpu::BaseRowFilter\_GPU}\label{class.gpu.BaseRowFilter}
The base class for linear or non-linear filters that process rows of 2D arrays. Such filters are used for the ``horizontal'' filtering parts in separable filters.
\begin{lstlisting}
class BaseRowFilter_GPU
{
public:
BaseRowFilter_GPU(int ksize_, int anchor_);
virtual ~BaseRowFilter_GPU() {}
virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
int ksize, anchor;
};
\end{lstlisting}
\cvclass{gpu::BaseColumnFilter\_GPU}\label{class.gpu.BaseColumnFilter}
The base class for linear or non-linear filters that process columns of 2D arrays. Such filters are used for the ``vertical'' filtering parts in separable filters.
\begin{lstlisting}
class BaseColumnFilter_GPU
{
public:
BaseColumnFilter_GPU(int ksize_, int anchor_);
virtual ~BaseColumnFilter_GPU() {}
virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
int ksize, anchor;
};
\end{lstlisting}
\cvclass{gpu::BaseFilter\_GPU}\label{class.gpu.BaseFilter}
The base class for non-separable 2D filters.
\begin{lstlisting}
class CV_EXPORTS BaseFilter_GPU
{
public:
BaseFilter_GPU(const Size& ksize_, const Point& anchor_);
virtual ~BaseFilter_GPU() {}
virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
Size ksize;
Point anchor;
};
\end{lstlisting}
\cvclass{gpu::FilterEngine\_GPU}\label{class.gpu.FilterEngine}
The base class for filter engines.
\begin{lstlisting}
class CV_EXPORTS FilterEngine_GPU
{
public:
virtual ~FilterEngine_GPU() {}
virtual void apply(const GpuMat& src, GpuMat& dst,
Rect roi = Rect(0,0,-1,-1)) = 0;
};
\end{lstlisting}
The class can be used to apply an arbitrary filtering operation to an image. It contains all the necessary intermediate buffers. Pointers to the initialized \texttt{FilterEngine\_GPU} instances are returned by various \texttt{create*Filter\_GPU} functions, see below, and they are used inside high-level functions such as \cvCppCross{gpu::filter2D}, \cvCppCross{gpu::erode}, \cvCppCross{gpu::Sobel} etc.
By using \texttt{FilterEngine\_GPU} instead of these functions, you can avoid unnecessary memory allocation for intermediate buffers and get much better performance:
\begin{lstlisting}
while (...)
{
    cv::gpu::GpuMat src = getImg();
    cv::gpu::GpuMat dst;
    // Allocate and release buffers at each iteration
    cv::gpu::GaussianBlur(src, dst, ksize, sigma1);
}

// Allocate buffers only once
cv::Ptr<cv::gpu::FilterEngine_GPU> filter =
    cv::gpu::createGaussianFilter_GPU(CV_8UC4, ksize, sigma1);
while (...)
{
    cv::gpu::GpuMat src = getImg();
    cv::gpu::GpuMat dst;
    filter->apply(src, dst, cv::Rect(0, 0, src.cols, src.rows));
}
// Release buffers only once
filter.release();
\end{lstlisting}
\texttt{FilterEngine\_GPU} can process a rectangular sub-region of an image. By default, if \texttt{roi == Rect(0,0,-1,-1)}, \texttt{FilterEngine\_GPU} processes the inner region of the image (\texttt{Rect(anchor.x, anchor.y, src\_size.width - ksize.width, src\_size.height - ksize.height)}), because some filters do not check indices outside the image, for better performance. See below for which filters support processing the whole image and for the image type limitations.
The GPU filters do not support in-place mode.
See also: \hyperref[class.gpu.BaseRowFilter]{BaseRowFilter\_GPU}, \hyperref[class.gpu.BaseColumnFilter]{BaseColumnFilter\_GPU}, \hyperref[class.gpu.BaseFilter]{BaseFilter\_GPU}, \hyperref[cppfunc.gpu.createFilter2D]{createFilter2D\_GPU}, \hyperref[cppfunc.gpu.createSeparableFilter]{createSeparableFilter\_GPU}, \hyperref[cppfunc.gpu.createBoxFilter]{createBoxFilter\_GPU}, \hyperref[cppfunc.gpu.createMorphologyFilter]{createMorphologyFilter\_GPU}, \hyperref[cppfunc.gpu.createLinearFilter]{createLinearFilter\_GPU}, \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \hyperref[cppfunc.gpu.createDerivFilter]{createDerivFilter\_GPU}, \hyperref[cppfunc.gpu.createGaussianFilter]{createGaussianFilter\_GPU}
\cvfunc{gpu::createFilter2D\_GPU}\label{cppfunc.gpu.createFilter2D}
Create the non-separable filter engine with the specified filter.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createFilter2D\_GPU(\par const Ptr<BaseFilter\_GPU>\& filter2D, \par int srcType, int dstType);
}
Usually this function is used inside high-level functions, like \hyperref[cppfunc.gpu.createLinearFilter]{createLinearFilter\_GPU}, \hyperref[cppfunc.gpu.createBoxFilter]{createBoxFilter\_GPU}.
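Nevertheless, an engine can also be composed manually from a low-level filter. A sketch, assuming \texttt{src} is a \texttt{CV\_8UC4} \texttt{GpuMat} and using the box filter documented below:
\begin{lstlisting}
// Build a 5x5 box filter and wrap it into a filter engine
cv::Ptr<cv::gpu::BaseFilter_GPU> boxFilter =
    cv::gpu::getBoxFilter_GPU(CV_8UC4, CV_8UC4, cv::Size(5, 5));
cv::Ptr<cv::gpu::FilterEngine_GPU> engine =
    cv::gpu::createFilter2D_GPU(boxFilter, CV_8UC4, CV_8UC4);

cv::gpu::GpuMat dst;
// With the default ROI only the inner region of src is filtered,
// because the box filter does not check indices outside the image
engine->apply(src, dst);
\end{lstlisting}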
\cvfunc{gpu::createSeparableFilter\_GPU}\label{cppfunc.gpu.createSeparableFilter}
Create the separable filter engine with the specified filters.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createSeparableFilter\_GPU(\par const Ptr<BaseRowFilter\_GPU>\& rowFilter, \par const Ptr<BaseColumnFilter\_GPU>\& columnFilter, \par int srcType, int bufType, int dstType);
}
Usually this function is used inside high-level functions, like \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}.
\cvfunc{gpu::getRowSumFilter\_GPU}\label{cppfunc.gpu.getRowSumFilter}
Create horizontal 1D box filter. Supports only \texttt{CV\_8UC1} source type and \texttt{CV\_32FC1} sum type.
\cvdefCpp{
Ptr<BaseRowFilter\_GPU> getRowSumFilter\_GPU(int srcType, int sumType, \par int ksize, int anchor = -1);
}
\cvfunc{gpu::getColumnSumFilter\_GPU}\label{cppfunc.gpu.getColumnSumFilter}
Create vertical 1D box filter. Supports only \texttt{CV\_8UC1} sum type and \texttt{CV\_32FC1} dst type.
\cvdefCpp{
Ptr<BaseColumnFilter\_GPU> getColumnSumFilter\_GPU(int sumType, \par int dstType, int ksize, int anchor = -1);
}
\cvfunc{gpu::createBoxFilter\_GPU}\label{cppfunc.gpu.createBoxFilter}
Create normalized 2D box filter. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types; the destination type must be the same as the source type. This filter does not check indices outside the image.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createBoxFilter\_GPU(int srcType, int dstType, \par const Size\& ksize, \par const Point\& anchor = Point(-1,-1));
}
\cvdefCpp{
Ptr<BaseFilter\_GPU> getBoxFilter\_GPU(int srcType, int dstType, \par const Size\& ksize, \par Point anchor = Point(-1, -1));
}
\cvCppFunc{gpu::boxFilter}
Smooths the image using the normalized box filter. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types; the destination type must be the same as the source type.
\cvdefCpp{
void boxFilter(const GpuMat\& src, GpuMat\& dst, int ddepth, Size ksize, \par Point anchor = Point(-1,-1));
}
See \cvCppCross{boxFilter}, \hyperref[cppfunc.gpu.createBoxFilter]{createBoxFilter\_GPU}.
\cvCppFunc{gpu::blur}
A synonym for normalized box filter.
\cvdefCpp{
void blur(const GpuMat\& src, GpuMat\& dst, Size ksize, \par Point anchor = Point(-1,-1));
}
See \cvCppCross{blur}, \cvCppCross{gpu::boxFilter}.
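For example, assuming \texttt{src} is a \texttt{CV\_8UC1} or \texttt{CV\_8UC4} \texttt{GpuMat}:
\begin{lstlisting}
cv::gpu::GpuMat dst;
// ddepth = -1 keeps the source depth, since the destination type
// must be the same as the source type
cv::gpu::boxFilter(src, dst, -1, cv::Size(5, 5));
// blur performs the same operation
cv::gpu::blur(src, dst, cv::Size(5, 5));
\end{lstlisting}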
\cvfunc{gpu::createMorphologyFilter\_GPU}\label{cppfunc.gpu.createMorphologyFilter}
Create 2D morphological filter. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types. This filter does not check indices outside the image.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createMorphologyFilter\_GPU(int op, int type, \par const Mat\& kernel, \par const Point\& anchor = Point(-1,-1), \par int iterations = 1);
}
\cvdefCpp{
Ptr<BaseFilter\_GPU> getMorphologyFilter\_GPU(int op, int type, \par const Mat\& kernel, const Size\& ksize, \par Point anchor=Point(-1,-1));
}
\begin{description}
\cvarg{op} {The morphology operation id. Only \texttt{MORPH\_ERODE} and \texttt{MORPH\_DILATE} are supported.}
\cvarg{type}{The input/output image type. Only \texttt{CV\_8UC1} and \texttt{CV\_8UC4} are supported.}
\cvarg{kernel}{The 2D 8-bit structuring element for the morphological operation. It must be a continuous matrix.}
\cvarg{ksize}{The horizontal or vertical structuring element size for separable morphological operations.}
\cvarg{anchor}{The anchor position within the structuring element; negative values mean that the anchor is at the center.}
\end{description}
See \cvCppCross{createMorphologyFilter}.
\cvCppFunc{gpu::erode}
Erodes an image by using a specific structuring element. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types.
\cvdefCpp{
void erode(const GpuMat\& src, GpuMat\& dst, const Mat\& kernel, \par Point anchor = Point(-1, -1), \par int iterations = 1);
}
See \cvCppCross{erode}, \hyperref[cppfunc.gpu.createMorphologyFilter]{createMorphologyFilter\_GPU}.
\cvCppFunc{gpu::dilate}
Dilates an image by using a specific structuring element. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types.
\cvdefCpp{
void dilate(const GpuMat\& src, GpuMat\& dst, const Mat\& kernel, \par Point anchor = Point(-1, -1), \par int iterations = 1);
}
See \cvCppCross{dilate}, \hyperref[cppfunc.gpu.createMorphologyFilter]{createMorphologyFilter\_GPU}.
\cvCppFunc{gpu::morphologyEx}
Applies an advanced morphological operation to the image. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types.
\cvdefCpp{
void morphologyEx(const GpuMat\& src, GpuMat\& dst, int op, \par const Mat\& kernel, \par Point anchor = Point(-1, -1), \par int iterations = 1);
}
See \cvCppCross{morphologyEx}.
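For example, assuming \texttt{src} is a \texttt{CV\_8UC1} \texttt{GpuMat} (the kernel size is arbitrary):
\begin{lstlisting}
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT,
                                           cv::Size(3, 3));
cv::gpu::GpuMat eroded, opened;

// Basic erosion with a 3x3 rectangular structuring element
cv::gpu::erode(src, eroded, kernel);

// Morphological opening (erosion followed by dilation)
cv::gpu::morphologyEx(src, opened, cv::MORPH_OPEN, kernel);
\end{lstlisting}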
\cvfunc{gpu::createLinearFilter\_GPU}\label{cppfunc.gpu.createLinearFilter}
Create the non-separable linear filter. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types. This filter does not check indices outside the image.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createLinearFilter\_GPU(int srcType, int dstType, \par const Mat\& kernel, \par const Point\& anchor = Point(-1,-1));
}
\cvdefCpp{
Ptr<BaseFilter\_GPU> getLinearFilter\_GPU(int srcType, int dstType, \par const Mat\& kernel, const Size\& ksize, \par Point anchor = Point(-1, -1));
}
\cvCppFunc{gpu::filter2D}
Applies a non-separable 2D linear filter to the image. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types.
\cvdefCpp{
void filter2D(const GpuMat\& src, GpuMat\& dst, int ddepth, \par const Mat\& kernel, \par Point anchor=Point(-1,-1));
}
See \cvCppCross{filter2D}, \hyperref[cppfunc.gpu.createLinearFilter]{createLinearFilter\_GPU}.
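For example, applying a simple sharpening kernel (a sketch; \texttt{src} is assumed to be a \texttt{CV\_8UC4} \texttt{GpuMat}):
\begin{lstlisting}
// 3x3 sharpening kernel
cv::Mat kernel = (cv::Mat_<float>(3, 3) <<  0, -1,  0,
                                           -1,  5, -1,
                                            0, -1,  0);
cv::gpu::GpuMat dst;
// ddepth = -1: the destination has the same depth as the source
cv::gpu::filter2D(src, dst, -1, kernel);
\end{lstlisting}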
\cvCppFunc{gpu::Laplacian}
Applies the Laplacian operator to the image. Supports \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source types. Only \texttt{ksize} = 1 and \texttt{ksize} = 3 are supported.
\cvdefCpp{
void Laplacian(const GpuMat\& src, GpuMat\& dst, int ddepth, \par int ksize = 1, double scale = 1);
}
See \cvCppCross{Laplacian}, \cvCppCross{gpu::filter2D}.
\cvfunc{gpu::getLinearRowFilter\_GPU}\label{cppfunc.gpu.getLinearRowFilter}
Create the primitive row filter with the specified kernel.
\cvdefCpp{
Ptr<BaseRowFilter\_GPU> getLinearRowFilter\_GPU(int srcType, \par int bufType, const Mat\& rowKernel, int anchor = -1, \par int borderType = BORDER\_CONSTANT);
}
Supports only \texttt{CV\_8UC1}, \texttt{CV\_8UC4}, \texttt{CV\_16SC1}, \texttt{CV\_16SC2}, \texttt{CV\_32SC1} and \texttt{CV\_32FC1} source types. There are two versions of the algorithm: NPP and OpenCV. The NPP version is called when \texttt{srcType == CV\_8UC1} or \texttt{srcType == CV\_8UC4} and \texttt{bufType == srcType}; otherwise the OpenCV version is called. The NPP version supports only the \texttt{BORDER\_CONSTANT} border type and does not check indices outside the image. The OpenCV version supports only \texttt{CV\_32F} buffer depth and the \texttt{BORDER\_REFLECT101}, \texttt{BORDER\_REPLICATE} and \texttt{BORDER\_CONSTANT} border types, and it checks indices outside the image.
See also: \hyperref[cppfunc.gpu.getLinearColumnFilter]{getLinearColumnFilter\_GPU}, \cvCppCross{createSeparableLinearFilter}.
\cvfunc{gpu::getLinearColumnFilter\_GPU}\label{cppfunc.gpu.getLinearColumnFilter}
Create the primitive column filter with the specified kernel.
\cvdefCpp{
Ptr<BaseColumnFilter\_GPU> getLinearColumnFilter\_GPU(int bufType, \par int dstType, const Mat\& columnKernel, int anchor = -1, \par int borderType = BORDER\_CONSTANT);
}
Supports only \texttt{CV\_8UC1}, \texttt{CV\_8UC4}, \texttt{CV\_16SC1}, \texttt{CV\_16SC2}, \texttt{CV\_32SC1} and \texttt{CV\_32FC1} destination types. There are two versions of the algorithm: NPP and OpenCV. The NPP version is called when \texttt{dstType == CV\_8UC1} or \texttt{dstType == CV\_8UC4} and \texttt{bufType == dstType}; otherwise the OpenCV version is called. The NPP version supports only the \texttt{BORDER\_CONSTANT} border type and does not check indices outside the image. The OpenCV version supports only \texttt{CV\_32F} buffer depth and the \texttt{BORDER\_REFLECT101}, \texttt{BORDER\_REPLICATE} and \texttt{BORDER\_CONSTANT} border types, and it checks indices outside the image.
See also: \hyperref[cppfunc.gpu.getLinearRowFilter]{getLinearRowFilter\_GPU}, \cvCppCross{createSeparableLinearFilter}.
\cvfunc{gpu::createSeparableLinearFilter\_GPU}\label{cppfunc.gpu.createSeparableLinearFilter}
Create the separable linear filter engine.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createSeparableLinearFilter\_GPU(int srcType, \par int dstType, const Mat\& rowKernel, const Mat\& columnKernel, \par const Point\& anchor = Point(-1,-1), \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.getLinearRowFilter]{getLinearRowFilter\_GPU}, \hyperref[cppfunc.gpu.getLinearColumnFilter]{getLinearColumnFilter\_GPU}, \cvCppCross{createSeparableLinearFilter}.
\cvCppFunc{gpu::sepFilter2D}
Applies a separable 2D linear filter to the image.
\cvdefCpp{
void sepFilter2D(const GpuMat\& src, GpuMat\& dst, int ddepth, \par const Mat\& kernelX, const Mat\& kernelY, \par Point anchor = Point(-1,-1), \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \cvCppCross{sepFilter2D}.
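For example, an explicit separable Gaussian smoothing built from its row and column kernels (a sketch; \texttt{src} is assumed to be a supported type such as \texttt{CV\_8UC4}, and the kernel size and sigma are arbitrary):
\begin{lstlisting}
// 1D Gaussian kernel used for both the row and the column pass
cv::Mat kernel = cv::getGaussianKernel(7, 1.5, CV_32F);
cv::gpu::GpuMat dst;
cv::gpu::sepFilter2D(src, dst, src.depth(), kernel, kernel);
\end{lstlisting}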
\cvfunc{gpu::createDerivFilter\_GPU}\label{cppfunc.gpu.createDerivFilter}
Create filter engine for the generalized Sobel operator.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createDerivFilter\_GPU(int srcType, int dstType, \par int dx, int dy, int ksize, \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \cvCppCross{createDerivFilter}.
\cvCppFunc{gpu::Sobel}
Applies the generalized Sobel operator to the image.
\cvdefCpp{
void Sobel(const GpuMat\& src, GpuMat\& dst, int ddepth, int dx, int dy, \par int ksize = 3, double scale = 1, \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \cvCppCross{Sobel}.
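For example, computing the first derivatives in x and y (a sketch; \texttt{src} is assumed to be a \texttt{CV\_8UC1} \texttt{GpuMat} and the destination depth is kept equal to the source depth):
\begin{lstlisting}
cv::gpu::GpuMat dx, dy;
// Note that with an 8-bit destination negative derivative
// values are saturated to zero
cv::gpu::Sobel(src, dx, src.depth(), 1, 0, 3);
cv::gpu::Sobel(src, dy, src.depth(), 0, 1, 3);
\end{lstlisting}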
\cvCppFunc{gpu::Scharr}
Calculates the first x- or y- image derivative using the Scharr operator.
\cvdefCpp{
void Scharr(const GpuMat\& src, GpuMat\& dst, int ddepth, \par int dx, int dy, double scale = 1, \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \cvCppCross{Scharr}.
\cvfunc{gpu::createGaussianFilter\_GPU}\label{cppfunc.gpu.createGaussianFilter}
Create the Gaussian filter engine.
\cvdefCpp{
Ptr<FilterEngine\_GPU> createGaussianFilter\_GPU(int type, Size ksize, \par double sigma1, double sigma2 = 0, \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createSeparableLinearFilter]{createSeparableLinearFilter\_GPU}, \cvCppCross{createGaussianFilter}.
\cvCppFunc{gpu::GaussianBlur}
Smooths the image using a Gaussian filter.
\cvdefCpp{
void GaussianBlur(const GpuMat\& src, GpuMat\& dst, Size ksize, \par double sigma1, double sigma2 = 0, \par int rowBorderType = BORDER\_DEFAULT, \par int columnBorderType = -1);
}
See \hyperref[cppfunc.gpu.createGaussianFilter]{createGaussianFilter\_GPU}, \cvCppCross{GaussianBlur}.
\cvfunc{gpu::getMaxFilter\_GPU}\label{cppfunc.gpu.getMaxFilter}
Create maximum filter. Supports only \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source and destination types.
\cvdefCpp{
Ptr<BaseFilter\_GPU> getMaxFilter\_GPU(int srcType, int dstType, \par const Size\& ksize, Point anchor = Point(-1,-1));
}
\cvfunc{gpu::getMinFilter\_GPU}\label{cppfunc.gpu.getMinFilter}
Create minimum filter. Supports only \texttt{CV\_8UC1} and \texttt{CV\_8UC4} source and destination types.
\cvdefCpp{
Ptr<BaseFilter\_GPU> getMinFilter\_GPU(int srcType, int dstType, \par const Size\& ksize, Point anchor = Point(-1,-1));
}


@ -1,4 +1,4 @@
\section{Image Filtering}
\section{Image Filtering}\ifCpp\label{section.cpp.cpu.ImageFiltering}\fi
Functions and classes described in this section are used to perform various linear or non-linear filtering operations on 2D images (represented as \cvCppCross{Mat}'s); that is, for each pixel location $(x,y)$ in the source image, some of its (normally rectangular) neighborhood is considered and used to compute the response. In the case of a linear filter it is a weighted sum of pixel values; in the case of morphological operations it is the minimum or maximum, etc. The computed response is stored in the destination image at the same location $(x,y)$. This means that the output image will be of the same size as the input image. Normally, the functions support multi-channel arrays, in which case every channel is processed independently; therefore the output image will also have the same number of channels as the input one.

File diff suppressed because it is too large


@ -63,6 +63,8 @@
\chapter{gpu. GPU-based Functionality}
\renewcommand{\curModule}{gpu}
\input{gpu}
\input{gpu_features2d}
\input{gpu_image_filtering}
\fi


@ -804,7 +804,7 @@ namespace cv
};
//! returns the non-separable filter engine with the specified filter
CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU> filter2D, int srcType, int dstType);
CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU>& filter2D, int srcType, int dstType);
//! returns the separable filter engine with the specified filters
CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>& rowFilter,


@ -48,7 +48,7 @@ using namespace cv::gpu;
#if !defined (HAVE_CUDA)
Ptr<FilterEngine_GPU> cv::gpu::createFilter2D_GPU(const Ptr<BaseFilter_GPU>, int, int) { throw_nogpu(); return Ptr<FilterEngine_GPU>(0); }
Ptr<FilterEngine_GPU> cv::gpu::createFilter2D_GPU(const Ptr<BaseFilter_GPU>&, int, int) { throw_nogpu(); return Ptr<FilterEngine_GPU>(0); }
Ptr<FilterEngine_GPU> cv::gpu::createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>&, const Ptr<BaseColumnFilter_GPU>&, int, int, int) { throw_nogpu(); return Ptr<FilterEngine_GPU>(0); }
Ptr<BaseRowFilter_GPU> cv::gpu::getRowSumFilter_GPU(int, int, int, int) { throw_nogpu(); return Ptr<BaseRowFilter_GPU>(0); }
Ptr<BaseColumnFilter_GPU> cv::gpu::getColumnSumFilter_GPU(int, int, int, int) { throw_nogpu(); return Ptr<BaseColumnFilter_GPU>(0); }
@ -159,7 +159,7 @@ namespace
};
}
Ptr<FilterEngine_GPU> cv::gpu::createFilter2D_GPU(const Ptr<BaseFilter_GPU> filter2D, int srcType, int dstType)
Ptr<FilterEngine_GPU> cv::gpu::createFilter2D_GPU(const Ptr<BaseFilter_GPU>& filter2D, int srcType, int dstType)
{
return Ptr<FilterEngine_GPU>(new Filter2DEngine_GPU(filter2D, srcType, dstType));
}