Doxygen documentation: flann, photo and stitching modules

This commit is contained in:
Maksim Shabunin 2014-11-19 19:08:03 +03:00
parent 8e9ea0e3d1
commit 472c210687
19 changed files with 1147 additions and 61 deletions

View File

@ -241,7 +241,8 @@ PREDEFINED = __cplusplus=1 \
CV_INLINE= \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1
CV_NEON=1 \
FLANN_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =

View File

@ -47,6 +47,15 @@
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/flann/flann_base.hpp"
/**
@defgroup flann Clustering and Search in Multi-Dimensional Spaces
This section documents OpenCV's interface to the FLANN library. FLANN (Fast Library for Approximate
Nearest Neighbors) is a library that contains a collection of algorithms optimized for fast nearest
neighbor search in large datasets and for high dimensional features. More information about FLANN
can be found in @cite Muja2009 .
*/
namespace cvflann
{
CV_EXPORTS flann_distance_t flann_distance_type();
@ -59,6 +68,10 @@ namespace cv
namespace flann
{
//! @addtogroup flann
//! @{
template <typename T> struct CvType {};
template <> struct CvType<unsigned char> { static int type() { return CV_8U; } };
template <> struct CvType<char> { static int type() { return CV_8S; } };
@ -88,7 +101,9 @@ using ::cvflann::ChiSquareDistance;
using ::cvflann::KL_Divergence;
/** @brief The FLANN nearest neighbor index class. This class is templated with the type of elements for which
the index is built.
*/
template <typename Distance>
class GenericIndex
{
@ -96,10 +111,108 @@ public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
/** @brief Constructs a nearest neighbor search index for a given dataset.
@param features Matrix containing the features (points) to index. The size of the matrix is
num\_features x feature\_dimensionality and the data type of the elements in the matrix must
coincide with the type of the index.
@param params Structure containing the index parameters. The type of index that will be
constructed depends on the type of this parameter. See the description.
@param distance Distance functor of the template type, used to compare features.
The method constructs a fast search structure from a set of features using the specified algorithm
with specified parameters, as defined by params. params is a reference to one of the following
IndexParams descendants:
- **LinearIndexParams** When passing an object of this type, the index will perform a linear,
brute-force search. :
@code
struct LinearIndexParams : public IndexParams
{
};
@endcode
- **KDTreeIndexParams** When passing an object of this type the index constructed will consist of
a set of randomized kd-trees which will be searched in parallel. :
@code
struct KDTreeIndexParams : public IndexParams
{
KDTreeIndexParams( int trees = 4 );
};
@endcode
- **KMeansIndexParams** When passing an object of this type the index constructed will be a
hierarchical k-means tree. :
@code
struct KMeansIndexParams : public IndexParams
{
KMeansIndexParams(
int branching = 32,
int iterations = 11,
flann_centers_init_t centers_init = CENTERS_RANDOM,
float cb_index = 0.2 );
};
@endcode
- **CompositeIndexParams** When using a parameters object of this type the index created
combines the randomized kd-trees and the hierarchical k-means tree. :
@code
struct CompositeIndexParams : public IndexParams
{
CompositeIndexParams(
int trees = 4,
int branching = 32,
int iterations = 11,
flann_centers_init_t centers_init = CENTERS_RANDOM,
float cb_index = 0.2 );
};
@endcode
- **LshIndexParams** When using a parameters object of this type the index created uses
multi-probe LSH (see *Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search*
by Qin Lv, William Josephson, Zhe Wang, Moses Charikar and Kai Li, Proceedings of the 33rd
International Conference on Very Large Data Bases (VLDB), Vienna, Austria, September 2007) :
@code
struct LshIndexParams : public IndexParams
{
LshIndexParams(
unsigned int table_number,
unsigned int key_size,
unsigned int multi_probe_level );
};
@endcode
- **AutotunedIndexParams** When passing an object of this type the index created is
automatically tuned to offer the best performance, by choosing the optimal index type
(randomized kd-trees, hierarchical kmeans, linear) and parameters for the dataset provided. :
@code
struct AutotunedIndexParams : public IndexParams
{
AutotunedIndexParams(
float target_precision = 0.9,
float build_weight = 0.01,
float memory_weight = 0,
float sample_fraction = 0.1 );
};
@endcode
- **SavedIndexParams** This object type is used for loading a previously saved index from the
disk. :
@code
struct SavedIndexParams : public IndexParams
{
SavedIndexParams( String filename );
};
@endcode
*/
GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance());
~GenericIndex();
/** @brief Performs a K-nearest neighbor search for a given query point using the index.
@param query The query point
@param indices Vector that will contain the indices of the K-nearest neighbors found. It must have
at least knn size.
@param dists Vector that will contain the distances to the K-nearest neighbors found. It must have
at least knn size.
@param knn Number of nearest neighbors to search for.
@param params SearchParams
*/
void knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices,
std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
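For illustration, a minimal usage sketch (the random float data, the L2 distance and all sizes are
arbitrary choices, not mandated by the API):
@code
#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

void flannExample()
{
    cv::Mat features(1000, 32, CV_32F);          // 1000 features, 32 dimensions
    cv::randu(features, 0.f, 1.f);

    // Build 4 randomized kd-trees over the dataset
    cv::flann::GenericIndex< cvflann::L2<float> > index(
        features, cvflann::KDTreeIndexParams(4));

    // 5 nearest neighbors for the first 10 points, checking at most 64 leaves
    cv::Mat queries = features.rowRange(0, 10);
    cv::Mat indices(queries.rows, 5, CV_32S);
    cv::Mat dists(queries.rows, 5, CV_32F);
    index.knnSearch(queries, indices, dists, 5, cvflann::SearchParams(64));
}
@endcode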
@ -123,6 +236,7 @@ private:
::cvflann::Index<Distance>* nnIndex;
};
//! @cond IGNORED
#define FLANN_DISTANCE_CHECK \
if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \
@ -218,6 +332,8 @@ int GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& di
return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
//! @endcond
/**
* @deprecated Use GenericIndex class instead
*/
@ -283,6 +399,8 @@ template <typename T>
class FLANN_DEPRECATED Index_;
#endif
//! @cond IGNORED
template <typename T>
Index_<T>::Index_(const Mat& dataset, const ::cvflann::IndexParams& params)
{
@ -377,7 +495,25 @@ int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, Distance
if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
}
//! @endcond
/** @brief Clusters features using hierarchical k-means algorithm.
@param features The points to be clustered. The matrix must have elements of type
Distance::ElementType.
@param centers The centers of the clusters obtained. The matrix must have type
Distance::ResultType. The number of rows in this matrix represents the number of clusters desired,
however, because of the way the cut in the hierarchical tree is chosen, the number of clusters
computed will be the highest number of the form (branching-1)\*k+1 that's lower than the number of
clusters desired, where branching is the tree's branching factor (see description of the
KMeansIndexParams).
@param params Parameters used in the construction of the hierarchical k-means tree.
@param d Distance to be used for clustering.
The method clusters the given feature vectors by constructing a hierarchical k-means tree and
choosing a cut in the tree that minimizes the clusters' variance. It returns the number of clusters
found.
*/
template <typename Distance>
int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params,
Distance d = Distance())
@ -396,7 +532,8 @@ int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::K
return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);
}
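A short clustering sketch (sizes are illustrative; with the default branching factor of 32,
requesting 100 clusters yields 31*3+1 = 94, per the rule described above):
@code
cv::Mat points(1000, 2, CV_32F);
cv::randu(points, 0.f, 100.f);

cv::Mat centers(100, 2, CV_32F);                 // request up to 100 clusters
int found = cv::flann::hierarchicalClustering< cvflann::L2<float> >(
    points, centers, cvflann::KMeansIndexParams());
// found == 94 here: the largest value of the form (branching-1)*k+1 below 100
@endcode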
/** @deprecated
*/
template <typename ELEM_TYPE, typename DIST_TYPE>
FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params)
{
@ -417,6 +554,8 @@ FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, c
}
}
//! @} flann
} } // namespace cv::flann
#endif

View File

@ -46,12 +46,27 @@
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
/*! \namespace cv
Namespace where all the C++ OpenCV functionality resides
*/
/**
@defgroup photo Computational Photography
@{
@defgroup photo_denoise Denoising
@defgroup photo_hdr HDR imaging
This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,
camera calibration with multiple exposures and exposure fusion.
@defgroup photo_clone Seamless Cloning
@defgroup photo_render Non-Photorealistic Rendering
@defgroup photo_c C API
@}
*/
namespace cv
{
//! @addtogroup photo
//! @{
//! the inpainting algorithm
enum
{
@ -72,44 +87,213 @@ enum
NORMCONV_FILTER = 2
};
//! restores the damaged image areas using one of the available inpainting algorithms
/** @brief Restores the selected region in an image using the region neighborhood.
@param src Input 8-bit 1-channel or 3-channel image.
@param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
needs to be inpainted.
@param dst Output image with the same size and type as src .
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be one of the following:
- **INPAINT\_NS** Navier-Stokes based method [Navier01]
- **INPAINT\_TELEA** Method by Alexandru Telea @cite Telea04.
The function reconstructs the selected image area from the pixel near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.
@note
- An example using the inpainting technique can be found at
opencv\_source\_code/samples/cpp/inpaint.cpp
- (Python) An example using the inpainting technique can be found at
opencv\_source\_code/samples/python2/inpaint.py
*/
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
OutputArray dst, double inpaintRadius, int flags );
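A minimal call for illustration (the file names are hypothetical; non-zero mask pixels mark the
region to restore):
@code
#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat img  = cv::imread("damaged.jpg");
cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);
cv::Mat restored;
cv::inpaint(img, mask, restored, 3 /*inpaintRadius*/, cv::INPAINT_TELEA);
@endcode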
//! @addtogroup photo_denoise
//! @{
/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. Noise is expected to be Gaussian white noise.
@param src Input 8-bit 1-channel, 2-channel or 3-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise
This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. An advanced usage of this function is manual denoising of a colored
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored: the
image is converted to CIELAB colorspace and then the L and AB components are denoised separately
with different h parameters.
*/
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
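A grayscale denoising sketch (input file is hypothetical; h = 10 is merely a typical starting
point for visible noise, not a prescribed value):
@code
cv::Mat noisy = cv::imread("noisy.png", cv::IMREAD_GRAYSCALE);
cv::Mat clean;
cv::fastNlMeansDenoising(noisy, clean, 10.f /*h*/, 7, 21);
@endcode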
/** @brief Modification of fastNlMeansDenoising function for colored images
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value
perfectly removes noise but also removes image details; a smaller h value preserves details but
also preserves some noise
@param hColor The same as h but for color components. For most images a value of 10 is enough
to remove colored noise without distorting colors
The function converts the image to CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoising function.
*/
CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have
been captured over a small period of time, for example a video. This version of the function is for
grayscale images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>
@param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel images sequence. All images should
have the same type and size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value
perfectly removes noise but also removes image details; a smaller h value preserves details but
also preserves some noise
*/
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Modification of fastNlMeansDenoisingMulti function for colored image sequences
@param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value
perfectly removes noise but also removes image details; a smaller h value preserves details but
also preserves some noise.
@param hColor The same as h but for color components.
The function converts the images to CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoisingMulti function.
*/
CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
int imgToDenoiseIndex, int temporalWindowSize,
float h = 3, float hColor = 3,
int templateWindowSize = 7, int searchWindowSize = 21);
/** @brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional). As image denoising, in particular, may be seen as
a variational problem, the primal-dual algorithm can then be used to perform denoising, and this is
exactly what is implemented.
It should be noted that this implementation was taken from the July 2013 blog entry
@cite Mordvintsev, which also contained (slightly more general) ready-to-use source code in Python.
Subsequently, that code was rewritten in C++ with the use of OpenCV by Vadim Pisarevsky at the end
of July 2013 and finally it was slightly adapted by later authors.
Although the thorough discussion and justification of the algorithm involved may be found in
@cite ChambolleEtAl, it might make sense to skim over it here, following @cite Mordvintsev. To begin
with, we consider the 1-byte gray-level images as the functions from the rectangular domain of
pixels (it may be seen as set
\f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some
\f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. We shall denote the noised images as \f$f_i\f$ and with
this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula
\f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f]
\f$\|\|\cdot\|\|\f$ here denotes \f$L_2\f$-norm and as you see, the first addend states that we want our
image to be smooth (ideally, having zero gradient, thus being constant) and the second states that
we want our result to be close to the observations we've got. If we treat \f$x\f$ as a function, this
is exactly the functional that we seek to minimize, and here the primal-dual algorithm comes into
play.
@param observations This array should contain one or more noised versions of the image that is to
be restored.
@param result Here the denoised image will be stored. There is no need to do pre-allocation of
storage space, as it will be automatically allocated, if necessary.
@param lambda Corresponds to \f$\lambda\f$ in the formulas above. As it is enlarged, the smooth
(blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
speaking, as it becomes smaller, the result will be more blurred but more severe outliers will be
removed.
@param niters Number of iterations that the algorithm will run. Of course, the more iterations the
better, but it is hard to quantitatively refine this statement, so just use the default and
increase it if the results are poor.
*/
CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda=1.0, int niters=30);
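A sketch with three noised observations of the same scene (the file names are hypothetical; all
frames must be 8-bit single-channel and equally sized):
@code
std::vector<cv::Mat> observations;
for (int i = 0; i < 3; i++)
    observations.push_back(
        cv::imread(cv::format("noisy_%d.png", i), cv::IMREAD_GRAYSCALE));

cv::Mat result;
cv::denoise_TVL1(observations, result, 1.0 /*lambda*/, 30 /*niters*/);
@endcode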
//! @} photo_denoise
//! @addtogroup photo_hdr
//! @{
enum { LDR_SIZE = 256 };
/** @brief Base class for tonemapping algorithms - tools that are used to map an HDR image to the 8-bit range.
*/
class CV_EXPORTS_W Tonemap : public Algorithm
{
public:
/** @brief Tonemaps image
@param src source image - 32-bit 3-channel Mat
@param dst destination image - 32-bit 3-channel Mat with values in [0, 1] range
*/
CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;
CV_WRAP virtual float getGamma() const = 0;
CV_WRAP virtual void setGamma(float gamma) = 0;
};
/** @brief Creates simple linear mapper with gamma correction
@param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma
equal to 2.2f is suitable for most displays.
Generally gamma \> 1 brightens the image and gamma \< 1 darkens it.
*/
CV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f);
// "Adaptive Logarithmic Mapping For Displaying HighContrast Scenes", Drago et al., 2003
/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in
logarithmic domain.
Since it's a global operator, the same function is applied to all the pixels; it is controlled by
the bias parameter.
Optional saturation enhancement is possible as described in @cite FL02.
For more information see @cite DM03.
*/
class CV_EXPORTS_W TonemapDrago : public Tonemap
{
public:
@ -121,10 +305,25 @@ public:
CV_WRAP virtual void setBias(float bias) = 0;
};
/** @brief Creates TonemapDrago object
@param gamma gamma value for gamma correction. See createTonemap
@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
than 1 increase saturation and values less than 1 decrease it.
@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best
results, default value is 0.85.
*/
CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);
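A tonemapping sketch (assuming the input loads as a 32-bit 3-channel floating-point HDR image,
e.g. from a Radiance .hdr file; the file name is illustrative):
@code
cv::Mat hdr = cv::imread("scene.hdr", cv::IMREAD_UNCHANGED); // expected CV_32FC3
cv::Ptr<cv::TonemapDrago> tonemap = cv::createTonemapDrago(1.0f, 1.0f, 0.85f);
cv::Mat ldr;
tonemap->process(hdr, ldr);            // values in [0, 1]
ldr.convertTo(ldr, CV_8UC3, 255);      // scale to a displayable 8-bit image
cv::imwrite("scene_ldr.png", ldr);
@endcode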
// "Fast Bilateral Filtering for the Display of High-Dynamic-Range Images", Durand, Dorsey, 2002
/** @brief This algorithm decomposes the image into two layers, a base layer and a detail layer, using a
bilateral filter, and compresses contrast of the base layer thus preserving all the details.
This implementation uses the regular bilateral filter from OpenCV.
Saturation enhancement is possible as in TonemapDrago.
For more information see @cite DD02.
*/
class CV_EXPORTS_W TonemapDurand : public Tonemap
{
public:
@ -142,11 +341,25 @@ public:
CV_WRAP virtual void setSigmaColor(float sigma_color) = 0;
};
/** @brief Creates TonemapDurand object
@param gamma gamma value for gamma correction. See createTonemap
@param contrast resulting contrast on logarithmic scale, i. e. log(max / min), where max and min
are maximum and minimum luminance values of the resulting image.
@param saturation saturation enhancement value. See createTonemapDrago
@param sigma\_space bilateral filter sigma in color space
@param sigma\_color bilateral filter sigma in coordinate space
*/
CV_EXPORTS_W Ptr<TonemapDurand>
createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);
// "Dynamic Range Reduction Inspired by Photoreceptor Physiology", Reinhard, Devlin, 2005
/** @brief This is a global tonemapping operator that models the human visual system.
The mapping function is controlled by an adaptation parameter that is computed using light
adaptation and color adaptation.
For more information see @cite RD05.
*/
class CV_EXPORTS_W TonemapReinhard : public Tonemap
{
public:
@ -160,11 +373,24 @@ public:
CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};
/** @brief Creates TonemapReinhard object
@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light\_adapt light adaptation in [0, 1] range. If 1, adaptation is based only on the pixel
value; if 0, it's global; otherwise it's a weighted mean of these two cases.
@param color\_adapt chromatic adaptation in [0, 1] range. If 1, channels are treated
independently; if 0, the adaptation level is the same for each channel.
*/
CV_EXPORTS_W Ptr<TonemapReinhard>
createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);
// "Perceptual Framework for Contrast Processing of High Dynamic Range Images", Mantiuk et al., 2006
/** @brief This algorithm transforms the image to contrast using gradients on all levels of a Gaussian
pyramid, transforms the contrast values to HVS response and scales the response. After this the
image is reconstructed from the new contrast values.
For more information see @cite MM06.
*/
class CV_EXPORTS_W TonemapMantiuk : public Tonemap
{
public:
@ -175,28 +401,75 @@ public:
CV_WRAP virtual void setSaturation(float saturation) = 0;
};
/** @brief Creates TonemapMantiuk object
@param gamma gamma value for gamma correction. See createTonemap
@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
dynamic range. Values from 0.6 to 0.9 produce best results.
@param saturation saturation enhancement value. See createTonemapDrago
*/
CV_EXPORTS_W Ptr<TonemapMantiuk>
createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);
/** @brief The base class for algorithms that align images of the same scene with different exposures
*/
class CV_EXPORTS_W AlignExposures : public Algorithm
{
public:
/** @brief Aligns images
@param src vector of input images
@param dst vector of aligned images
@param times vector of exposure time values for each image
@param response 256x1 matrix with inverse camera response function for each pixel value, it should
have the same number of channels as images.
*/
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
InputArray times, InputArray response) = 0;
};
// "Fast, Robust Image Registration for Compositing High Dynamic Range Photographs from Handheld Exposures", Ward, 2003
/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median
luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.
It is invariant to exposure, so exposure values and camera response are not necessary.
In this implementation new image regions are filled with zeros.
For more information see @cite GW03.
*/
class CV_EXPORTS_W AlignMTB : public AlignExposures
{
public:
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
InputArray times, InputArray response) = 0;
/** @brief Short version of process that doesn't take the extra arguments.
@param src vector of input images
@param dst vector of aligned images
*/
CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;
/** @brief Calculates the shift between two images, i.e. how to shift the second image to make it
correspond with the first.
@param img0 first image
@param img1 second image
*/
CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;
/** @brief Helper function that shifts a Mat, filling new regions with zeros.
@param src input image
@param dst result image
@param shift shift value
*/
CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;
/** @brief Computes the median threshold and exclude bitmaps of the given image.
@param img input image
@param tb median threshold bitmap
@param eb exclude bitmap
*/
CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;
CV_WRAP virtual int getMaxBits() const = 0;
@ -209,16 +482,36 @@ public:
CV_WRAP virtual void setCut(bool value) = 0;
};
/** @brief Creates AlignMTB object
@param max\_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude\_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true cuts images, otherwise fills the new regions with zeros.
*/
CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);
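A sketch of aligning a hand-held exposure bracket (the image list is assumed to be filled
elsewhere):
@code
std::vector<cv::Mat> exposures;                // differently exposed shots
std::vector<cv::Mat> aligned;
cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB(6 /*max_bits*/);
align->process(exposures, aligned);            // short overload: no times/response
@endcode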
/** @brief The base class for camera response calibration algorithms.
*/
class CV_EXPORTS_W CalibrateCRF : public Algorithm
{
public:
/** @brief Recovers inverse camera response.
@param src vector of input images
@param dst 256x1 matrix with inverse camera response function
@param times vector of exposure time values for each image
*/
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};
// "Recovering High Dynamic Range Radiance Maps from Photographs", Debevec, Malik, 1997
/** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. The objective function is constructed using pixel values at the same
position in all images; an extra term is added to make the result smoother.
For more information see @cite DM97.
*/
class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
{
public:
@ -232,10 +525,21 @@ public:
CV_WRAP virtual void setRandom(bool random) = 0;
};
/** @brief Creates CalibrateDebevec object
@param samples number of pixel locations to use
@param lambda smoothness term weight. Greater values produce smoother results, but can alter the
response.
@param random if true, sample pixel locations are chosen at random; otherwise they form a
rectangular grid.
*/
CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);
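A sketch recovering the inverse CRF from three exposures (the exposure times are illustrative and
the images vector is assumed to hold aligned shots):
@code
std::vector<cv::Mat> images;                    // three aligned exposures
std::vector<float> times = { 1/30.0f, 1/8.0f, 1/2.0f };
cv::Mat response;                               // 256x1, one channel per image channel
cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec();
calibrate->process(images, response, times);
@endcode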
// "Dynamic range improvement through multiple exposures", Robertson et al., 1999
/** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. This algorithm uses all image pixels.
For more information see @cite RB99.
*/
class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
{
public:
@ -248,17 +552,35 @@ public:
CV_WRAP virtual Mat getRadiance() const = 0;
};
/** @brief Creates CalibrateRobertson object
@param max\_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
*/
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);
/** @brief The base class for algorithms that can merge an exposure sequence into a single image.
*/
class CV_EXPORTS_W MergeExposures : public Algorithm
{
public:
/** @brief Merges images.
@param src vector of input images
@param dst result image
@param times vector of exposure time values for each image
@param response 256x1 matrix with inverse camera response function for each pixel value, it should
have the same number of channels as images.
*/
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
InputArray times, InputArray response) = 0;
};
// "Recovering High Dynamic Range Radiance Maps from Photographs", Debevec, Malik, 1997
/** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure
values and camera response.
For more information see @cite DM97.
*/
class CV_EXPORTS_W MergeDebevec : public MergeExposures
{
public:
@ -267,15 +589,31 @@ public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};
/** @brief Creates MergeDebevec object
*/
CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();
// "Exposure Fusion", Mertens et al., 2007
/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, then the images are
combined using Laplacian pyramids.
The resulting image weight is constructed as a weighted average of the contrast, saturation and
well-exposedness measures.
The resulting image doesn't require tonemapping and can be converted to an 8-bit image by
multiplying by 255, but it's recommended to apply gamma correction and/or linear tonemapping.
For more information see @cite MK07.
*/
class CV_EXPORTS_W MergeMertens : public MergeExposures
{
public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
InputArray times, InputArray response) = 0;
/** @brief Short version of process that doesn't take the extra arguments.
@param src vector of input images
@param dst result image
*/
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;
CV_WRAP virtual float getContrastWeight() const = 0;
@ -288,11 +626,20 @@ public:
CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0;
};
/** @brief Creates MergeMertens object
@param contrast\_weight contrast measure weight. See MergeMertens.
@param saturation\_weight saturation measure weight
@param exposure\_weight well-exposedness measure weight
*/
CV_EXPORTS_W Ptr<MergeMertens>
createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);
// "Dynamic range improvement through multiple exposures", Robertson et al., 1999
/** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure
values and camera response.
For more information see @cite RB99.
*/
class CV_EXPORTS_W MergeRobertson : public MergeExposures
{
public:
@ -301,35 +648,158 @@ public:
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};
/** @brief Creates MergeRobertson object
*/
CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();
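Putting the pieces together, a hedged end-to-end merge sketch (reusing the images, times and
response variables from the calibration example above):
@code
// Radiometrically merge the bracket into a single HDR image
cv::Mat hdr;
cv::createMergeDebevec()->process(images, hdr, times, response);

// Alternatively, exposure fusion: no times or response required,
// and the result is already display-referred
cv::Mat fusion;
cv::createMergeMertens()->process(images, fusion);
@endcode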
//! @} photo_hdr
/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
black-and-white photograph rendering, and in many single channel image processing applications
@cite CL12.
@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color\_boost Output 8-bit 3-channel image.
This function is to be applied on color images.
*/
CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);
//! @addtogroup photo_clone
//! @{
/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes confined to a selection. Here we are interested in achieving local
changes, ones that are restricted to a manually selected region (ROI), in a seamless and effortless
manner. The extent of the changes ranges from slight distortions to complete replacement by novel
content @cite PM03.
@param src Input 8-bit 3-channel image.
@param dst Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be one of the following:
- **NORMAL\_CLONE** The power of the method is fully expressed when inserting objects with
complex outlines into a new background
- **MIXED\_CLONE** The classic method, color-based selection and alpha masking might be time
consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
original image, is not effective. Mixed seamless cloning based on a loose selection proves
effective.
- **FEATURE\_EXCHANGE** Feature exchange allows the user to easily replace certain features of
one object by alternative features.
*/
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
OutputArray blend, int flags);
/** @brief Given an original color image, two differently colored versions of this image can be mixed
seamlessly.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src .
@param red\_mul R-channel multiply factor.
@param green\_mul G-channel multiply factor.
@param blue\_mul B-channel multiply factor.
The multiplication factor is between 0.5 and 2.5.
*/
CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f,
float green_mul = 1.0f, float blue_mul = 1.0f);
/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and
then integrating back with a Poisson solver locally modifies the apparent illumination of an image.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param alpha Value ranges between 0-2.
@param beta Value ranges between 0-2.
This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
*/
CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst,
float alpha = 0.2f, float beta = 0.4f);
/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge
Detector is used.
@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low\_threshold Range from 0 to 100.
@param high\_threshold Value \> 100.
@param kernel\_size The size of the Sobel kernel to be used.
@note The algorithm assumes that the color of the source image is close to that of the destination.
This assumption means that when the colors don't match, the source image color gets tinted toward
the color of the destination image.
*/
CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst,
float low_threshold = 30, float high_threshold = 45,
int kernel_size = 3);
//! @} photo_clone
//! @addtogroup photo_render
//! @{
/** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
filters are used in many different applications @cite EM11.
@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters:
- **RECURS\_FILTER** = 1
- **NORMCONV\_FILTER** = 2
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
*/
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
float sigma_s = 60, float sigma_r = 0.4f);
/** @brief This filter enhances the details of a particular image.
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
*/
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
float sigma_r = 0.15f);
/** @brief Pencil-like non-photorealistic line drawing
@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
@param shade\_factor Range between 0 to 0.1.
*/
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on
photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
contrast while preserving, or enhancing, high-contrast features.
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma\_s Range between 0 to 200.
@param sigma\_r Range between 0 to 1.
*/
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
float sigma_r = 0.45f);
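A short sketch exercising the rendering filters above (the input file is hypothetical; the
parameter values are just the defaults spelled out):
@code
cv::Mat img = cv::imread("photo.jpg");
cv::Mat cartoon, sketchGray, sketchColor;
cv::stylization(img, cartoon, 60.f, 0.45f);
cv::pencilSketch(img, sketchGray, sketchColor, 60.f, 0.07f, 0.02f);
@endcode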
//! @} photo_render
//! @} photo
} // cv
#endif

View File

@ -47,18 +47,75 @@
namespace cv { namespace cuda {
//! Brute force non-local means algorithm (slow but universal)
//! @addtogroup photo_denoise
//! @{
/** @brief Performs pure non local means denoising without any simplification, and thus it is not fast.
@param src Source image. Supports only CV\_8UC1, CV\_8UC2 and CV\_8UC3.
@param dst Destination image.
@param h Filter sigma regulating filter strength for color.
@param search\_window Size of search window.
@param block\_size Size of block used for computing weights.
@param borderMode Border type. See borderInterpolate for details. BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param s Stream for the asynchronous version.
@sa
fastNlMeansDenoising
*/
CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null());
//! Fast (but approximate) version of the non-local means algorithm, similar to the CPU function (running sums technique)
/** @brief The class implements fast approximate Non Local Means Denoising algorithm.
*/
class CV_EXPORTS FastNonLocalMeansDenoising
{
public:
//! Simple method, recommended for grayscale images (though it supports multichannel images)
/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising> with several computational
optimizations. Noise is expected to be Gaussian white noise.
@param src Input 8-bit 1-channel, 2-channel or 3-channel image.
@param dst Output image with the same size and type as src .
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise
@param search\_window Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater search\_window - greater
denoising time. Recommended value 21 pixels
@param block\_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.
This function is expected to be applied to grayscale images. For colored images look at
FastNonLocalMeansDenoising::labMethod.
@sa
fastNlMeansDenoising
*/
void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
//! Processes luminance and color components separately
void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
/** @brief Modification of FastNonLocalMeansDenoising::simpleMethod for color images
@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param h\_luminance Parameter regulating filter strength. A big h value perfectly removes noise
but also removes image details; a smaller h value preserves details but also preserves some noise
@param h\_color The same as h but for color components. For most images a value of 10 is enough
to remove colored noise without distorting colors
@param search\_window Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater search\_window - greater
denoising time. Recommended value 21 pixels
@param block\_size Size in pixels of the template patch that is used to compute weights. Should be
odd. Recommended value 7 pixels
@param s Stream for the asynchronous invocations.
The function converts the image to CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the FastNonLocalMeansDenoising::simpleMethod function.
@sa
fastNlMeansDenoisingColored
*/
void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
private:
@ -66,6 +123,8 @@ private:
GpuMat lab, l, ab;
};
//! @} photo
}} // namespace cv { namespace cuda {
#endif /* __OPENCV_PHOTO_CUDA_HPP__ */

View File

@ -49,6 +49,10 @@
extern "C" {
#endif
/** @addtogroup photo_c
@{
*/
/* Inpainting algorithms */
enum
{
@ -61,6 +65,7 @@ enum
CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask,
CvArr* dst, double inpaintRange, int flags );
/** @} */
#ifdef __cplusplus
} //extern "C"

View File

@ -53,8 +53,46 @@
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
/**
@defgroup stitching Images stitching
This figure illustrates the stitching module pipeline implemented in the Stitcher class. Using that
class, it's possible to configure/remove some steps, i.e. adjust the stitching pipeline according to
the particular needs. All building blocks from the pipeline are available in the detail namespace;
one can combine and use them separately.
The implemented stitching pipeline is very similar to the one proposed in @cite BL07.
![image](StitchingPipeline.jpg)
@{
@defgroup stitching_match Features Finding and Images Matching
@defgroup stitching_rotation Rotation Estimation
@defgroup stitching_autocalib Autocalibration
@defgroup stitching_warp Images Warping
@defgroup stitching_seam Seam Estimation
@defgroup stitching_exposure Exposure Compensation
@defgroup stitching_blend Image Blenders
@}
*/
namespace cv {
//! @addtogroup stitching
//! @{
/** @brief High level image stitcher.
It's possible to use this class without being aware of the entire stitching pipeline. However, to
achieve higher stitching stability and better quality of the final images, it is recommended to be
at least familiar with the theory.
@note
- A basic example on image stitching can be found at
opencv\_source\_code/samples/cpp/stitching.cpp
- A detailed example on image stitching can be found at
opencv\_source\_code/samples/cpp/stitching\_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
public:
@ -68,7 +106,11 @@ public:
};
// Stitcher() {}
// Creates stitcher with default parameters
/** @brief Creates a stitcher with the default parameters.
@param try\_use\_gpu Flag indicating whether GPU should be used whenever it's possible.
@return Stitcher class instance.
*/
static Stitcher createDefault(bool try_use_gpu = false);
CV_WRAP double registrationResol() const { return registr_resol_; }
@ -128,13 +170,43 @@ public:
const Ptr<detail::Blender> blender() const { return blender_; }
void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
/** @overload */
CV_WRAP Status estimateTransform(InputArrayOfArrays images);
/** @brief These functions try to match the given images and to estimate the rotation of each camera.
@note Use the functions only if you're aware of the stitching pipeline, otherwise use
Stitcher::stitch.
@param images Input images.
@param rois Region of interest rectangles.
@return Status code.
*/
Status estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois);
/** @overload */
CV_WRAP Status composePanorama(OutputArray pano);
/** @brief These functions try to compose the given images (or images stored internally from the other function
calls) into the final pano under the assumption that the image transformations were estimated
before.
@note Use the functions only if you're aware of the stitching pipeline, otherwise use
Stitcher::stitch.
@param images Input images.
@param pano Final pano.
@return Status code.
*/
Status composePanorama(InputArrayOfArrays images, OutputArray pano);
/** @overload */
CV_WRAP Status stitch(InputArrayOfArrays images, OutputArray pano);
/** @brief These functions try to stitch the given images.
@param images Input images.
@param rois Region of interest rectangles.
@param pano Final pano.
@return Status code.
*/
Status stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);
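A minimal stitching sketch using the high-level interface (the images vector is assumed to hold
overlapping shots loaded elsewhere):
@code
std::vector<cv::Mat> images;                   // overlapping input shots
cv::Mat pano;
cv::Stitcher stitcher = cv::Stitcher::createDefault(false /*try_use_gpu*/);
cv::Stitcher::Status status = stitcher.stitch(images, pano);
if (status != cv::Stitcher::OK)
    ; // handle failure, e.g. ERR_NEED_MORE_IMGS
@endcode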
std::vector<int> component() const { return indices_; }
@ -178,6 +250,8 @@ private:
CV_EXPORTS_W Ptr<Stitcher> createStitcher(bool try_use_gpu = false);
//! @} stitching
} // namespace cv
#endif // __OPENCV_STITCHING_STITCHER_HPP__

View File

@ -49,16 +49,37 @@
namespace cv {
namespace detail {
// See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
// by Heung-Yeung Shum and Richard Szeliski.
//! @addtogroup stitching_autocalib
//! @{
/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera
undergoes rotations around its centre only.
@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0\_ok True, if f0 was estimated successfully, false otherwise.
@param f1\_ok True, if f1 was estimated successfully, false otherwise.
See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
*/
void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
/** @brief Estimates focal lengths for each given camera.
@param features Features of images.
@param pairwise\_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<double> &focals);
bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);
//! @} stitching_autocalib
} // namespace detail
} // namespace cv

View File

@ -48,8 +48,13 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_blend
//! @{
// Simple blender which puts one image over another
/** @brief Base class for all blenders.
The base implementation is a simple blender which puts one image over another.
*/
class CV_EXPORTS Blender
{
public:
@ -58,9 +63,26 @@ public:
enum { NO, FEATHER, MULTI_BAND };
static Ptr<Blender> createDefault(int type, bool try_gpu = false);
/** @brief Prepares the blender for blending.
@param corners Source images top-left corners
@param sizes Source image sizes
*/
void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
/** @overload */
virtual void prepare(Rect dst_roi);
/** @brief Processes the image.
@param img Source image
@param mask Source image mask
@param tl Source image top-left corner
*/
virtual void feed(InputArray img, InputArray mask, Point tl);
/** @brief Blends and returns the final pano.
@param dst Final pano
@param dst\_mask Final pano mask
*/
virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);
protected:
@ -68,7 +90,8 @@ protected:
Rect dst_roi_;
};
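For orientation, a hedged sketch of the prepare/feed/blend protocol (the corners, sizes, images
and masks are assumed to come from earlier pipeline steps; the CV_16SC3 image type follows the
pipeline convention):
@code
using namespace cv::detail;

std::vector<cv::Point> corners;   // warped image top-left corners (from the warper)
std::vector<cv::Size>  sizes;     // warped image sizes
std::vector<cv::Mat>   imgs;      // warped images, CV_16SC3 by pipeline convention
std::vector<cv::Mat>   masks;     // warped masks, CV_8U

cv::Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND);
blender->prepare(corners, sizes);
for (size_t i = 0; i < imgs.size(); ++i)
    blender->feed(imgs[i], masks[i], corners[i]);
cv::Mat pano, pano_mask;
blender->blend(pano, pano_mask);
@endcode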
/** @brief Simple blender which mixes images at its borders.
*/
class CV_EXPORTS FeatherBlender : public Blender
{
public:
@ -81,8 +104,8 @@ public:
void feed(InputArray img, InputArray mask, Point tl);
void blend(InputOutputArray dst, InputOutputArray dst_mask);
// Creates weight maps for fixed set of source images by their masks and top-left corners.
// Final image can be obtained by simple weighting of the source images.
//! Creates weight maps for fixed set of source images by their masks and top-left corners.
//! Final image can be obtained by simple weighting of the source images.
Rect createWeightMaps(const std::vector<UMat> &masks, const std::vector<Point> &corners,
std::vector<UMat> &weight_maps);
@ -94,7 +117,8 @@ private:
inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }
/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).
*/
class CV_EXPORTS MultiBandBlender : public Blender
{
public:
@ -131,6 +155,8 @@ void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);
//! @}
} // namespace detail
} // namespace cv

View File

@ -48,6 +48,13 @@
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
/** @brief Describes camera parameters.
@note Translation is assumed to be zero during the whole stitching pipeline.
*/
struct CV_EXPORTS CameraParams
{
CameraParams();
@ -63,6 +70,8 @@ struct CV_EXPORTS CameraParams
Mat t; // Translation
};
//! @}
} // namespace detail
} // namespace cv

View File

@ -48,6 +48,11 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_exposure
//! @{
/** @brief Base class for all exposure compensators.
*/
class CV_EXPORTS ExposureCompensator
{
public:
@ -56,14 +61,29 @@ public:
enum { NO, GAIN, GAIN_BLOCKS };
static Ptr<ExposureCompensator> createDefault(int type);
/**
@param corners Source image top-left corners
@param images Source images
@param masks Image masks to update (the second value in the pair specifies the value which should
be used to detect where the image is)
*/
void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<UMat> &masks);
/** @overload */
virtual void feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
const std::vector<std::pair<UMat,uchar> > &masks) = 0;
/** @brief Compensate exposure in the specified image.
@param index Image index
@param corner Image top-left corner
@param image Image to process
@param mask Image mask
*/
virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
};
/** @brief Stub exposure compensator which does nothing.
*/
class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
{
public:
@ -72,7 +92,9 @@ public:
void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) { }
};
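A hedged sketch of the compensator protocol (the corners, images and masks are assumed to be the
warped pipeline data, stored as UMat):
@code
using namespace cv::detail;

std::vector<cv::Point> corners;   // warped image top-left corners
std::vector<cv::UMat>  images;    // warped images
std::vector<cv::UMat>  masks;     // warped masks

cv::Ptr<ExposureCompensator> comp =
    ExposureCompensator::createDefault(ExposureCompensator::GAIN);
comp->feed(corners, images, masks);            // estimate per-image gains
for (size_t i = 0; i < images.size(); ++i)
    comp->apply(int(i), corners[i], images[i], masks[i]);
@endcode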
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities, see @cite BL07 and @cite WJ10 for details.
*/
class CV_EXPORTS GainCompensator : public ExposureCompensator
{
public:
@ -85,7 +107,9 @@ private:
Mat_<double> gains_;
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
intensities, see @cite UES01 for details.
*/
class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
{
public:
@ -100,6 +124,8 @@ private:
std::vector<UMat> gain_maps_;
};
//! @}
} // namespace detail
} // namespace cv

View File

@ -55,6 +55,10 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_match
//! @{
/** @brief Structure containing image keypoints and descriptors. */
struct CV_EXPORTS ImageFeatures
{
int img_idx;
@ -63,20 +67,40 @@ struct CV_EXPORTS ImageFeatures
UMat descriptors;
};
/** @brief Feature finders base class */
class CV_EXPORTS FeaturesFinder
{
public:
virtual ~FeaturesFinder() {}
/** @overload */
void operator ()(InputArray image, ImageFeatures &features);
/** @brief Finds features in the given image.
@param image Source image
@param features Found features
@param rois Regions of interest
@sa detail::ImageFeatures, Rect\_
*/
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
/** @brief Frees unused memory allocated before if there is any. */
virtual void collectGarbage() {}
protected:
/** @brief This method must implement features finding logic in order to make the wrappers
detail::FeaturesFinder::operator() work.
@param image Source image
@param features Found features
@sa detail::ImageFeatures */
virtual void find(InputArray image, ImageFeatures &features) = 0;
};
/** @brief SURF features finder.
@sa detail::FeaturesFinder, SURF
*/
class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder
{
public:
@ -91,6 +115,10 @@ private:
Ptr<Feature2D> surf;
};
/** @brief ORB features finder.
@sa detail::FeaturesFinder, ORB
*/
class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
{
public:
@ -126,50 +154,92 @@ private:
};
#endif
/** @brief Structure containing information about matches between two images.
It's assumed that there is a homography between those images.
*/
struct CV_EXPORTS MatchesInfo
{
MatchesInfo();
MatchesInfo(const MatchesInfo &other);
const MatchesInfo& operator =(const MatchesInfo &other);
int src_img_idx, dst_img_idx; // Images indices (optional)
int src_img_idx, dst_img_idx; //!< Images indices (optional)
std::vector<DMatch> matches;
std::vector<uchar> inliers_mask; // Geometrically consistent matches mask
int num_inliers; // Number of geometrically consistent matches
Mat H; // Estimated homography
double confidence; // Confidence two images are from the same panorama
std::vector<uchar> inliers_mask; //!< Geometrically consistent matches mask
int num_inliers; //!< Number of geometrically consistent matches
Mat H; //!< Estimated homography
double confidence; //!< Confidence two images are from the same panorama
};
/** @brief Feature matchers base class. */
class CV_EXPORTS FeaturesMatcher
{
public:
virtual ~FeaturesMatcher() {}
/** @overload
@param features1 First image features
@param features2 Second image features
@param matches\_info Found matches
*/
void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) { match(features1, features2, matches_info); }
/** @brief Performs images matching.
@param features Features of the source images
@param pairwise\_matches Found pairwise matches
@param mask Mask indicating which image pairs must be matched
The function is parallelized with the TBB library.
@sa detail::MatchesInfo
*/
void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
const cv::UMat &mask = cv::UMat());
/** @return True, if it's possible to use the same matcher instance in parallel, false otherwise
*/
bool isThreadSafe() const { return is_thread_safe_; }
/** @brief Frees unused memory allocated before if there is any.
*/
virtual void collectGarbage() {}
protected:
FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
/** @brief This method must implement matching logic in order to make the wrappers
detail::FeaturesMatcher::operator() work.
@param features1 first image features
@param features2 second image features
@param matches\_info found matches
*/
virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo& matches_info) = 0;
bool is_thread_safe_;
};
/** @brief Features matcher which finds the two best matches for each feature and leaves the best one only
if the ratio between descriptor distances is greater than the threshold match\_conf.
@sa detail::FeaturesMatcher
*/
class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
{
public:
/** @brief Constructs a "best of 2 nearest" matcher.
@param try\_use\_gpu Whether to try to use the GPU or not
@param match\_conf Match distances ratio threshold
@param num\_matches\_thresh1 Minimum number of matches required for the 2D projective transform
estimation used in the inliers classification step
@param num\_matches\_thresh2 Minimum number of matches required for the 2D projective transform
re-estimation on inliers
*/
BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
int num_matches_thresh2 = 6);
@ -197,6 +267,8 @@ protected:
int range_width_;
};
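A hedged end-to-end sketch of the matching step; `imgs` is a hypothetical vector of input images, and feature extraction reuses the finder shown earlier:
@code
    // imgs: a hypothetical std::vector<cv::Mat> of input images
    std::vector<cv::detail::ImageFeatures> features(imgs.size());
    cv::detail::OrbFeaturesFinder finder;
    for (size_t i = 0; i < imgs.size(); ++i)
        finder(imgs[i], features[i]);

    std::vector<cv::detail::MatchesInfo> pairwise_matches;
    cv::detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
    matcher(features, pairwise_matches); // matches all pairs; pass a mask to restrict them
    matcher.collectGarbage();

    // Each MatchesInfo carries the estimated homography H, the inlier count and
    // a confidence that both images belong to the same panorama.
@endcode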
//! @} stitching_match
} // namespace detail
} // namespace cv

View File

@ -51,23 +51,50 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_rotation
//! @{
/** @brief Rotation estimator base class.
It takes features of all images, pairwise matches between all images and estimates rotations of all
cameras.
@note The coordinate system origin is implementation-dependent, but you can always normalize the
rotations with respect to the first camera, for instance.
*/
class CV_EXPORTS Estimator
{
public:
virtual ~Estimator() {}
/** @brief Estimates camera parameters.
@param features Features of images
@param pairwise\_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
bool operator ()(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras)
{ return estimate(features, pairwise_matches, cameras); }
protected:
/** @brief This method must implement the camera parameters estimation logic in order to make the wrapper
detail::Estimator::operator() work.
@param features Features of images
@param pairwise\_matches Pairwise matches of images
@param cameras Estimated camera parameters
@return True in case of success, false otherwise
*/
virtual bool estimate(const std::vector<ImageFeatures> &features,
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras) = 0;
};
/** @brief Homography based rotation estimator.
*/
class CV_EXPORTS HomographyBasedEstimator : public Estimator
{
public:
@ -82,7 +109,8 @@ private:
bool is_focals_estimated_;
};
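A sketch of estimating initial camera parameters from the matches produced above; `features` and `pairwise_matches` are assumed to exist from the matching stage:
@code
    cv::detail::HomographyBasedEstimator estimator;
    std::vector<cv::detail::CameraParams> cameras;
    if (!estimator(features, pairwise_matches, cameras))
        { /* estimation failed */ }

    // The bundle adjusters below expect CV_32F rotation matrices.
    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R.convertTo(cameras[i].R, CV_32F);
@endcode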
/** @brief Base class for all camera parameters refinement methods.
*/
class CV_EXPORTS BundleAdjusterBase : public Estimator
{
public:
@ -100,6 +128,11 @@ public:
void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
protected:
/** @brief Construct a bundle adjuster base instance.
@param num\_params\_per\_cam Number of parameters per camera
@param num\_errs\_per\_measurement Number of error terms (components) per match
*/
BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
: num_params_per_cam_(num_params_per_cam),
num_errs_per_measurement_(num_errs_per_measurement)
@ -114,9 +147,26 @@ protected:
const std::vector<MatchesInfo> &pairwise_matches,
std::vector<CameraParams> &cameras);
/** @brief Sets the initial camera parameters to refine.
@param cameras Camera parameters
*/
virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;
/** @brief Gets the refined camera parameters.
@param cameras Refined camera parameters
*/
virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
/** @brief Calculates error vector.
@param err Error column-vector of length total\_num\_matches \* num\_errs\_per\_measurement
*/
virtual void calcError(Mat &err) = 0;
/** @brief Calculates the cost function Jacobian.
@param jac Jacobian matrix of dimensions
(total\_num\_matches \* num\_errs\_per\_measurement) x (num\_images \* num\_params\_per\_cam)
*/
virtual void calcJacobian(Mat &jac) = 0;
// 3x3 8U mask, where 0 means don't refine the respective parameter and != 0 means refine it
@ -145,9 +195,12 @@ protected:
};
// Minimizes reprojection error.
// It can estimate focal length, aspect ratio, principal point.
// You can affect only on them via the refinement mask.
/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of the squared
reprojection errors.
It can estimate focal length, aspect ratio, principal point.
Only these parameters can be affected via the refinement mask.
*/
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase
{
public:
@ -163,8 +216,11 @@ private:
};
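A sketch of a refinement pass with a hand-built refinement mask; the cell-to-parameter mapping in the comments follows the convention used by the stitching samples and should be treated as an assumption:
@code
    cv::Ptr<cv::detail::BundleAdjusterBase> adjuster =
        cv::makePtr<cv::detail::BundleAdjusterReproj>();
    adjuster->setConfThresh(1.0); // filter out low-confidence matches

    cv::Mat refine_mask = cv::Mat::zeros(3, 3, CV_8U);
    refine_mask.at<uchar>(0, 0) = 1; // focal length
    refine_mask.at<uchar>(0, 2) = 1; // principal point x
    refine_mask.at<uchar>(1, 2) = 1; // principal point y
    adjuster->setRefinementMask(refine_mask);

    if (!(*adjuster)(features, pairwise_matches, cameras))
        { /* refinement failed */ }
@endcode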
// Minimizes sun of ray-to-ray distances.
// It can estimate focal length. It ignores the refinement mask for now.
/** @brief Implementation of the camera parameters refinement algorithm which minimizes the sum of the distances
between the rays passing through the camera center and a feature.
It can estimate focal length. It ignores the refinement mask for now.
*/
class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase
{
public:
@ -186,6 +242,11 @@ enum WaveCorrectKind
WAVE_CORRECT_VERT
};
/** @brief Tries to make the panorama more horizontal (or vertical).
@param rmats Camera rotation matrices.
@param kind Correction kind, see detail::WaveCorrectKind.
*/
void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
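A sketch of the usual calling pattern: copy the rotations out of the camera parameters, correct them in place, and write them back (`cameras` is assumed from the estimation stage):
@code
    std::vector<cv::Mat> rmats;
    for (size_t i = 0; i < cameras.size(); ++i)
        rmats.push_back(cameras[i].R.clone());
    cv::detail::waveCorrect(rmats, cv::detail::WAVE_CORRECT_HORIZ);
    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R = rmats[i];
@endcode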
@ -205,6 +266,8 @@ void CV_EXPORTS findMaxSpanningTree(
int num_images, const std::vector<MatchesInfo> &pairwise_matches,
Graph &span_tree, std::vector<int> &centers);
//! @} stitching_rotation
} // namespace detail
} // namespace cv

View File

@ -50,22 +50,35 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_seam
//! @{
/** @brief Base class for a seam estimator.
*/
class CV_EXPORTS SeamFinder
{
public:
virtual ~SeamFinder() {}
/** @brief Estimates seams.
@param src Source images
@param corners Source image top-left corners
@param masks Source image masks to update
*/
virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
std::vector<UMat> &masks) = 0;
};
/** @brief Stub seam estimator which does nothing.
*/
class CV_EXPORTS NoSeamFinder : public SeamFinder
{
public:
void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) {}
};
/** @brief Base class for all pairwise seam estimators.
*/
class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
{
public:
@ -74,6 +87,12 @@ public:
protected:
void run();
/** @brief Resolves the mask intersection of two specified images in the given ROI.
@param first First image index
@param second Second image index
@param roi Region of interest
*/
virtual void findInPair(size_t first, size_t second, Rect roi) = 0;
std::vector<UMat> images_;
@ -82,7 +101,8 @@ protected:
std::vector<UMat> masks_;
};
/** @brief Voronoi diagram-based seam estimator.
*/
class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
{
public:
@ -201,14 +221,16 @@ private:
std::set<std::pair<int, int> > edges_;
};
/** @brief Base class for all minimum graph-cut-based seam estimators.
*/
class CV_EXPORTS GraphCutSeamFinderBase
{
public:
enum CostType { COST_COLOR, COST_COLOR_GRAD };
};
/** @brief Minimum graph cut-based seam estimator. See details in @cite V03.
*/
class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
{
public:
@ -253,6 +275,8 @@ private:
};
#endif
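A sketch of the seam estimation call; the warped images, their top-left corners and the initial masks are assumed to come from the warping stage, with the images converted to CV_32F as graph-cut cost computation conventionally expects:
@code
    // images_warped_f : std::vector<cv::UMat>, CV_32F warped images (assumed)
    // corners         : std::vector<cv::Point>, warped image top-left corners (assumed)
    // masks_warped    : std::vector<cv::UMat>, 8U masks, updated in place (assumed)
    cv::Ptr<cv::detail::SeamFinder> seam_finder =
        cv::makePtr<cv::detail::GraphCutSeamFinder>(
            cv::detail::GraphCutSeamFinderBase::COST_COLOR);
    seam_finder->find(images_warped_f, corners, masks_warped);
@endcode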
//! @}
} // namespace detail
} // namespace cv

View File

@ -49,6 +49,9 @@
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
// Base Timelapser class, takes a sequence of images, applies appropriate shift, stores result in dst_.
class CV_EXPORTS Timelapser
@ -80,6 +83,8 @@ public:
virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);
};
//! @}
} // namespace detail
} // namespace cv

View File

@ -99,6 +99,9 @@
namespace cv {
namespace detail {
//! @addtogroup stitching
//! @{
class CV_EXPORTS DisjointSets
{
public:
@ -158,6 +161,8 @@ CV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset
CV_EXPORTS int& stitchingLogLevel();
//! @}
} // namespace detail
} // namespace cv

View File

@ -47,6 +47,8 @@
#include "opencv2/core.hpp"
#include "util.hpp" // Make your IDE see declarations
//! @cond IGNORED
namespace cv {
namespace detail {
@ -124,4 +126,6 @@ static inline double sqr(double x) { return x * x; }
} // namespace detail
} // namespace cv
//! @endcond
#endif // __OPENCV_STITCHING_UTIL_INL_HPP__

View File

@ -51,28 +51,76 @@
namespace cv {
namespace detail {
//! @addtogroup stitching_warp
//! @{
/** @brief Rotation-only model image warper interface.
*/
class CV_EXPORTS RotationWarper
{
public:
virtual ~RotationWarper() {}
/** @brief Projects the image point.
@param pt Source point
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected point
*/
virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;
/** @brief Builds the projection maps according to the given camera data.
@param src\_size Source image size
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param xmap Projection map for the x axis
@param ymap Projection map for the y axis
@return Projected image minimum bounding box
*/
virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0;
/** @brief Projects the image.
@param src Source image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp\_mode Interpolation mode
@param border\_mode Border extrapolation mode
@param dst Projected image
@return Projected image top-left corner
*/
virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
OutputArray dst) = 0;
/** @brief Projects the image backward.
@param src Projected image
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@param interp\_mode Interpolation mode
@param border\_mode Border extrapolation mode
@param dst\_size Backward-projected image size
@param dst Backward-projected image
*/
virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
Size dst_size, OutputArray dst) = 0;
/**
@param src\_size Source image bounding box
@param K Camera intrinsic parameters
@param R Camera rotation matrix
@return Projected image minimum bounding box
*/
virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0;
virtual float getScale() const { return 1.f; }
virtual void setScale(float) {}
};
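A sketch of driving this interface with a concrete warper (detail::SphericalWarper, chosen only for illustration); K and R must be CV_32F, and `imgs`/`cameras` are assumed from the earlier stages:
@code
    cv::detail::SphericalWarper warper(1.0f /*scale*/);
    for (size_t i = 0; i < imgs.size(); ++i)
    {
        cv::Mat K;
        cameras[i].K().convertTo(K, CV_32F); // intrinsics to CV_32F
        cv::Mat img_warped;
        cv::Point corner = warper.warp(imgs[i], K, cameras[i].R,
                                       cv::INTER_LINEAR, cv::BORDER_REFLECT,
                                       img_warped);
        // corner locates img_warped in the common panorama plane
    }
@endcode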
/** @brief Base class for warping logic implementation.
*/
struct CV_EXPORTS ProjectorBase
{
void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),
@ -87,7 +135,8 @@ struct CV_EXPORTS ProjectorBase
float t[3];
};
/** @brief Base class for rotation-based warpers using a detail::ProjectorBase derived class.
*/
template <class P>
class CV_EXPORTS RotationWarperBase : public RotationWarper
{
@ -126,10 +175,15 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
void mapBackward(float u, float v, float &x, float &y);
};
/** @brief Warper that maps an image onto the z = 1 plane.
*/
class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>
{
public:
/** @brief Construct an instance of the plane warper class.
@param scale Projected image scale multiplier
*/
PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
@ -154,11 +208,18 @@ struct CV_EXPORTS SphericalProjector : ProjectorBase
};
// Projects image onto unit sphere with origin at (0, 0, 0).
// Poles are located at (0, -1, 0) and (0, 1, 0) points.
/** @brief Warper that maps an image onto the unit sphere located at the origin.
Projects the image onto the unit sphere with origin at (0, 0, 0).
Poles are located at the points (0, -1, 0) and (0, 1, 0).
*/
class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
{
public:
/** @brief Construct an instance of the spherical warper class.
@param scale Projected image scale multiplier
*/
SphericalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
@ -175,10 +236,15 @@ struct CV_EXPORTS CylindricalProjector : ProjectorBase
};
// Projects image onto x * x + z * z = 1 cylinder
/** @brief Warper that maps an image onto the x\*x + z\*z = 1 cylinder.
*/
class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>
{
public:
/** @brief Construct an instance of the cylindrical warper class.
@param scale Projected image scale multiplier
*/
CylindricalWarper(float scale) { projector_.scale = scale; }
Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
@ -508,6 +574,8 @@ protected:
}
};
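When the same projection is applied repeatedly (a video timelapse, for example), splitting warp() into buildMaps() plus cv::remap avoids recomputing the maps every frame. A sketch, under the assumption that `frame_size`, `K` and `R` are fixed and `grabFrame()` is a hypothetical frame source:
@code
    cv::detail::CylindricalWarper warper(1.0f);
    cv::Mat xmap, ymap;
    cv::Rect roi = warper.buildMaps(frame_size, K, R, xmap, ymap);

    for (;;) // ... until the stream ends
    {
        cv::Mat frame = grabFrame(); // hypothetical
        cv::Mat warped;
        cv::remap(frame, warped, xmap, ymap, cv::INTER_LINEAR, cv::BORDER_REFLECT);
        // warped sits at roi.tl() in the common plane
    }
@endcode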
//! @} stitching_warp
} // namespace detail
} // namespace cv

View File

@ -47,6 +47,8 @@
#include "warpers.hpp" // Make your IDE see declarations
#include <limits>
//! @cond IGNORED
namespace cv {
namespace detail {
@ -767,4 +769,6 @@ void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
} // namespace detail
} // namespace cv
//! @endcond
#endif // __OPENCV_STITCHING_WARPERS_INL_HPP__

View File

@ -47,6 +47,11 @@
namespace cv {
//! @addtogroup stitching_warp
//! @{
/** @brief Base class for image warper factories.
*/
class WarperCreator
{
public:
@ -54,21 +59,25 @@ public:
virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
};
/** @brief Plane warper factory class.
@sa detail::PlaneWarper
*/
class PlaneWarper : public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarper>(scale); }
};
/** @brief Cylindrical warper factory class.
@sa detail::CylindricalWarper
*/
class CylindricalWarper: public WarperCreator
{
public:
Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarper>(scale); }
};
/** @brief Spherical warper factory class */
class SphericalWarper: public WarperCreator
{
public:
@ -167,6 +176,8 @@ public:
};
#endif
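These factories let the high-level Stitcher be handed a warper type without committing to a scale, which the Stitcher computes itself. A sketch (assuming `imgs` holds the input images):
@code
    cv::Stitcher stitcher = cv::Stitcher::createDefault(false /*try_use_gpu*/);
    stitcher.setWarper(cv::makePtr<cv::CylindricalWarper>());

    cv::Mat pano;
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        { /* stitching failed */ }
@endcode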
//! @} stitching_warp
} // namespace cv
#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__