added image stitching module

commit 336989f80b
parent d7f04f04cc

@@ -36,4 +36,5 @@ add_subdirectory(haartraining)
 if(NOT ANDROID)
     add_subdirectory(gpu)
+    add_subdirectory(stitching)
 endif()

modules/stitching/CMakeLists.txt (new file, 42 lines)
@@ -0,0 +1,42 @@
project(stitching)

include_directories(
    "${CMAKE_CURRENT_SOURCE_DIR}"
    "${CMAKE_SOURCE_DIR}/modules/core/include"
    "${CMAKE_SOURCE_DIR}/modules/imgproc/include"
    "${CMAKE_SOURCE_DIR}/modules/objdetect/include"
    "${CMAKE_SOURCE_DIR}/modules/ml/include"
    "${CMAKE_SOURCE_DIR}/modules/highgui/include"
    "${CMAKE_SOURCE_DIR}/modules/video/include"
    "${CMAKE_SOURCE_DIR}/modules/features2d/include"
    "${CMAKE_SOURCE_DIR}/modules/flann/include"
    "${CMAKE_SOURCE_DIR}/modules/calib3d/include"
    "${CMAKE_SOURCE_DIR}/modules/legacy/include"
    "${CMAKE_SOURCE_DIR}/modules/imgproc/src" # for gcgraph.hpp
    "${CMAKE_SOURCE_DIR}/modules/gpu/include"
    )

set(stitching_libs opencv_core opencv_imgproc opencv_highgui opencv_features2d opencv_calib3d opencv_gpu)

set(stitching_files blenders.hpp blenders.cpp
    focal_estimators.hpp focal_estimators.cpp
    motion_estimators.hpp motion_estimators.cpp
    seam_finders.hpp seam_finders.cpp
    util.hpp util.cpp util_inl.hpp
    warpers.hpp warpers.cpp warpers_inl.hpp
    main.cpp)

set(the_target opencv_stitching)
add_executable(${the_target} ${stitching_files})

add_dependencies(${the_target} ${stitching_libs})
set_target_properties(${the_target} PROPERTIES
    DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
    ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib/"
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin/"
    INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
    OUTPUT_NAME "opencv_stitching")

target_link_libraries(${the_target} ${stitching_libs})

install(TARGETS ${the_target} RUNTIME DESTINATION bin COMPONENT main)

modules/stitching/blenders.cpp (new file, 315 lines)
@@ -0,0 +1,315 @@
#include <opencv2/imgproc/imgproc.hpp>
#include "blenders.hpp"
#include "util.hpp"

using namespace std;
using namespace cv;

static const float WEIGHT_EPS = 1e-5f;

Ptr<Blender> Blender::createDefault(int type)
{
    if (type == NO)
        return new Blender();
    if (type == FEATHER)
        return new FeatherBlender();
    if (type == MULTI_BAND)
        return new MultiBandBlender();
    CV_Error(CV_StsBadArg, "unsupported blending method");
    return NULL;
}


Point Blender::operator ()(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &masks,
                           Mat& dst)
{
    Mat dst_mask;
    return (*this)(src, corners, masks, dst, dst_mask);
}


Point Blender::operator ()(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &masks,
                           Mat &dst, Mat &dst_mask)
{
    Point dst_tl = blend(src, corners, masks, dst, dst_mask);
    dst.setTo(Scalar::all(0), dst_mask == 0);
    return dst_tl;
}


Point Blender::blend(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &masks,
                     Mat &dst, Mat &dst_mask)
{
    for (size_t i = 0; i < src.size(); ++i)
    {
        CV_Assert(src[i].type() == CV_32FC3);
        CV_Assert(masks[i].type() == CV_8U);
    }
    const int image_type = src[0].type();

    Rect dst_roi = resultRoi(src, corners);

    dst.create(dst_roi.size(), image_type);
    dst.setTo(Scalar::all(0));

    dst_mask.create(dst_roi.size(), CV_8U);
    dst_mask.setTo(Scalar::all(0));

    for (size_t i = 0; i < src.size(); ++i)
    {
        int dx = corners[i].x - dst_roi.x;
        int dy = corners[i].y - dst_roi.y;

        for (int y = 0; y < src[i].rows; ++y)
        {
            const Point3f *src_row = src[i].ptr<Point3f>(y);
            Point3f *dst_row = dst.ptr<Point3f>(dy + y);

            const uchar *mask_row = masks[i].ptr<uchar>(y);
            uchar *dst_mask_row = dst_mask.ptr<uchar>(dy + y);

            for (int x = 0; x < src[i].cols; ++x)
            {
                if (mask_row[x])
                    dst_row[dx + x] = src_row[x];
                dst_mask_row[dx + x] |= mask_row[x];
            }
        }
    }

    return dst_roi.tl();
}


Point FeatherBlender::blend(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &masks,
                            Mat &dst, Mat &dst_mask)
{
    vector<Mat> weights(masks.size());
    for (size_t i = 0; i < weights.size(); ++i)
        createWeightMap(masks[i], sharpness_, weights[i]);

    Mat dst_weight;
    Point dst_tl = blendLinear(src, corners, weights, dst, dst_weight);
    dst_mask = dst_weight > WEIGHT_EPS;

    return dst_tl;
}


Point MultiBandBlender::blend(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &masks,
                              Mat &dst, Mat &dst_mask)
{
    CV_Assert(src.size() == corners.size() && src.size() == masks.size());
    const int num_images = src.size();

    Rect dst_roi = resultRoi(src, corners);

    vector<Mat> src_(num_images);
    vector<Point> corners_(num_images);
    vector<Mat> masks_(num_images);

    // TODO avoid creating extra border
    for (int i = 0; i < num_images; ++i)
    {
        copyMakeBorder(src[i], src_[i],
                       corners[i].y - dst_roi.y, dst_roi.br().y - corners[i].y - src[i].rows,
                       corners[i].x - dst_roi.x, dst_roi.br().x - corners[i].x - src[i].cols,
                       BORDER_REFLECT);
        copyMakeBorder(masks[i], masks_[i],
                       corners[i].y - dst_roi.y, dst_roi.br().y - corners[i].y - src[i].rows,
                       corners[i].x - dst_roi.x, dst_roi.br().x - corners[i].x - src[i].cols,
                       BORDER_CONSTANT);
        corners_[i] = Point(0, 0);
    }

    Mat weight_map;
    vector<Mat> src_pyr_gauss;
    vector< vector<Mat> > src_pyr_laplace(num_images);
    vector< vector<Mat> > weight_pyr_gauss(num_images);

    // Compute all pyramids
    for (int i = 0; i < num_images; ++i)
    {
        createGaussPyr(src_[i], num_bands_, src_pyr_gauss);
        createLaplacePyr(src_pyr_gauss, src_pyr_laplace[i]);

        masks_[i].convertTo(weight_map, CV_32F, 1. / 255.);
        createGaussPyr(weight_map, num_bands_, weight_pyr_gauss[i]);
    }

    computeResultMask(masks, corners, dst_mask);

    Mat dst_level_weight;
    vector<Mat> dst_pyr_laplace(num_bands_ + 1);
    vector<Mat> src_pyr_slice(num_images);
    vector<Mat> weight_pyr_slice(num_images);

    // Blend pyramids
    for (int level_id = 0; level_id <= num_bands_; ++level_id)
    {
        for (int i = 0; i < num_images; ++i)
        {
            src_pyr_slice[i] = src_pyr_laplace[i][level_id];
            weight_pyr_slice[i] = weight_pyr_gauss[i][level_id];
        }
        blendLinear(src_pyr_slice, corners_, weight_pyr_slice,
                    dst_pyr_laplace[level_id], dst_level_weight);
    }

    restoreImageFromLaplacePyr(dst_pyr_laplace);
    dst = dst_pyr_laplace[0];

    return dst_roi.tl();
}


//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions

Rect resultRoi(const vector<Mat> &src, const vector<Point> &corners)
{
    Point tl(numeric_limits<int>::max(), numeric_limits<int>::max());
    Point br(numeric_limits<int>::min(), numeric_limits<int>::min());

    CV_Assert(src.size() == corners.size());
    for (size_t i = 0; i < src.size(); ++i)
    {
        tl.x = min(tl.x, corners[i].x);
        tl.y = min(tl.y, corners[i].y);
        br.x = max(br.x, corners[i].x + src[i].cols);
        br.y = max(br.y, corners[i].y + src[i].rows);
    }

    return Rect(tl, br);
}


Point computeResultMask(const vector<Mat> &masks, const vector<Point> &corners, Mat &dst_mask)
{
    Rect dst_roi = resultRoi(masks, corners);

    dst_mask.create(dst_roi.size(), CV_8U);
    dst_mask.setTo(Scalar::all(0));

    for (size_t i = 0; i < masks.size(); ++i)
    {
        int dx = corners[i].x - dst_roi.x;
        int dy = corners[i].y - dst_roi.y;

        for (int y = 0; y < masks[i].rows; ++y)
        {
            const uchar *mask_row = masks[i].ptr<uchar>(y);
            uchar *dst_mask_row = dst_mask.ptr<uchar>(dy + y);

            for (int x = 0; x < masks[i].cols; ++x)
                dst_mask_row[dx + x] |= mask_row[x];
        }
    }

    return dst_roi.tl();
}


Point blendLinear(const vector<Mat> &src, const vector<Point> &corners, const vector<Mat> &weights,
                  Mat &dst, Mat& dst_weight)
{
    for (size_t i = 0; i < src.size(); ++i)
    {
        CV_Assert(src[i].type() == CV_32FC3);
        CV_Assert(weights[i].type() == CV_32F);
    }
    const int image_type = src[0].type();

    Rect dst_roi = resultRoi(src, corners);

    dst.create(dst_roi.size(), image_type);
    dst.setTo(Scalar::all(0));

    dst_weight.create(dst_roi.size(), CV_32F);
    dst_weight.setTo(Scalar::all(0));

    // Compute colors sums and weights
    for (size_t i = 0; i < src.size(); ++i)
    {
        int dx = corners[i].x - dst_roi.x;
        int dy = corners[i].y - dst_roi.y;

        for (int y = 0; y < src[i].rows; ++y)
        {
            const Point3f *src_row = src[i].ptr<Point3f>(y);
            Point3f *dst_row = dst.ptr<Point3f>(dy + y);

            const float *weight_row = weights[i].ptr<float>(y);
            float *dst_weight_row = dst_weight.ptr<float>(dy + y);

            for (int x = 0; x < src[i].cols; ++x)
            {
                dst_row[dx + x] += src_row[x] * weight_row[x];
                dst_weight_row[dx + x] += weight_row[x];
            }
        }
    }

    // Normalize sums
    for (int y = 0; y < dst.rows; ++y)
    {
        Point3f *dst_row = dst.ptr<Point3f>(y);
        float *dst_weight_row = dst_weight.ptr<float>(y);

        for (int x = 0; x < dst.cols; ++x)
        {
            dst_weight_row[x] += WEIGHT_EPS;
            dst_row[x] *= 1.f / dst_weight_row[x];
        }
    }

    return dst_roi.tl();
}


void createWeightMap(const Mat &mask, float sharpness, Mat &weight)
{
    CV_Assert(mask.type() == CV_8U);
    distanceTransform(mask, weight, CV_DIST_L1, 3);
    threshold(weight * sharpness, weight, 1.f, 1.f, THRESH_TRUNC);
}


void createGaussPyr(const Mat &img, int num_layers, vector<Mat> &pyr)
{
    pyr.resize(num_layers + 1);
    pyr[0] = img.clone();
    for (int i = 0; i < num_layers; ++i)
        pyrDown(pyr[i], pyr[i + 1]);
}


void createLaplacePyr(const vector<Mat> &pyr_gauss, vector<Mat> &pyr_laplace)
{
    if (pyr_gauss.size() == 0)
        return;

    pyr_laplace.resize(pyr_gauss.size());

    Mat tmp;
    for (size_t i = 0; i < pyr_laplace.size() - 1; ++i)
    {
        pyrUp(pyr_gauss[i + 1], tmp, pyr_gauss[i].size());
        pyr_laplace[i] = pyr_gauss[i] - tmp;
    }
    pyr_laplace[pyr_laplace.size() - 1] = pyr_gauss[pyr_laplace.size() - 1].clone();
}


void restoreImageFromLaplacePyr(vector<Mat> &pyr)
{
    if (pyr.size() == 0)
        return;

    Mat tmp;
    for (size_t i = pyr.size() - 1; i > 0; --i)
    {
        pyrUp(pyr[i], tmp, pyr[i - 1].size());
        pyr[i - 1] += tmp;
    }
}
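
Summarizing the linear blend implemented by blendLinear above (and reused per pyramid level by MultiBandBlender): every output pixel is the weight-normalized sum of the overlapping source pixels,

$$ \mathrm{dst}(x) \;=\; \frac{\sum_i w_i(x)\,\mathrm{src}_i(x)}{\sum_i w_i(x) + \varepsilon}, \qquad \varepsilon = \texttt{WEIGHT\_EPS} = 10^{-5}, $$

where FeatherBlender takes each w_i from a sharpness-scaled, truncated distance transform of the corresponding mask (createWeightMap).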

modules/stitching/blenders.hpp (new file, 71 lines)
@@ -0,0 +1,71 @@
#ifndef _OPENCV_BLENDERS_HPP_
#define _OPENCV_BLENDERS_HPP_

#include <vector>
#include <opencv2/core/core.hpp>

// Simple blender which puts one image over another
class Blender
{
public:
    enum { NO, FEATHER, MULTI_BAND };

    static cv::Ptr<Blender> createDefault(int type);

    cv::Point operator ()(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &masks,
                          cv::Mat& dst);
    cv::Point operator ()(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &masks,
                          cv::Mat& dst, cv::Mat& dst_mask);

protected:
    virtual cv::Point blend(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &masks,
                            cv::Mat& dst, cv::Mat& dst_mask);
};


class FeatherBlender : public Blender
{
public:
    FeatherBlender(float sharpness = 0.02f) : sharpness_(sharpness) {}

private:
    cv::Point blend(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &masks,
                    cv::Mat &dst, cv::Mat &dst_mask);

    float sharpness_;
};


class MultiBandBlender : public Blender
{
public:
    MultiBandBlender(int num_bands = 10) : num_bands_(num_bands) {}

private:
    cv::Point blend(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &masks,
                    cv::Mat& dst, cv::Mat& dst_mask);

    int num_bands_;
};


//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions

cv::Rect resultRoi(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners);

cv::Point computeResultMask(const std::vector<cv::Mat> &masks, const std::vector<cv::Point> &corners, cv::Mat &mask);

cv::Point blendLinear(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners, const std::vector<cv::Mat> &weights,
                      cv::Mat& dst, cv::Mat& dst_weight);

void createWeightMap(const cv::Mat& mask, float sharpness, cv::Mat& weight);

void createGaussPyr(const cv::Mat& img, int num_layers, std::vector<cv::Mat>& pyr);

void createLaplacePyr(const std::vector<cv::Mat>& pyr_gauss, std::vector<cv::Mat>& pyr_laplace);

// Restores source image in-place. Result will be in pyr[0].
void restoreImageFromLaplacePyr(std::vector<cv::Mat>& pyr);

#endif // _OPENCV_BLENDERS_HPP_
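
As a quick orientation (a minimal sketch, not part of the commit): createDefault() is the intended entry point, and main.cpp later in this commit uses the hierarchy roughly like this, with placeholder variable names:

    cv::Ptr<Blender> blender = Blender::createDefault(Blender::MULTI_BAND);
    cv::Mat pano, pano_mask;  // blended panorama and its coverage mask
    // images_f: warped CV_32FC3 images; corners: their top-left positions; masks_warped: CV_8U masks
    (*blender)(images_f, corners, masks_warped, pano, pano_mask);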

modules/stitching/focal_estimators.cpp (new file, 106 lines)
@@ -0,0 +1,106 @@
#include "focal_estimators.hpp"
#include "util.hpp"

using namespace std;
using namespace cv;

void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
{
    CV_Assert(H.type() == CV_64F && H.size() == Size(3, 3));

    const double h[9] =
    {
        H.at<double>(0, 0), H.at<double>(0, 1), H.at<double>(0, 2),
        H.at<double>(1, 0), H.at<double>(1, 1), H.at<double>(1, 2),
        H.at<double>(2, 0), H.at<double>(2, 1), H.at<double>(2, 2)
    };

    f1_ok = true;
    double denom1 = h[6] * h[7];
    double denom2 = (h[7] - h[6]) * (h[7] + h[6]);
    if (max(abs(denom1), abs(denom2)) < 1e-5)
        f1_ok = false;
    else
    {
        double val1 = -(h[0] * h[1] + h[3] * h[4]) / denom1;
        double val2 = (h[0] * h[0] + h[3] * h[3] - h[1] * h[1] - h[4] * h[4]) / denom2;
        if (val1 < val2)
            swap(val1, val2);
        if (val1 > 0 && val2 > 0)
            f1 = sqrt(abs(denom1) > abs(denom2) ? val1 : val2);
        else if (val1 > 0)
            f1 = sqrt(val1);
        else
            f1_ok = false;
    }

    f0_ok = true;
    denom1 = h[0] * h[3] + h[1] * h[4];
    denom2 = h[0] * h[0] + h[1] * h[1] - h[3] * h[3] - h[4] * h[4];
    if (max(abs(denom1), abs(denom2)) < 1e-5)
        f0_ok = false;
    else
    {
        double val1 = -h[2] * h[5] / denom1;
        double val2 = (h[5] * h[5] - h[2] * h[2]) / denom2;
        if (val1 < val2)
            swap(val1, val2);
        if (val1 > 0 && val2 > 0)
            f0 = sqrt(abs(denom1) > abs(denom2) ? val1 : val2);
        else if (val1 > 0)
            f0 = sqrt(val1);
        else
            f0_ok = false;
    }
}


bool focalsFromFundamental(const Mat &F, double &f0, double &f1)
{
    CV_Assert(F.type() == CV_64F);
    CV_Assert(F.size() == Size(3, 3));

    Mat Ft = F.t();
    Mat k = Mat::zeros(3, 1, CV_64F);
    k.at<double>(2, 0) = 1.f;

    // 1. Compute quantities
    double a = normL2sq(F*Ft*k) / normL2sq(Ft*k);
    double b = normL2sq(Ft*F*k) / normL2sq(F*k);
    double c = sqr(k.dot(F*k)) / (normL2sq(Ft*k) * normL2sq(F*k));
    double d = k.dot(F*Ft*F*k) / k.dot(F*k);
    double A = 1/c + a - 2*d;
    double B = 1/c + b - 2*d;
    double P = 2*(1/c - 2*d + 0.5*normL2sq(F));
    double Q = -(A + B)/c + 0.5*(normL2sq(F*Ft) - 0.5*sqr(normL2sq(F)));

    // 2. Solve quadratic equation Z*Z*a_ + Z*b_ + c_ = 0
    double a_ = 1 + c*P;
    double b_ = -(c*P*P + 2*P + 4*c*Q);
    double c_ = P*P + 4*c*P*Q + 12*A*B;
    double D = b_*b_ - 4*a_*c_;
    if (abs(D) < 1e-5)
        D = 0;
    else if (D < 0)
        return false;
    double D_sqrt = sqrt(D);
    double Z0 = (-b_ - D_sqrt) / (2*a_);
    double Z1 = (-b_ + D_sqrt) / (2*a_);

    // 3. Choose solution
    double w0 = abs(Z0*Z0*Z0 - 3*P*Z0*Z0 + 2*(P*P + 2*Q)*Z0 - 4*(P*Q + 4*A*B/c));
    double w1 = abs(Z1*Z1*Z1 - 3*P*Z1*Z1 + 2*(P*P + 2*Q)*Z1 - 4*(P*Q + 4*A*B/c));
    double Z = Z0;
    if (w1 < w0)
        Z = Z1;

    // 4.
    double X = -1/c*(1 + 2*B/(Z - P));
    double Y = -1/c*(1 + 2*A/(Z - P));

    // 5. Compute focal lengths
    f0 = 1/sqrt(1 + X/normL2sq(Ft*k));
    f1 = 1/sqrt(1 + Y/normL2sq(F*k));

    return true;
}
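
Restating the first branch of focalsFromHomography above in matrix form (H = (h_{ij}), stored row-major as h[0..8] in the code): the two candidate estimates for the second camera's focal length are

$$ f_1^2 = -\frac{h_{11}h_{12} + h_{21}h_{22}}{h_{31}h_{32}} \qquad\text{or}\qquad f_1^2 = \frac{h_{11}^2 + h_{21}^2 - h_{12}^2 - h_{22}^2}{h_{32}^2 - h_{31}^2}, $$

with the estimate taken from the better-conditioned (larger-magnitude) denominator when both are positive, and rejected when both denominators are near zero; f_0 is obtained analogously from the third-column entries h_{13}, h_{23}.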

modules/stitching/focal_estimators.hpp (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef _OPENCV_FOCAL_ESTIMATORS_HPP_
#define _OPENCV_FOCAL_ESTIMATORS_HPP_

#include <opencv2/core/core.hpp>

// See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
// by Heung-Yeung Shum and Richard Szeliski.
void focalsFromHomography(const cv::Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);

bool focalsFromFundamental(const cv::Mat &F, double &f0, double &f1);

#endif // _OPENCV_FOCAL_ESTIMATORS_HPP_

modules/stitching/main.cpp (new file, 212 lines)
@@ -0,0 +1,212 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "util.hpp"
#include "warpers.hpp"
#include "blenders.hpp"
#include "seam_finders.hpp"
#include "motion_estimators.hpp"

using namespace std;
using namespace cv;

void printHelp()
{
    cout << "Rotation model stitcher.\n"
         << "Usage: stitch img1 img2 [...imgN]\n"
         << "\t[--matchconf <0.0-1.0>]\n"
         << "\t[--ba (ray_space|focal_ray_space)]\n"
         << "\t[--warp (plane|cylindrical|spherical)]\n"
         << "\t[--seam (no|voronoi|graphcut)]\n"
         << "\t[--blend (no|feather|multiband)]\n"
         << "\t[--output <result_img>]\n";
}

int main(int argc, char* argv[])
{
    cv::setBreakOnError(true);

    vector<Mat> images;
    string result_name = "result.png";
    int ba_space = BundleAdjuster::RAY_SPACE;
    int warp_type = Warper::SPHERICAL;
    bool user_match_conf = false;
    float match_conf;
    int seam_find_type = SeamFinder::GRAPH_CUT;
    int blend_type = Blender::MULTI_BAND;

    if (argc == 1)
    {
        printHelp();
        return 0;
    }

    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--matchconf")
        {
            user_match_conf = true;
            match_conf = static_cast<float>(atof(string(argv[i + 1]).c_str()));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            if (string(argv[i + 1]) == "ray_space")
                ba_space = BundleAdjuster::RAY_SPACE;
            else if (string(argv[i + 1]) == "focal_ray_space")
                ba_space = BundleAdjuster::FOCAL_RAY_SPACE;
            else
            {
                cout << "Bad bundle adjustment space\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            if (string(argv[i + 1]) == "plane")
                warp_type = Warper::PLANE;
            else if (string(argv[i + 1]) == "cylindrical")
                warp_type = Warper::CYLINDRICAL;
            else if (string(argv[i + 1]) == "spherical")
                warp_type = Warper::SPHERICAL;
            else
            {
                cout << "Bad warping method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no")
                seam_find_type = SeamFinder::NO;
            else if (string(argv[i + 1]) == "voronoi")
                seam_find_type = SeamFinder::VORONOI;
            else if (string(argv[i + 1]) == "graphcut")
                seam_find_type = SeamFinder::GRAPH_CUT;
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
        {
            Mat img = imread(argv[i]);
            if (img.empty())
            {
                cout << "Can't open image " << argv[i] << endl;
                return -1;
            }
            images.push_back(img);
        }
    }

    const int num_images = static_cast<int>(images.size());
    if (num_images < 2)
    {
        cout << "Need more images\n";
        return -1;
    }

    LOGLN("Finding features...");
    vector<ImageFeatures> features;
    SurfFeaturesFinder finder;
    finder(images, features);

    LOGLN("Pairwise matching...");
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher;
    if (user_match_conf)
        matcher = BestOf2NearestMatcher(true, match_conf);
    matcher(images, features, pairwise_matches);

    LOGLN("Estimating rotations...");
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(images, features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].M.convertTo(R, CV_32F);
        cameras[i].M = R;
        LOGLN("Initial focal length " << i << ": " << cameras[i].focal);
    }

    LOGLN("Bundle adjustment...");
    BundleAdjuster adjuster(ba_space);
    adjuster(images, features, pairwise_matches, cameras);

    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera focal length " << i << ": " << cameras[i].focal);
        focals.push_back(cameras[i].focal);
    }
    nth_element(focals.begin(), focals.end(), focals.begin() + focals.size() / 2);
    float camera_focal = focals[focals.size() / 2];

    vector<Mat> masks(num_images);
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);

    LOGLN("Warping images...");
    Ptr<Warper> warper = Warper::createByCameraFocal(camera_focal, warp_type);
    for (int i = 0; i < num_images; ++i)
    {
        corners[i] = (*warper)(images[i], cameras[i].focal, cameras[i].M, images_warped[i]);
        (*warper)(masks[i], cameras[i].focal, cameras[i].M, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
    }
    vector<Mat> images_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_f[i], CV_32F);

    LOGLN("Finding seams...");
    Ptr<SeamFinder> seam_finder = SeamFinder::createDefault(seam_find_type);
    (*seam_finder)(images_f, corners, masks_warped);

    LOGLN("Blending images...");
    Mat result, result_mask;
    Ptr<Blender> blender = Blender::createDefault(blend_type);
    (*blender)(images_f, corners, masks_warped, result, result_mask);

    imwrite(result_name, result);

    LOGLN("Finished");
    return 0;
}
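
The executable is installed as opencv_stitching (per the CMakeLists.txt above), so going by printHelp() a typical run would look something like the line below; the image names are placeholders, and the options shown simply spell out the defaults (spherical warp, graph-cut seams, multi-band blending):

    opencv_stitching img1.jpg img2.jpg img3.jpg --warp spherical --seam graphcut --blend multiband --output panorama.png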

modules/stitching/motion_estimators.cpp (new file, 755 lines)
@@ -0,0 +1,755 @@
#include <algorithm>
#include <functional>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/gpu/gpu.hpp>
#include "focal_estimators.hpp"
#include "motion_estimators.hpp"
#include "util.hpp"

using namespace std;
using namespace cv;
using namespace cv::gpu;

//////////////////////////////////////////////////////////////////////////////

namespace
{
    class CpuSurfFeaturesFinder : public FeaturesFinder
    {
    public:
        inline CpuSurfFeaturesFinder()
        {
            detector_ = new SurfFeatureDetector(500.0);
            extractor_ = new SurfDescriptorExtractor;
        }

    protected:
        void find(const vector<Mat> &images, vector<ImageFeatures> &features);

    private:
        Ptr<FeatureDetector> detector_;
        Ptr<DescriptorExtractor> extractor_;
    };

    void CpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
    {
        // Make images gray
        vector<Mat> gray_images(images.size());
        for (size_t i = 0; i < images.size(); ++i)
        {
            CV_Assert(images[i].depth() == CV_8U);
            cvtColor(images[i], gray_images[i], CV_BGR2GRAY);
        }

        features.resize(images.size());

        // Find keypoints in all images
        for (size_t i = 0; i < images.size(); ++i)
        {
            detector_->detect(gray_images[i], features[i].keypoints);
            extractor_->compute(gray_images[i], features[i].keypoints, features[i].descriptors);
        }
    }

    class GpuSurfFeaturesFinder : public FeaturesFinder
    {
    public:
        inline GpuSurfFeaturesFinder()
        {
            surf.hessianThreshold = 500.0;
            surf.extended = false;
        }

    protected:
        void find(const vector<Mat> &images, vector<ImageFeatures> &features);

    private:
        SURF_GPU surf;
    };

    void GpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
    {
        // Make images gray
        vector<GpuMat> gray_images(images.size());
        for (size_t i = 0; i < images.size(); ++i)
        {
            CV_Assert(images[i].depth() == CV_8U);
            cvtColor(GpuMat(images[i]), gray_images[i], CV_BGR2GRAY);
        }

        features.resize(images.size());

        // Find keypoints in all images
        GpuMat d_keypoints;
        GpuMat d_descriptors;
        for (size_t i = 0; i < images.size(); ++i)
        {
            surf.nOctaves = 3;
            surf.nOctaveLayers = 4;
            surf(gray_images[i], GpuMat(), d_keypoints);

            surf.nOctaves = 4;
            surf.nOctaveLayers = 2;
            surf(gray_images[i], GpuMat(), d_keypoints, d_descriptors, true);

            surf.downloadKeypoints(d_keypoints, features[i].keypoints);
            d_descriptors.download(features[i].descriptors);
        }
    }
}

SurfFeaturesFinder::SurfFeaturesFinder(bool gpu_hint)
{
    if (gpu_hint && getCudaEnabledDeviceCount() > 0)
        impl_ = new GpuSurfFeaturesFinder;
    else
        impl_ = new CpuSurfFeaturesFinder;
}


void SurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
{
    (*impl_)(images, features);
}


//////////////////////////////////////////////////////////////////////////////

MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0) {}


MatchesInfo::MatchesInfo(const MatchesInfo &other)
{
    *this = other;
}


const MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
{
    src_img_idx = other.src_img_idx;
    dst_img_idx = other.dst_img_idx;
    matches = other.matches;
    num_inliers = other.num_inliers;
    H = other.H.clone();
    return *this;
}


//////////////////////////////////////////////////////////////////////////////

void FeaturesMatcher::operator ()(const vector<Mat> &images, const vector<ImageFeatures> &features,
                                  vector<MatchesInfo> &pairwise_matches)
{
    pairwise_matches.resize(images.size() * images.size());
    for (size_t i = 0; i < images.size(); ++i)
    {
        LOGLN("Processing image " << i << "... ");
        for (size_t j = 0; j < images.size(); ++j)
        {
            if (i == j)
                continue;
            size_t pair_idx = i * images.size() + j;
            (*this)(images[i], features[i], images[j], features[j], pairwise_matches[pair_idx]);
            pairwise_matches[pair_idx].src_img_idx = i;
            pairwise_matches[pair_idx].dst_img_idx = j;
        }
    }
}


//////////////////////////////////////////////////////////////////////////////

namespace
{
    class CpuMatcher : public FeaturesMatcher
    {
    public:
        inline CpuMatcher(float match_conf) : match_conf_(match_conf) {}

        void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info);

    private:
        float match_conf_;
    };

    void CpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info)
    {
        matches_info.matches.clear();

        BruteForceMatcher< L2<float> > matcher;
        vector< vector<DMatch> > pair_matches;

        // Find 1->2 matches
        matcher.knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
        for (size_t i = 0; i < pair_matches.size(); ++i)
        {
            if (pair_matches[i].size() < 2)
                continue;
            const DMatch& m0 = pair_matches[i][0];
            const DMatch& m1 = pair_matches[i][1];
            if (m0.distance < (1.f - match_conf_) * m1.distance)
                matches_info.matches.push_back(m0);
        }

        // Find 2->1 matches
        pair_matches.clear();
        matcher.knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
        for (size_t i = 0; i < pair_matches.size(); ++i)
        {
            if (pair_matches[i].size() < 2)
                continue;
            const DMatch& m0 = pair_matches[i][0];
            const DMatch& m1 = pair_matches[i][1];
            if (m0.distance < (1.f - match_conf_) * m1.distance)
                matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
        }
    }

    class GpuMatcher : public FeaturesMatcher
    {
    public:
        inline GpuMatcher(float match_conf) : match_conf_(match_conf) {}

        void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info);

    private:
        float match_conf_;

        GpuMat descriptors1_;
        GpuMat descriptors2_;

        GpuMat trainIdx_, distance_, allDist_;
    };

    void GpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info)
    {
        matches_info.matches.clear();

        BruteForceMatcher_GPU< L2<float> > matcher;

        descriptors1_.upload(features1.descriptors);
        descriptors2_.upload(features2.descriptors);

        vector< vector<DMatch> > pair_matches;

        // Find 1->2 matches
        matcher.knnMatch(descriptors1_, descriptors2_, trainIdx_, distance_, allDist_, 2);
        matcher.knnMatchDownload(trainIdx_, distance_, pair_matches);
        for (size_t i = 0; i < pair_matches.size(); ++i)
        {
            if (pair_matches[i].size() < 2)
                continue;
            const DMatch& m0 = pair_matches[i][0];
            const DMatch& m1 = pair_matches[i][1];

            CV_Assert(m0.queryIdx < features1.keypoints.size());
            CV_Assert(m0.trainIdx < features2.keypoints.size());

            if (m0.distance < (1.f - match_conf_) * m1.distance)
                matches_info.matches.push_back(m0);
        }

        // Find 2->1 matches
        pair_matches.clear();
        matcher.knnMatch(descriptors2_, descriptors1_, trainIdx_, distance_, allDist_, 2);
        matcher.knnMatchDownload(trainIdx_, distance_, pair_matches);
        for (size_t i = 0; i < pair_matches.size(); ++i)
        {
            if (pair_matches[i].size() < 2)
                continue;
            const DMatch& m0 = pair_matches[i][0];
            const DMatch& m1 = pair_matches[i][1];

            CV_Assert(m0.trainIdx < features1.keypoints.size());
            CV_Assert(m0.queryIdx < features2.keypoints.size());

            if (m0.distance < (1.f - match_conf_) * m1.distance)
                matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
        }
    }
}

BestOf2NearestMatcher::BestOf2NearestMatcher(bool gpu_hint, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
    if (gpu_hint && getCudaEnabledDeviceCount() > 0)
        impl_ = new GpuMatcher(match_conf);
    else
        impl_ = new CpuMatcher(match_conf);

    num_matches_thresh1_ = num_matches_thresh1;
    num_matches_thresh2_ = num_matches_thresh2;
}


void BestOf2NearestMatcher::match(const Mat &img1, const ImageFeatures &features1, const Mat &img2, const ImageFeatures &features2,
                                  MatchesInfo &matches_info)
{
    (*impl_)(img1, features1, img2, features2, matches_info);

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation
    Mat src_points(1, matches_info.matches.size(), CV_32FC2);
    Mat dst_points(1, matches_info.matches.size(), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= img1.cols * 0.5f;
        p.y -= img1.rows * 0.5f;
        src_points.at<Point2f>(0, i) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= img2.cols * 0.5f;
        p.y -= img2.rows * 0.5f;
        dst_points.at<Point2f>(0, i) = p;
    }

    // Find pair-wise motion
    vector<uchar> inlier_mask;
    matches_info.H = findHomography(src_points, dst_points, inlier_mask, CV_RANSAC);

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < inlier_mask.size(); ++i)
        if (inlier_mask[i])
            matches_info.num_inliers++;

    // Check if we should try to refine motion
    if (matches_info.num_inliers < num_matches_thresh2_)
        return;

    // Construct point-point correspondences for inliers only
    src_points.create(1, matches_info.num_inliers, CV_32FC2);
    dst_points.create(1, matches_info.num_inliers, CV_32FC2);
    int inlier_idx = 0;
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        if (!inlier_mask[i])
            continue;
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= img1.cols * 0.5f;
        p.y -= img2.rows * 0.5f;
        src_points.at<Point2f>(0, inlier_idx) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= img2.cols * 0.5f;
        p.y -= img2.rows * 0.5f;
        dst_points.at<Point2f>(0, inlier_idx) = p;

        inlier_idx++;
    }

    // Rerun motion estimation on inliers only
    matches_info.H = findHomography(src_points, dst_points, inlier_mask, CV_RANSAC);

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < inlier_mask.size(); ++i)
        if (inlier_mask[i])
            matches_info.num_inliers++;
}


//////////////////////////////////////////////////////////////////////////////

CameraParams::CameraParams() : focal(1), M(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}


CameraParams::CameraParams(const CameraParams &other)
{
    *this = other;
}


const CameraParams& CameraParams::operator =(const CameraParams &other)
{
    focal = other.focal;
    M = other.M.clone();
    t = other.t.clone();
    return *this;
}


//////////////////////////////////////////////////////////////////////////////

struct IncDistance
{
    IncDistance(vector<int> &dists) : dists(&dists[0]) {}
    void operator ()(const GraphEdge &edge) { dists[edge.to] = dists[edge.from] + 1; }
    int* dists;
};


struct CalcRotation
{
    CalcRotation(int num_images, const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
        : num_images(num_images), pairwise_matches(&pairwise_matches[0]), cameras(&cameras[0]) {}

    void operator ()(const GraphEdge &edge)
    {
        int pair_idx = edge.from * num_images + edge.to;

        double f_from = cameras[edge.from].focal;
        double f_to = cameras[edge.to].focal;

        Mat K_from = Mat::eye(3, 3, CV_64F);
        K_from.at<double>(0, 0) = f_from;
        K_from.at<double>(1, 1) = f_from;

        Mat K_to = Mat::eye(3, 3, CV_64F);
        K_to.at<double>(0, 0) = f_to;
        K_to.at<double>(1, 1) = f_to;

        Mat R = K_from.inv() * pairwise_matches[pair_idx].H.inv() * K_to;
        cameras[edge.to].M = cameras[edge.from].M * R;
    }

    int num_images;
    const MatchesInfo* pairwise_matches;
    CameraParams* cameras;
};


void HomographyBasedEstimator::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
                                        const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
{
    const int num_images = static_cast<int>(images.size());

    // Find focals from pair-wise homographies
    vector<bool> is_focal_estimated(num_images, false);
    vector<double> focals;
    for (int i = 0; i < num_images; ++i)
    {
        for (int j = 0; j < num_images; ++j)
        {
            int pair_idx = i * num_images + j;
            if (pairwise_matches[pair_idx].H.empty())
                continue;
            double f_to, f_from;
            bool f_to_ok, f_from_ok;
            focalsFromHomography(pairwise_matches[pair_idx].H.inv(), f_to, f_from, f_to_ok, f_from_ok);
            if (f_from_ok)
                focals.push_back(f_from);
            if (f_to_ok)
                focals.push_back(f_to);
            if (f_from_ok && f_to_ok)
            {
                is_focal_estimated[i] = true;
                is_focal_estimated[j] = true;
            }
        }
    }
    is_focals_estimated_ = true;
    for (int i = 0; i < num_images; ++i)
        is_focals_estimated_ = is_focals_estimated_ && is_focal_estimated[i];

    // Find focal medians and use them as true focal length
    nth_element(focals.begin(), focals.end(), focals.begin() + focals.size() / 2);
    cameras.resize(num_images);
    for (int i = 0; i < num_images; ++i)
        cameras[i].focal = focals[focals.size() / 2];

    // Restore global motion
    Graph span_tree;
    vector<int> span_tree_centers;
    findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);
    span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));
}


//////////////////////////////////////////////////////////////////////////////

void BundleAdjuster::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
                              const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
{
    num_images_ = static_cast<int>(images.size());
    images_ = &images[0];
    features_ = &features[0];
    pairwise_matches_ = &pairwise_matches[0];

    // Prepare focals and rotations
    cameras_.create(num_images_ * 4, 1, CV_64F);
    SVD svd;
    for (int i = 0; i < num_images_; ++i)
    {
        cameras_.at<double>(i * 4, 0) = cameras[i].focal;
        svd(cameras[i].M, SVD::FULL_UV);
        Mat R = svd.u * svd.vt;
        if (determinant(R) < 0) R *= -1;
        Mat rvec;
        Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);
        cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);
        cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);
        cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);
    }

    edges_.clear();
    for (int i = 0; i < num_images_ - 1; ++i)
    {
        for (int j = i + 1; j < num_images_; ++j)
        {
            int pair_idx = i * num_images_ + j;
            const MatchesInfo& mi = pairwise_matches_[pair_idx];
            float ni = static_cast<float>(mi.num_inliers);
            float nf = static_cast<float>(mi.matches.size());
            if (ni / (8.f + 0.3f * nf) > 1.f)
                edges_.push_back(make_pair(i, j));
        }
    }

    total_num_matches_ = 0;
    for (size_t i = 0; i < edges_.size(); ++i)
        total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].matches.size());

    CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,
                     cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 100, DBL_EPSILON));

    CvMat matParams = cameras_;
    cvCopy(&matParams, solver.param);

    int count = 0;
    double last_err = numeric_limits<double>::max();
    for(;;)
    {
        const CvMat* _param = 0;
        CvMat* _J = 0;
        CvMat* _err = 0;

        bool proceed = solver.update( _param, _J, _err );

        cvCopy( _param, &matParams );

        if( !proceed || !_err )
            break;

        if( _J )
        {
            calcJacobian();
            CvMat matJ = J_;
            cvCopy( &matJ, _J );
        }

        if (_err)
        {
            calcError(err_);
            //LOGLN("Error: " << sqrt(err_.dot(err_)));
            count++;
            CvMat matErr = err_;
            cvCopy( &matErr, _err );
        }
    }
    LOGLN("BA final error: " << sqrt(err_.dot(err_)));
    LOGLN("BA iterations done: " << count);

    // Obtain global motion
    for (int i = 0; i < num_images_; ++i)
    {
        cameras[i].focal = cameras_.at<double>(i * 4, 0);
        Mat rvec(3, 1, CV_64F);
        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
        Rodrigues(rvec, cameras[i].M);
        Mat Mf;
        cameras[i].M.convertTo(Mf, CV_32F);
        cameras[i].M = Mf;
    }

    // Normalize motion to center image
    Graph span_tree;
    vector<int> span_tree_centers;
    findMaxSpanningTree(num_images_, pairwise_matches, span_tree, span_tree_centers);
    Mat R_inv = cameras[span_tree_centers[0]].M.inv();
    for (int i = 0; i < num_images_; ++i)
        cameras[i].M = R_inv * cameras[i].M;
}


void BundleAdjuster::calcError(Mat &err)
{
    err.create(total_num_matches_ * 3, 1, CV_64F);

    int match_idx = 0;
    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
    {
        int i = edges_[edge_idx].first;
        int j = edges_[edge_idx].second;
        float f1 = static_cast<float>(cameras_.at<double>(i * 4, 0));
        float f2 = static_cast<float>(cameras_.at<double>(j * 4, 0));
        double R1[9], R2[9];
        Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);
        Mat rvec(3, 1, CV_64F);
        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
        Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);
        rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);
        rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);
        rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);
        Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);

        const ImageFeatures& features1 = features_[i];
        const ImageFeatures& features2 = features_[j];
        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            const DMatch& m = matches_info.matches[k];

            Point2f kp1 = features1.keypoints[m.queryIdx].pt;
            kp1.x -= 0.5f * images_[i].cols;
            kp1.y -= 0.5f * images_[i].rows;
            Point2f kp2 = features2.keypoints[m.trainIdx].pt;
            kp2.x -= 0.5f * images_[j].cols;
            kp2.y -= 0.5f * images_[j].rows;
            float len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);
            float len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);
            Point3f p1(kp1.x / len1, kp1.y / len1, f1 / len1);
            Point3f p2(kp2.x / len2, kp2.y / len2, f2 / len2);

            Point3f d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],
                       p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],
                       p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);
            Point3f d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],
                       p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],
                       p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);

            float mult = 1.f;
            if (cost_space_ == FOCAL_RAY_SPACE)
                mult = sqrt(f1 * f2);
            err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);
            err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);
            err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);
            match_idx++;
        }
    }
}


void calcDeriv(const Mat &err1, const Mat &err2, double h, Mat res)
{
    for (int i = 0; i < err1.rows; ++i)
        res.at<double>(i, 0) = (err2.at<double>(i, 0) - err1.at<double>(i, 0)) / h;
}


void BundleAdjuster::calcJacobian()
{
    J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);

    double f, r;
    const double df = 0.001; // Focal length step
    const double dr = 0.001; // Angle step

    for (int i = 0; i < num_images_; ++i)
    {
        f = cameras_.at<double>(i * 4, 0);
        cameras_.at<double>(i * 4, 0) = f - df;
        calcError(err1_);
        cameras_.at<double>(i * 4, 0) = f + df;
        calcError(err2_);
        calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));
        cameras_.at<double>(i * 4, 0) = f;

        r = cameras_.at<double>(i * 4 + 1, 0);
        cameras_.at<double>(i * 4 + 1, 0) = r - dr;
        calcError(err1_);
        cameras_.at<double>(i * 4 + 1, 0) = r + dr;
        calcError(err2_);
        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));
        cameras_.at<double>(i * 4 + 1, 0) = r;

        r = cameras_.at<double>(i * 4 + 2, 0);
        cameras_.at<double>(i * 4 + 2, 0) = r - dr;
        calcError(err1_);
        cameras_.at<double>(i * 4 + 2, 0) = r + dr;
        calcError(err2_);
        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));
        cameras_.at<double>(i * 4 + 2, 0) = r;

        r = cameras_.at<double>(i * 4 + 3, 0);
        cameras_.at<double>(i * 4 + 3, 0) = r - dr;
        calcError(err1_);
        cameras_.at<double>(i * 4 + 3, 0) = r + dr;
        calcError(err2_);
        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));
        cameras_.at<double>(i * 4 + 3, 0) = r;
    }
}


//////////////////////////////////////////////////////////////////////////////

void findMaxSpanningTree(int num_images, const vector<MatchesInfo> &pairwise_matches,
                         Graph &span_tree, vector<int> &centers)
{
    Graph graph(num_images);
    vector<GraphEdge> edges;

    // Construct images graph and remember its edges
    for (int i = 0; i < num_images; ++i)
    {
        for (int j = 0; j < num_images; ++j)
        {
            float conf = static_cast<float>(pairwise_matches[i * num_images + j].num_inliers);
            graph.addEdge(i, j, conf);
            edges.push_back(GraphEdge(i, j, conf));
        }
    }

    DjSets comps(num_images);
    span_tree.create(num_images);
    vector<int> span_tree_powers(num_images, 0);

    // Find maximum spanning tree
    sort(edges.begin(), edges.end(), greater<GraphEdge>());
    for (size_t i = 0; i < edges.size(); ++i)
    {
        int comp1 = comps.find(edges[i].from);
        int comp2 = comps.find(edges[i].to);
        if (comp1 != comp2)
        {
            comps.merge(comp1, comp2);
            span_tree.addEdge(edges[i].from, edges[i].to, edges[i].weight);
            span_tree.addEdge(edges[i].to, edges[i].from, edges[i].weight);
            span_tree_powers[edges[i].from]++;
            span_tree_powers[edges[i].to]++;
        }
    }

    // Find spanning tree leafs
    vector<int> span_tree_leafs;
    for (int i = 0; i < num_images; ++i)
        if (span_tree_powers[i] == 1)
            span_tree_leafs.push_back(i);

    // Find maximum distance from each spanning tree vertex
    vector<int> max_dists(num_images, 0);
    vector<int> cur_dists;
    for (size_t i = 0; i < span_tree_leafs.size(); ++i)
    {
        cur_dists.assign(num_images, 0);
        span_tree.walkBreadthFirst(span_tree_leafs[i], IncDistance(cur_dists));
        for (int j = 0; j < num_images; ++j)
            max_dists[j] = max(max_dists[j], cur_dists[j]);
    }

    // Find min-max distance
    int min_max_dist = max_dists[0];
    for (int i = 1; i < num_images; ++i)
        if (min_max_dist > max_dists[i])
            min_max_dist = max_dists[i];

    // Find spanning tree centers
    centers.clear();
    for (int i = 0; i < num_images; ++i)
        if (max_dists[i] == min_max_dist)
            centers.push_back(i);
    CV_Assert(centers.size() > 0 && centers.size() <= 2);
}
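
In BundleAdjuster::calcError above, each camera contributes four parameters (a focal length plus a Rodrigues rotation vector), and each match between images i and j contributes a 3-vector residual: with p and q the matched keypoints in centered image coordinates,

$$ r_p = \frac{(p_x,\; p_y,\; f_i)}{\lVert (p_x,\; p_y,\; f_i) \rVert}, \qquad e = m\,\bigl(R_i\, r_p - R_j\, r_q\bigr), \qquad m = \begin{cases} \sqrt{f_i f_j} & \text{FOCAL\_RAY\_SPACE} \\ 1 & \text{RAY\_SPACE.} \end{cases} $$

The Jacobian is then built by central finite differences with steps df = dr = 0.001, as in calcJacobian.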
157
modules/stitching/motion_estimators.hpp
Normal file
157
modules/stitching/motion_estimators.hpp
Normal file
@ -0,0 +1,157 @@
|
||||
#ifndef _OPENCV_MOTION_ESTIMATORS_HPP_
|
||||
#define _OPENCV_MOTION_ESTIMATORS_HPP_
|
||||
|
||||
#include <vector>
|
||||
#include <opencv2/core/core.hpp>
|
||||
#include <opencv2/features2d/features2d.hpp>
|
||||
#include "util.hpp"
|
||||
|
||||
struct ImageFeatures
|
||||
{
|
||||
std::vector<cv::KeyPoint> keypoints;
|
||||
cv::Mat descriptors;
|
||||
};
|
||||
|
||||
|
||||
class FeaturesFinder
|
||||
{
|
||||
public:
|
||||
virtual ~FeaturesFinder() {}
|
||||
void operator ()(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features) { find(images, features); }
|
||||
|
||||
protected:
|
||||
virtual void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features) = 0;
|
||||
};
|
||||
|
||||
|
||||
class SurfFeaturesFinder : public FeaturesFinder
|
||||
{
|
||||
public:
|
||||
explicit SurfFeaturesFinder(bool gpu_hint = true);
|
||||
|
||||
protected:
|
||||
void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features);
|
||||
|
||||
cv::Ptr<FeaturesFinder> impl_;
|
||||
};
|
||||
|
||||
|
||||
struct MatchesInfo
|
||||
{
|
||||
MatchesInfo();
|
||||
MatchesInfo(const MatchesInfo &other);
|
||||
const MatchesInfo& operator =(const MatchesInfo &other);
|
||||
|
||||
    int src_img_idx, dst_img_idx; // Optional images indices
    std::vector<cv::DMatch> matches;
    int num_inliers; // Number of geometrically consistent matches
    cv::Mat H; // Homography
};


class FeaturesMatcher
{
public:
    virtual ~FeaturesMatcher() {}

    void operator ()(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
                     MatchesInfo& matches_info) { match(img1, features1, img2, features2, matches_info); }

    void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
                     std::vector<MatchesInfo> &pairwise_matches);

protected:
    virtual void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
                       MatchesInfo& matches_info) = 0;
};


class BestOf2NearestMatcher : public FeaturesMatcher
{
public:
    explicit BestOf2NearestMatcher(bool gpu_hint = true, float match_conf = 0.55f, int num_matches_thresh1 = 5, int num_matches_thresh2 = 5);

protected:
    void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
               MatchesInfo &matches_info);

    int num_matches_thresh1_;
    int num_matches_thresh2_;

    cv::Ptr<FeaturesMatcher> impl_;
};


struct CameraParams
{
    CameraParams();
    CameraParams(const CameraParams& other);
    const CameraParams& operator =(const CameraParams& other);

    double focal;
    cv::Mat M, t;
};


class Estimator
{
public:
    void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
                     const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras)
    {
        estimate(images, features, pairwise_matches, cameras);
    }

protected:
    virtual void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
                          const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras) = 0;
};


class HomographyBasedEstimator : public Estimator
{
public:
    HomographyBasedEstimator() : is_focals_estimated_(false) {}

    bool isFocalsEstimated() const { return is_focals_estimated_; }

private:
    void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
                  const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);

    bool is_focals_estimated_;
};


class BundleAdjuster : public Estimator
{
public:
    enum { RAY_SPACE, FOCAL_RAY_SPACE };

    BundleAdjuster(int cost_space = FOCAL_RAY_SPACE) : cost_space_(cost_space) {}

private:
    void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
                  const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);

    void calcError(cv::Mat &err);
    void calcJacobian();

    int num_images_;
    int total_num_matches_;
    const cv::Mat *images_;
    const ImageFeatures *features_;
    const MatchesInfo *pairwise_matches_;
    cv::Mat cameras_;
    std::vector<std::pair<int,int> > edges_;

    int cost_space_;
    cv::Mat err_, err1_, err2_;
    cv::Mat J_;
};


//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions

void findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,
                         Graph &span_tree, std::vector<int> &centers);

#endif // _OPENCV_MOTION_ESTIMATORS_HPP_
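Not part of the commit: a minimal usage sketch of how the matcher and estimator classes declared above are meant to chain together. The wrapper function is invented for illustration; `images` and the per-image ImageFeatures are assumed to have been produced by the module's features finder (declared earlier in this header), and MatchesInfo/CameraParams are the types shown above.

// Usage sketch only; assumes features were already extracted elsewhere in the module.
#include <vector>
#include <opencv2/core/core.hpp>
#include "motion_estimators.hpp"

void estimateCamerasSketch(const std::vector<cv::Mat> &images,
                           const std::vector<ImageFeatures> &features,
                           std::vector<CameraParams> &cameras)
{
    // 1. Match every image pair; num_inliers/H in MatchesInfo hold the consistent matches
    std::vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(false /*gpu_hint*/);
    matcher(images, features, pairwise_matches);

    // 2. Initial camera parameters from the pairwise homographies
    HomographyBasedEstimator estimator;
    estimator(images, features, pairwise_matches, cameras);

    // 3. Joint refinement of the estimated parameters
    BundleAdjuster adjuster(BundleAdjuster::FOCAL_RAY_SPACE);
    adjuster(images, features, pairwise_matches, cameras);
}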
258
modules/stitching/seam_finders.cpp
Normal file
@@ -0,0 +1,258 @@
#include <opencv2/imgproc/imgproc.hpp>
#include <gcgraph.hpp>
#include "seam_finders.hpp"
#include "util.hpp"

using namespace std;
using namespace cv;


Ptr<SeamFinder> SeamFinder::createDefault(int type)
{
    if (type == NO)
        return new NoSeamFinder();
    if (type == VORONOI)
        return new VoronoiSeamFinder();
    if (type == GRAPH_CUT)
        return new GraphCutSeamFinder();
    CV_Error(CV_StsBadArg, "unsupported seam finding method");
    return NULL;
}


void SeamFinder::operator ()(const vector<Mat> &src, const vector<Point> &corners,
                             vector<Mat> &masks)
{
    find(src, corners, masks);
}


void PairwiseSeamFinder::find(const vector<Mat> &src, const vector<Point> &corners,
                              vector<Mat> &masks)
{
    if (src.size() == 0)
        return;

    for (size_t i = 0; i < src.size() - 1; ++i)
    {
        for (size_t j = i + 1; j < src.size(); ++j)
        {
            int x_min = max(corners[i].x, corners[j].x);
            int x_max = min(corners[i].x + src[i].cols - 1, corners[j].x + src[j].cols - 1);
            int y_min = max(corners[i].y, corners[j].y);
            int y_max = min(corners[i].y + src[i].rows - 1, corners[j].y + src[j].rows - 1);

            if (x_max >= x_min && y_max >= y_min)
                findInPair(src[i], src[j], corners[i], corners[j],
                           Rect(x_min, y_min, x_max - x_min + 1, y_max - y_min + 1),
                           masks[i], masks[j]);
        }
    }
}


void VoronoiSeamFinder::findInPair(const Mat &img1, const Mat &img2, Point tl1, Point tl2,
                                   Rect roi, Mat &mask1, Mat &mask2)
{
    const int gap = 10;

    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);

    // Cut submasks with some gap
    for (int y = -gap; y < roi.height + gap; ++y)
    {
        for (int x = -gap; x < roi.width + gap; ++x)
        {
            int y1 = roi.y - tl1.y + y;
            int x1 = roi.x - tl1.x + x;
            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)
                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);
            else
                submask1.at<uchar>(y + gap, x + gap) = 0;

            int y2 = roi.y - tl2.y + y;
            int x2 = roi.x - tl2.x + x;
            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)
                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);
            else
                submask2.at<uchar>(y + gap, x + gap) = 0;
        }
    }

    Mat collision = (submask1 != 0) & (submask2 != 0);
    Mat unique1 = submask1.clone(); unique1.setTo(0, collision);
    Mat unique2 = submask2.clone(); unique2.setTo(0, collision);

    Mat dist1, dist2;
    distanceTransform(unique1 == 0, dist1, CV_DIST_L1, 3);
    distanceTransform(unique2 == 0, dist2, CV_DIST_L1, 3);

    Mat seam = dist1 < dist2;

    for (int y = 0; y < roi.height; ++y)
    {
        for (int x = 0; x < roi.width; ++x)
        {
            if (seam.at<uchar>(y + gap, x + gap))
                mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;
            else
                mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;
        }
    }
}


class GraphCutSeamFinder::Impl
{
public:
    Impl(int cost_type, float terminal_cost, float bad_region_penalty)
        : cost_type_(cost_type), terminal_cost_(terminal_cost), bad_region_penalty_(bad_region_penalty) {}

    void findInPair(const Mat &img1, const Mat &img2, Point tl1, Point tl2,
                    Rect roi, Mat &mask1, Mat &mask2);

private:
    void setGraphWeightsColor(const Mat &img1, const Mat &img2, const Mat &mask1, const Mat &mask2,
                              GCGraph<float> &graph);

    int cost_type_;
    float terminal_cost_;
    float bad_region_penalty_;
};


void GraphCutSeamFinder::Impl::setGraphWeightsColor(const Mat &img1, const Mat &img2, const Mat &mask1, const Mat &mask2,
                                                    GCGraph<float> &graph)
{
    const Size img_size = img1.size();

    // Set terminal weights
    for (int y = 0; y < img_size.height; ++y)
    {
        for (int x = 0; x < img_size.width; ++x)
        {
            int v = graph.addVtx();
            graph.addTermWeights(v, mask1.at<uchar>(y, x) ? terminal_cost_ : 0.f,
                                    mask2.at<uchar>(y, x) ? terminal_cost_ : 0.f);
        }
    }

    const float weight_eps = 1e-3f;

    // Set regular edge weights
    for (int y = 0; y < img_size.height; ++y)
    {
        for (int x = 0; x < img_size.width; ++x)
        {
            int v = y * img_size.width + x;
            if (x < img_size.width - 1)
            {
                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +
                               normL2(img1.at<Point3f>(y, x + 1), img2.at<Point3f>(y, x + 1)) +
                               weight_eps;

                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y, x + 1) ||
                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y, x + 1))
                    weight += bad_region_penalty_;

                graph.addEdges(v, v + 1, weight, weight);
            }
            if (y < img_size.height - 1)
            {
                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +
                               normL2(img1.at<Point3f>(y + 1, x), img2.at<Point3f>(y + 1, x)) +
                               weight_eps;

                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y + 1, x) ||
                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y + 1, x))
                    weight += bad_region_penalty_;

                graph.addEdges(v, v + img_size.width, weight, weight);
            }
        }
    }
}


void GraphCutSeamFinder::Impl::findInPair(const Mat &img1, const Mat &img2, Point tl1, Point tl2,
                                          Rect roi, Mat &mask1, Mat &mask2)
{
    const int gap = 10;

    Mat subimg1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);
    Mat subimg2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);
    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);

    // Cut subimages and submasks with some gap
    for (int y = -gap; y < roi.height + gap; ++y)
    {
        for (int x = -gap; x < roi.width + gap; ++x)
        {
            int y1 = roi.y - tl1.y + y;
            int x1 = roi.x - tl1.x + x;
            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)
            {
                subimg1.at<Point3f>(y + gap, x + gap) = img1.at<Point3f>(y1, x1);
                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);
            }
            else
            {
                subimg1.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);
                submask1.at<uchar>(y + gap, x + gap) = 0;
            }

            int y2 = roi.y - tl2.y + y;
            int x2 = roi.x - tl2.x + x;
            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)
            {
                subimg2.at<Point3f>(y + gap, x + gap) = img2.at<Point3f>(y2, x2);
                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);
            }
            else
            {
                subimg2.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);
                submask2.at<uchar>(y + gap, x + gap) = 0;
            }
        }
    }

    const int vertex_count = (roi.height + 2 * gap) * (roi.width + 2 * gap);
    const int edge_count = (roi.height - 1 + 2 * gap) * (roi.width + 2 * gap) +
                           (roi.width - 1 + 2 * gap) * (roi.height + 2 * gap);
    GCGraph<float> graph(vertex_count, edge_count);

    switch (cost_type_)
    {
    case GraphCutSeamFinder::COST_COLOR:
        setGraphWeightsColor(subimg1, subimg2, submask1, submask2, graph);
        break;
    default:
        CV_Error(CV_StsBadArg, "unsupported pixel similarity measure");
    }

    graph.maxFlow();

    for (int y = 0; y < roi.height; ++y)
    {
        for (int x = 0; x < roi.width; ++x)
        {
            if (graph.inSourceSegment((y + gap) * (roi.width + 2 * gap) + x + gap))
                mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;
            else
                mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;
        }
    }
}


GraphCutSeamFinder::GraphCutSeamFinder(int cost_type, float terminal_cost, float bad_region_penalty)
    : impl_(new Impl(cost_type, terminal_cost, bad_region_penalty)) {}


void GraphCutSeamFinder::findInPair(const Mat &img1, const Mat &img2, Point tl1, Point tl2,
                                    Rect roi, Mat &mask1, Mat &mask2)
{
    impl_->findInPair(img1, img2, tl1, tl2, roi, mask1, mask2);
}
68
modules/stitching/seam_finders.hpp
Normal file
@@ -0,0 +1,68 @@
#ifndef _OPENCV_SEAM_FINDERS_HPP_
#define _OPENCV_SEAM_FINDERS_HPP_

#include <vector>
#include <opencv2/core/core.hpp>

class SeamFinder
{
public:
    enum { NO, VORONOI, GRAPH_CUT };

    static cv::Ptr<SeamFinder> createDefault(int type);

    virtual ~SeamFinder() {}

    void operator ()(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
                     std::vector<cv::Mat> &masks);

protected:
    virtual void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
                      std::vector<cv::Mat> &masks) = 0;
};


class NoSeamFinder : public SeamFinder
{
protected:
    void find(const std::vector<cv::Mat>&, const std::vector<cv::Point>&, std::vector<cv::Mat>&) {}
};


class PairwiseSeamFinder : public SeamFinder
{
protected:
    void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
              std::vector<cv::Mat> &masks);

    virtual void findInPair(const cv::Mat &img1, const cv::Mat &img2, cv::Point tl1, cv::Point tl2,
                            cv::Rect roi, cv::Mat &mask1, cv::Mat &mask2) = 0;
};


class VoronoiSeamFinder : public PairwiseSeamFinder
{
private:
    void findInPair(const cv::Mat &img1, const cv::Mat &img2, cv::Point tl1, cv::Point tl2,
                    cv::Rect roi, cv::Mat &mask1, cv::Mat &mask2);
};


class GraphCutSeamFinder : public PairwiseSeamFinder
{
public:
    // TODO add COST_COLOR_GRAD support
    enum { COST_COLOR/*, COST_COLOR_GRAD*/ };

    GraphCutSeamFinder(int cost_type = COST_COLOR, float terminal_cost = 10000.f,
                       float bad_region_penalty = 1000.f);

private:
    void findInPair(const cv::Mat &img1, const cv::Mat &img2, cv::Point tl1, cv::Point tl2,
                    cv::Rect roi, cv::Mat &mask1, cv::Mat &mask2);

    class Impl;
    cv::Ptr<Impl> impl_;
};

#endif // _OPENCV_SEAM_FINDERS_HPP_
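Not part of the commit: a minimal sketch of how the seam finder interface declared above is used. Here `warped` are the per-image warped results with their top-left `corners` and CV_8U validity `masks`; because the graph-cut implementation reads pixels as Point3f, the images are converted to CV_32FC3 first. The function name is invented for illustration.

// Usage sketch only.
#include <vector>
#include <opencv2/core/core.hpp>
#include "seam_finders.hpp"

void findSeamsSketch(const std::vector<cv::Mat> &warped,
                     const std::vector<cv::Point> &corners,
                     std::vector<cv::Mat> &masks)
{
    // Graph-cut seam finding expects 3-channel float images
    std::vector<cv::Mat> images_f(warped.size());
    for (size_t i = 0; i < warped.size(); ++i)
        warped[i].convertTo(images_f[i], CV_32F);

    cv::Ptr<SeamFinder> finder = SeamFinder::createDefault(SeamFinder::GRAPH_CUT);
    (*finder)(images_f, corners, masks); // masks are zeroed on the losing side of each seam
}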
50
modules/stitching/util.cpp
Normal file
@@ -0,0 +1,50 @@
#include "util.hpp"

using namespace std;
using namespace cv;

void DjSets::create(int n) {
    rank_.assign(n, 0);
    size.assign(n, 1);
    parent.resize(n);
    for (int i = 0; i < n; ++i)
        parent[i] = i;
}


int DjSets::find(int elem) {
    // Locate the set representative, then compress the path to it
    int set = elem;
    while (set != parent[set])
        set = parent[set];
    int next;
    while (elem != parent[elem]) {
        next = parent[elem];
        parent[elem] = set;
        elem = next;
    }
    return set;
}


int DjSets::merge(int set1, int set2) {
    // Union by rank; both arguments must be set representatives
    if (rank_[set1] < rank_[set2]) {
        parent[set1] = set2;
        size[set2] += size[set1];
        return set2;
    }
    if (rank_[set2] < rank_[set1]) {
        parent[set2] = set1;
        size[set1] += size[set2];
        return set1;
    }
    parent[set1] = set2;
    rank_[set2]++;
    size[set2] += size[set1];
    return set2;
}


void Graph::addEdge(int from, int to, float weight)
{
    edges_[from].push_back(GraphEdge(from, to, weight));
}
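Not part of the commit: a tiny illustration of the DjSets union-find defined above (find with path compression, merge by rank). merge() expects set representatives, so the arguments are passed through find() first; the function name is invented for illustration.

// Usage sketch only.
#include <iostream>
#include "util.hpp"

void djSetsSketch()
{
    DjSets sets(5);                               // five singleton sets {0} .. {4}
    int a = sets.merge(sets.find(0), sets.find(1));
    int b = sets.merge(sets.find(2), sets.find(3));
    sets.merge(a, b);                             // now {0,1,2,3} and {4}

    std::cout << (sets.find(0) == sets.find(3)) << std::endl; // 1: same set
    std::cout << sets.size[sets.find(0)] << std::endl;        // 4: size of the merged set
    std::cout << (sets.find(4) == sets.find(0)) << std::endl; // 0: 4 is still on its own
}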
72
modules/stitching/util.hpp
Normal file
@@ -0,0 +1,72 @@
#ifndef _OPENCV_STITCHING_UTIL_HPP_
#define _OPENCV_STITCHING_UTIL_HPP_

#include <vector>
#include <list>
#include <opencv2/core/core.hpp>

#define ENABLE_LOG 1

#if ENABLE_LOG
#include <iostream>
#define LOG(msg) std::cout << msg;
#else
#define LOG(msg)
#endif

#define LOGLN(msg) LOG(msg << std::endl)


class DjSets
{
public:
    DjSets(int n = 0) { create(n); }

    void create(int n);
    int find(int elem);
    int merge(int set1, int set2);

    std::vector<int> parent;
    std::vector<int> size;

private:
    std::vector<int> rank_;
};


struct GraphEdge
{
    GraphEdge(int from, int to, float weight)
        : from(from), to(to), weight(weight) {}
    bool operator <(const GraphEdge& other) const { return weight < other.weight; }
    bool operator >(const GraphEdge& other) const { return weight > other.weight; }

    int from, to;
    float weight;
};


class Graph
{
public:
    Graph(int num_vertices = 0) { create(num_vertices); }

    void create(int num_vertices) { edges_.assign(num_vertices, std::list<GraphEdge>()); }

    int numVertices() const { return static_cast<int>(edges_.size()); }

    void addEdge(int from, int to, float weight);

    template <typename B>
    B forEach(B body) const;

    template <typename B>
    B walkBreadthFirst(int from, B body) const;

private:
    std::vector< std::list<GraphEdge> > edges_;
};

#include "util_inl.hpp"

#endif // _OPENCV_STITCHING_UTIL_HPP_
74
modules/stitching/util_inl.hpp
Normal file
@@ -0,0 +1,74 @@
#ifndef _OPENCV_STITCHING_UTIL_INL_HPP_
#define _OPENCV_STITCHING_UTIL_INL_HPP_

#include <queue>
#include "util.hpp" // Make your IDE see declarations

template <typename B>
B Graph::forEach(B body) const
{
    for (int i = 0; i < numVertices(); ++i)
    {
        std::list<GraphEdge>::const_iterator edge = edges_[i].begin();
        for (; edge != edges_[i].end(); ++edge)
            body(*edge);
    }
    return body;
}


template <typename B>
B Graph::walkBreadthFirst(int from, B body) const
{
    std::vector<bool> was(numVertices(), false);
    std::queue<int> vertices;

    was[from] = true;
    vertices.push(from);

    while (!vertices.empty())
    {
        int vertex = vertices.front();
        vertices.pop();

        std::list<GraphEdge>::const_iterator edge = edges_[vertex].begin();
        for (; edge != edges_[vertex].end(); ++edge)
        {
            if (!was[edge->to])
            {
                body(*edge);
                was[edge->to] = true;
                vertices.push(edge->to);
            }
        }
    }

    return body;
}


//////////////////////////////////////////////////////////////////////////////
// Some auxiliary math functions

// Note: returns the squared L2 distance between a and b
static inline
float normL2(const cv::Point3f& a, const cv::Point3f& b)
{
    return (a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z);
}


// Squared L2 norm of a matrix treated as a vector
static inline
double normL2sq(const cv::Mat &m)
{
    return m.dot(m);
}


template <typename T>
static inline
T sqr(T x)
{
    return x * x;
}

#endif // _OPENCV_STITCHING_UTIL_INL_HPP_
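Not part of the commit: a short sketch of the Graph traversal templates defined above. walkBreadthFirst() takes the functor by value and returns it, so any state accumulated during the walk is read from the returned copy; the functor and function names are invented for illustration.

// Usage sketch only.
#include <iostream>
#include "util.hpp"

struct WeightSum
{
    WeightSum() : total(0.f) {}
    void operator ()(const GraphEdge &edge) { total += edge.weight; }
    float total;
};

void graphWalkSketch()
{
    Graph g(4);
    g.addEdge(0, 1, 1.f);   // addEdge stores a directed edge from -> to
    g.addEdge(1, 2, 2.f);
    g.addEdge(2, 3, 3.f);

    WeightSum sum = g.walkBreadthFirst(0, WeightSum());
    std::cout << sum.total << std::endl;   // 6: each edge is visited exactly once
}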
114
modules/stitching/warpers.cpp
Normal file
@@ -0,0 +1,114 @@
#include "warpers.hpp"

using namespace std;
using namespace cv;

Ptr<Warper> Warper::createByCameraFocal(int focal, int type)
{
    if (type == PLANE)
        return new PlaneWarper(focal);
    if (type == CYLINDRICAL)
        return new CylindricalWarper(focal);
    if (type == SPHERICAL)
        return new SphericalWarper(focal);
    CV_Error(CV_StsBadArg, "unsupported warping type");
    return NULL;
}


void ProjectorBase::setCameraMatrix(const Mat &M)
{
    CV_Assert(M.size() == Size(3, 3));
    CV_Assert(M.type() == CV_32F);
    m[0] = M.at<float>(0, 0); m[1] = M.at<float>(0, 1); m[2] = M.at<float>(0, 2);
    m[3] = M.at<float>(1, 0); m[4] = M.at<float>(1, 1); m[5] = M.at<float>(1, 2);
    m[6] = M.at<float>(2, 0); m[7] = M.at<float>(2, 1); m[8] = M.at<float>(2, 2);

    Mat M_inv = M.inv();
    minv[0] = M_inv.at<float>(0, 0); minv[1] = M_inv.at<float>(0, 1); minv[2] = M_inv.at<float>(0, 2);
    minv[3] = M_inv.at<float>(1, 0); minv[4] = M_inv.at<float>(1, 1); minv[5] = M_inv.at<float>(1, 2);
    minv[6] = M_inv.at<float>(2, 0); minv[7] = M_inv.at<float>(2, 1); minv[8] = M_inv.at<float>(2, 2);
}


Point Warper::operator ()(const Mat &src, float focal, const Mat& M, Mat &dst,
                          int interp_mode, int border_mode)
{
    return warp(src, focal, M, dst, interp_mode, border_mode);
}


void PlaneWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
{
    float tl_uf = numeric_limits<float>::max();
    float tl_vf = numeric_limits<float>::max();
    float br_uf = -numeric_limits<float>::max();
    float br_vf = -numeric_limits<float>::max();

    float u, v;

    projector_.mapForward(0, 0, u, v);
    tl_uf = min(tl_uf, u); tl_vf = min(tl_vf, v);
    br_uf = max(br_uf, u); br_vf = max(br_vf, v);

    projector_.mapForward(0, static_cast<float>(src_size_.height - 1), u, v);
    tl_uf = min(tl_uf, u); tl_vf = min(tl_vf, v);
    br_uf = max(br_uf, u); br_vf = max(br_vf, v);

    projector_.mapForward(static_cast<float>(src_size_.width - 1), 0, u, v);
    tl_uf = min(tl_uf, u); tl_vf = min(tl_vf, v);
    br_uf = max(br_uf, u); br_vf = max(br_vf, v);

    projector_.mapForward(static_cast<float>(src_size_.width - 1), static_cast<float>(src_size_.height - 1), u, v);
    tl_uf = min(tl_uf, u); tl_vf = min(tl_vf, v);
    br_uf = max(br_uf, u); br_vf = max(br_vf, v);

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}


void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
{
    detectResultRoiByBorder(dst_tl, dst_br);

    float tl_uf = static_cast<float>(dst_tl.x);
    float tl_vf = static_cast<float>(dst_tl.y);
    float br_uf = static_cast<float>(dst_br.x);
    float br_vf = static_cast<float>(dst_br.y);

    float x = projector_.minv[1];
    float y = projector_.minv[4];
    float z = projector_.minv[7];
    if (y > 0.f)
    {
        x = projector_.focal * x / z + src_size_.width * 0.5f;
        y = projector_.focal * y / z + src_size_.height * 0.5f;
        if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)
        {
            tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(CV_PI * projector_.scale));
            br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(CV_PI * projector_.scale));
        }
    }

    x = projector_.minv[1];
    y = -projector_.minv[4];
    z = projector_.minv[7];
    if (y > 0.f)
    {
        x = projector_.focal * x / z + src_size_.width * 0.5f;
        y = projector_.focal * y / z + src_size_.height * 0.5f;
        if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)
        {
            tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(0));
            br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(0));
        }
    }

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}
121
modules/stitching/warpers.hpp
Normal file
@@ -0,0 +1,121 @@
#ifndef _OPENCV_WARPERS_HPP_
#define _OPENCV_WARPERS_HPP_

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

class Warper
{
public:
    enum { PLANE, CYLINDRICAL, SPHERICAL };

    static cv::Ptr<Warper> createByCameraFocal(int focal, int type);

    virtual ~Warper() {}

    cv::Point operator ()(const cv::Mat &src, float focal, const cv::Mat& M, cv::Mat &dst,
                          int interp_mode = cv::INTER_LINEAR, int border_mode = cv::BORDER_REFLECT);

protected:
    virtual cv::Point warp(const cv::Mat &src, float focal, const cv::Mat& M, cv::Mat &dst,
                           int interp_mode, int border_mode) = 0;
};


struct ProjectorBase
{
    void setCameraMatrix(const cv::Mat& M);

    cv::Size size;
    float focal;
    float m[9];
    float minv[9];
    float scale;
};


template <class P>
class WarperBase : public Warper
{
protected:
    cv::Point warp(const cv::Mat &src, float focal, const cv::Mat &M, cv::Mat &dst,
                   int interp_mode, int border_mode);

    // Detects ROI of the destination image. It's correct for any projection.
    virtual void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);

    // Detects ROI of the destination image by walking over image border.
    // Correctness for any projection isn't guaranteed.
    void detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br);

    cv::Size src_size_;
    P projector_;
};


struct PlaneProjector : ProjectorBase
{
    void mapForward(float x, float y, float &u, float &v);
    void mapBackward(float u, float v, float &x, float &y);

    float plane_dist;
};


// Projects image onto z = plane_dist plane
class PlaneWarper : public WarperBase<PlaneProjector>
{
public:
    PlaneWarper(float plane_dist = 1.f, float scale = 1.f)
    {
        projector_.plane_dist = plane_dist;
        projector_.scale = scale;
    }

private:
    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
};


struct SphericalProjector : ProjectorBase
{
    void mapForward(float x, float y, float &u, float &v);
    void mapBackward(float u, float v, float &x, float &y);
};


// Projects image onto unit sphere with origin at (0, 0, 0).
// Poles are located at (0, -1, 0) and (0, 1, 0) points.
class SphericalWarper : public WarperBase<SphericalProjector>
{
public:
    SphericalWarper(float scale = 300.f) { projector_.scale = scale; }

private:
    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
};


struct CylindricalProjector : ProjectorBase
{
    void mapForward(float x, float y, float &u, float &v);
    void mapBackward(float u, float v, float &x, float &y);
};


// Projects image onto x * x + z * z = 1 cylinder
class CylindricalWarper : public WarperBase<CylindricalProjector>
{
public:
    CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }

private:
    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)
    {
        WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);
    }
};

#include "warpers_inl.hpp"

#endif // _OPENCV_WARPERS_HPP_
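Not part of the commit: a minimal sketch of the warper interface declared above. The 3x3 camera matrix must be CV_32F (setCameraMatrix asserts this), and the returned point is the top-left corner of the warped image in the common frame, which the pairwise seam finders later consume as `corners`. The identity matrix and the function name are placeholders for illustration.

// Usage sketch only.
#include <opencv2/core/core.hpp>
#include "warpers.hpp"

cv::Point warpOneSketch(const cv::Mat &src, float focal, cv::Mat &dst)
{
    cv::Mat M = cv::Mat::eye(3, 3, CV_32F);   // placeholder camera/rotation matrix
    cv::Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<int>(focal),
                                                         Warper::SPHERICAL);
    // Defaults: INTER_LINEAR interpolation, BORDER_REFLECT border handling
    return (*warper)(src, focal, M, dst);
}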
198
modules/stitching/warpers_inl.hpp
Normal file
@@ -0,0 +1,198 @@
#ifndef _OPENCV_WARPERS_INL_HPP_
#define _OPENCV_WARPERS_INL_HPP_

#include "warpers.hpp" // Make your IDE see declarations

template <class P>
cv::Point WarperBase<P>::warp(const cv::Mat &src, float focal, const cv::Mat &M, cv::Mat &dst,
                              int interp_mode, int border_mode)
{
    src_size_ = src.size();

    projector_.size = src.size();
    projector_.focal = focal;
    projector_.setCameraMatrix(M);

    cv::Point dst_tl, dst_br;
    detectResultRoi(dst_tl, dst_br);

    cv::Mat xmap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
    cv::Mat ymap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);

    float x, y;
    for (int v = dst_tl.y; v <= dst_br.y; ++v)
    {
        for (int u = dst_tl.x; u <= dst_br.x; ++u)
        {
            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
        }
    }

    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());
    remap(src, dst, xmap, ymap, interp_mode, border_mode);

    return dst_tl;
}


template <class P>
void WarperBase<P>::detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)
{
    float tl_uf = std::numeric_limits<float>::max();
    float tl_vf = std::numeric_limits<float>::max();
    float br_uf = -std::numeric_limits<float>::max();
    float br_vf = -std::numeric_limits<float>::max();

    float u, v;
    for (int y = 0; y < src_size_.height; ++y)
    {
        for (int x = 0; x < src_size_.width; ++x)
        {
            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
            tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
            br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
        }
    }

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}


template <class P>
void WarperBase<P>::detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br)
{
    float tl_uf = std::numeric_limits<float>::max();
    float tl_vf = std::numeric_limits<float>::max();
    float br_uf = -std::numeric_limits<float>::max();
    float br_vf = -std::numeric_limits<float>::max();

    float u, v;
    for (int x = 0; x < src_size_.width; ++x)
    {
        projector_.mapForward(static_cast<float>(x), 0, u, v);
        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

        projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size_.height - 1), u, v);
        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
    }
    for (int y = 0; y < src_size_.height; ++y)
    {
        projector_.mapForward(0, static_cast<float>(y), u, v);
        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);

        projector_.mapForward(static_cast<float>(src_size_.width - 1), static_cast<float>(y), u, v);
        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
    }

    dst_tl.x = static_cast<int>(tl_uf);
    dst_tl.y = static_cast<int>(tl_vf);
    dst_br.x = static_cast<int>(br_uf);
    dst_br.y = static_cast<int>(br_vf);
}


inline
void PlaneProjector::mapForward(float x, float y, float &u, float &v)
{
    x -= size.width * 0.5f;
    y -= size.height * 0.5f;

    float x_ = m[0] * x + m[1] * y + m[2] * focal;
    float y_ = m[3] * x + m[4] * y + m[5] * focal;
    float z_ = m[6] * x + m[7] * y + m[8] * focal;

    u = scale * x_ / z_ * plane_dist;
    v = scale * y_ / z_ * plane_dist;
}


inline
void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
{
    float x_ = u / scale;
    float y_ = v / scale;

    float z;
    x = minv[0] * x_ + minv[1] * y_ + minv[2] * plane_dist;
    y = minv[3] * x_ + minv[4] * y_ + minv[5] * plane_dist;
    z = minv[6] * x_ + minv[7] * y_ + minv[8] * plane_dist;

    x = focal * x / z + size.width * 0.5f;
    y = focal * y / z + size.height * 0.5f;
}


inline
void SphericalProjector::mapForward(float x, float y, float &u, float &v)
{
    x -= size.width * 0.5f;
    y -= size.height * 0.5f;

    float x_ = m[0] * x + m[1] * y + m[2] * focal;
    float y_ = m[3] * x + m[4] * y + m[5] * focal;
    float z_ = m[6] * x + m[7] * y + m[8] * focal;

    u = scale * atan2f(x_, z_);
    v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));
}


inline
void SphericalProjector::mapBackward(float u, float v, float &x, float &y)
{
    float sinv = sinf(static_cast<float>(CV_PI) - v / scale);
    float x_ = sinv * sinf(u / scale);
    float y_ = cosf(static_cast<float>(CV_PI) - v / scale);
    float z_ = sinv * cosf(u / scale);

    float z;
    x = minv[0] * x_ + minv[1] * y_ + minv[2] * z_;
    y = minv[3] * x_ + minv[4] * y_ + minv[5] * z_;
    z = minv[6] * x_ + minv[7] * y_ + minv[8] * z_;

    x = focal * x / z + size.width * 0.5f;
    y = focal * y / z + size.height * 0.5f;
}


inline
void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
{
    x -= size.width * 0.5f;
    y -= size.height * 0.5f;

    float x_ = m[0] * x + m[1] * y + m[2] * focal;
    float y_ = m[3] * x + m[4] * y + m[5] * focal;
    float z_ = m[6] * x + m[7] * y + m[8] * focal;

    u = scale * atan2f(x_, z_);
    v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
}


inline
void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)
{
    float x_ = sinf(u / scale);
    float y_ = v / scale;
    float z_ = cosf(u / scale);

    float z;
    x = minv[0] * x_ + minv[1] * y_ + minv[2] * z_;
    y = minv[3] * x_ + minv[4] * y_ + minv[5] * z_;
    z = minv[6] * x_ + minv[7] * y_ + minv[8] * z_;

    x = focal * x / z + size.width * 0.5f;
    y = focal * y / z + size.height * 0.5f;
}

#endif // _OPENCV_WARPERS_INL_HPP_