Refactored videostab module
parent 79e2070606
commit 02d34bdac2
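Note (not part of the commit message): this refactoring splits motion estimation into two layers. MotionEstimatorBase and its implementations (MotionEstimatorRansacL2, MotionEstimatorL1) fit a global motion model to two sets of matched points, while ImageMotionEstimatorBase implementations (KeypointBasedMotionEstimator, KeypointBasedMotionEstimatorGpu, FromFileMotionReader, ToFileMotionWriter) work on whole frames; KeypointBasedMotionEstimator detects keypoints, tracks them with sparse optical flow, rejects outliers, and delegates to the wrapped point-based estimator. A minimal usage sketch based on the declarations in this diff; the headers, frame loading, and image paths are illustrative assumptions, not part of the commit:

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/videostab/global_motion.hpp"

    using namespace cv;
    using namespace cv::videostab;

    int main()
    {
        // Two consecutive frames of a shaky video (paths are placeholders).
        Mat frame0 = imread("frame0.png");
        Mat frame1 = imread("frame1.png");

        // Point-based estimator: affine model fitted with RANSAC and an L2 cost.
        Ptr<MotionEstimatorBase> ransacL2 = new MotionEstimatorRansacL2(MM_AFFINE);

        // Image-based wrapper: keypoint detection + sparse optical flow,
        // then delegation to the wrapped point-based estimator.
        Ptr<ImageMotionEstimatorBase> est = new KeypointBasedMotionEstimator(ransacL2);

        bool ok = false;
        Mat M = est->estimate(frame0, frame1, &ok);  // 3x3 global motion, frame0 -> frame1
        return ok ? 0 : 1;
    }

This mirrors the default set up in StabilizerBase further down in the diff: setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2())).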
@@ -57,8 +57,6 @@
 #include "opencv2/gpu/gpu.hpp"
 #endif

-// TODO remove code duplications (in cpp too)
-
 namespace cv
 {
 namespace videostab
@@ -73,10 +71,65 @@ CV_EXPORTS Mat estimateGlobalMotionRobust(
         const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),
         float *rmse = 0, int *ninliers = 0);

-class CV_EXPORTS GlobalMotionEstimatorBase
+class CV_EXPORTS MotionEstimatorBase
 {
 public:
-    virtual ~GlobalMotionEstimatorBase() {}
+    virtual ~MotionEstimatorBase() {}

+    virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
+    virtual MotionModel motionModel() const { return motionModel_; }
+
+    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;
+
+protected:
+    MotionEstimatorBase(MotionModel model) { setMotionModel(model); }
+
+private:
+    MotionModel motionModel_;
+};
+
+class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase
+{
+public:
+    MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);
+
+    void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
+    RansacParams ransacParams() const { return ransacParams_; }
+
+    void setMinInlierRatio(float val) { minInlierRatio_ = val; }
+    float minInlierRatio() const { return minInlierRatio_; }
+
+    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);
+
+private:
+    RansacParams ransacParams_;
+    float minInlierRatio_;
+};
+
+class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase
+{
+public:
+    MotionEstimatorL1(MotionModel model = MM_AFFINE);
+
+    virtual Mat estimate(InputArray points0, InputArray points1, bool *ok);
+
+private:
+    std::vector<double> obj_, collb_, colub_;
+    std::vector<double> elems_, rowlb_, rowub_;
+    std::vector<int> rows_, cols_;
+
+    void set(int row, int col, double coef)
+    {
+        rows_.push_back(row);
+        cols_.push_back(col);
+        elems_.push_back(coef);
+    }
+};
+
+class CV_EXPORTS ImageMotionEstimatorBase
+{
+public:
+    virtual ~ImageMotionEstimatorBase() {}
+
     virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
     virtual MotionModel motionModel() const { return motionModel_; }
@@ -84,12 +137,13 @@ public:
     virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;

 protected:
-    GlobalMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
+    ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }

+private:
     MotionModel motionModel_;
 };

-class CV_EXPORTS FromFileMotionReader : public GlobalMotionEstimatorBase
+class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase
 {
 public:
     FromFileMotionReader(const std::string &path);
@@ -100,50 +154,45 @@ private:
     std::ifstream file_;
 };

-class CV_EXPORTS ToFileMotionWriter : public GlobalMotionEstimatorBase
+class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase
 {
 public:
-    ToFileMotionWriter(const std::string &path, Ptr<GlobalMotionEstimatorBase> estimator);
+    ToFileMotionWriter(const std::string &path, Ptr<ImageMotionEstimatorBase> estimator);

+    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
+
     virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);

 private:
     std::ofstream file_;
-    Ptr<GlobalMotionEstimatorBase> estimator_;
+    Ptr<ImageMotionEstimatorBase> motionEstimator_;
 };

-class CV_EXPORTS RansacMotionEstimator : public GlobalMotionEstimatorBase
+class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase
 {
 public:
-    RansacMotionEstimator(MotionModel model = MM_AFFINE);
+    KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);

+    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
+
     void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
     Ptr<FeatureDetector> detector() const { return detector_; }

-    void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
-    Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
+    void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
+    Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }

-    void setGridSize(Size val) { gridSize_ = val; }
-    Size gridSize() const { return gridSize_; }
-
-    void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
-    RansacParams ransacParams() const { return ransacParams_; }
-
     void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
     Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }

-    void setMinInlierRatio(float val) { minInlierRatio_ = val; }
-    float minInlierRatio() const { return minInlierRatio_; }
-
     virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);

 private:
+    Ptr<MotionEstimatorBase> motionEstimator_;
     Ptr<FeatureDetector> detector_;
     Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
-    Size gridSize_;
-    RansacParams ransacParams_;
     Ptr<IOutlierRejector> outlierRejector_;
-    float minInlierRatio_;

     std::vector<uchar> status_;
     std::vector<KeyPoint> keypointsPrev_;
@@ -152,29 +201,25 @@ private:
 };

 #if HAVE_OPENCV_GPU
-class CV_EXPORTS RansacMotionEstimatorGpu : public GlobalMotionEstimatorBase
+class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase
 {
 public:
-    RansacMotionEstimatorGpu(MotionModel model = MM_AFFINE);
+    KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator);

-    void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
-    RansacParams ransacParams() const { return ransacParams_; }
+    virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+    virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }

     void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
     Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }

-    void setMinInlierRatio(float val) { minInlierRatio_ = val; }
-    float minInlierRatio() const { return minInlierRatio_; }
-
     virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
     Mat estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok = 0);

 private:
+    Ptr<MotionEstimatorBase> motionEstimator_;
     gpu::GoodFeaturesToTrackDetector_GPU detector_;
     SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
-    RansacParams ransacParams_;
     Ptr<IOutlierRejector> outlierRejector_;
-    float minInlierRatio_;

     gpu::GpuMat frame0_, grayFrame0_, frame1_;
     gpu::GpuMat pointsPrev_, points_;
@@ -186,44 +231,6 @@ private:
 };
 #endif

-class CV_EXPORTS LpBasedMotionEstimator : public GlobalMotionEstimatorBase
-{
-public:
-    LpBasedMotionEstimator(MotionModel model = MM_AFFINE);
-
-    void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
-    Ptr<FeatureDetector> detector() const { return detector_; }
-
-    void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
-    Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
-
-    void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
-    Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
-
-    virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok);
-
-private:
-    Ptr<FeatureDetector> detector_;
-    Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
-    Ptr<IOutlierRejector> outlierRejector_;
-
-    std::vector<uchar> status_;
-    std::vector<KeyPoint> keypointsPrev_;
-    std::vector<Point2f> pointsPrev_, points_;
-    std::vector<Point2f> pointsPrevGood_, pointsGood_;
-
-    std::vector<double> obj_, collb_, colub_;
-    std::vector<int> rows_, cols_;
-    std::vector<double> elems_, rowlb_, rowub_;
-
-    void set(int row, int col, double coef)
-    {
-        rows_.push_back(row);
-        cols_.push_back(col);
-        elems_.push_back(coef);
-    }
-};
-
 CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);

 } // namespace videostab
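Note (not part of the commit): the new MotionEstimatorBase::estimate(points0, points1) interface declared above decouples model fitting from keypoint detection and tracking, so a user-defined point-based estimator can be wrapped by KeypointBasedMotionEstimator without touching the optical-flow machinery. A hedged sketch of such a subclass, following the conventions of the declarations above; the class name and the averaging logic are hypothetical, not code from this commit:

    // Hypothetical MotionEstimatorBase subclass (illustration only): estimates a
    // pure translation by averaging the displacement of the matched points.
    class TranslationAverageEstimator : public cv::videostab::MotionEstimatorBase
    {
    public:
        TranslationAverageEstimator()
            : MotionEstimatorBase(cv::videostab::MM_TRANSLATION) {}

        virtual cv::Mat estimate(cv::InputArray points0, cv::InputArray points1, bool *ok = 0)
        {
            CV_Assert(points0.type() == points1.type());
            const int npoints = points0.getMat().checkVector(2);
            CV_Assert(points1.getMat().checkVector(2) == npoints);

            const cv::Point2f *p0 = points0.getMat().ptr<cv::Point2f>();
            const cv::Point2f *p1 = points1.getMat().ptr<cv::Point2f>();

            // Average displacement over all correspondences.
            float dx = 0.f, dy = 0.f;
            for (int i = 0; i < npoints; ++i)
            {
                dx += p1[i].x - p0[i].x;
                dy += p1[i].y - p0[i].y;
            }

            cv::Mat_<float> M = cv::Mat::eye(3, 3, CV_32F);
            if (npoints > 0)
            {
                M(0,2) = dx / npoints;
                M(1,2) = dy / npoints;
            }
            if (ok) *ok = npoints > 0;
            return M;
        }
    };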
@@ -74,8 +74,8 @@ public:
     void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }
     Ptr<IFrameSource> frameSource() const { return frameSource_; }

-    void setMotionEstimator(Ptr<GlobalMotionEstimatorBase> val) { motionEstimator_ = val; }
-    Ptr<GlobalMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
+    void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
+    Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }

     void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }
     Ptr<DeblurerBase> deblurrer() const { return deblurer_; }
@@ -107,7 +107,7 @@ protected:

     Ptr<ILog> log_;
     Ptr<IFrameSource> frameSource_;
-    Ptr<GlobalMotionEstimatorBase> motionEstimator_;
+    Ptr<ImageMotionEstimatorBase> motionEstimator_;
     Ptr<DeblurerBase> deblurer_;
     Ptr<InpainterBase> inpainter_;
     int radius_;
@@ -64,8 +64,8 @@ public:

     virtual ~WobbleSuppressorBase() {}

-    void setMotionEstimator(Ptr<GlobalMotionEstimatorBase> val) { motionEstimator_ = val; }
-    Ptr<GlobalMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
+    void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
+    Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }

     virtual void suppress(int idx, const Mat &frame, Mat &result) = 0;

@@ -85,7 +85,7 @@ public:
     virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }

 protected:
-    Ptr<GlobalMotionEstimatorBase> motionEstimator_;
+    Ptr<ImageMotionEstimatorBase> motionEstimator_;
     int frameCount_;
     const std::vector<Mat> *motions_;
     const std::vector<Mat> *motions2_;
@@ -404,237 +404,32 @@ Mat estimateGlobalMotionRobust(
 }


-FromFileMotionReader::FromFileMotionReader(const string &path)
-    : GlobalMotionEstimatorBase(MM_UNKNOWN)
+MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
+    : MotionEstimatorBase(model)
 {
-    file_.open(path.c_str());
-    CV_Assert(file_.is_open());
-}
-
-
-Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
-{
-    Mat_<float> M(3, 3);
-    bool ok_;
-    file_ >> M(0,0) >> M(0,1) >> M(0,2)
-          >> M(1,0) >> M(1,1) >> M(1,2)
-          >> M(2,0) >> M(2,1) >> M(2,2) >> ok_;
-    if (ok) *ok = ok_;
-    return M;
-}
-
-
-ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<GlobalMotionEstimatorBase> estimator)
-    : GlobalMotionEstimatorBase(estimator->motionModel())
-{
-    file_.open(path.c_str());
-    CV_Assert(file_.is_open());
-    estimator_ = estimator;
-}
-
-
-Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
-{
-    bool ok_;
-    Mat_<float> M = estimator_->estimate(frame0, frame1, &ok_);
-    file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
-          << M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
-          << M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << endl;
-    if (ok) *ok = ok_;
-    return M;
-}
-
-
-RansacMotionEstimator::RansacMotionEstimator(MotionModel model)
-    : GlobalMotionEstimatorBase(model)
-{
-    setDetector(new GoodFeaturesToTrackDetector());
-    setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
-    setGridSize(Size(0,0));
     setRansacParams(RansacParams::default2dMotion(model));
-    setOutlierRejector(new NullOutlierRejector());
     setMinInlierRatio(0.1f);
 }


-Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
 {
-    // find keypoints
-    detector_->detect(frame0, keypointsPrev_);
+    CV_Assert(points0.type() == points1.type());
+    const int npoints = points0.getMat().checkVector(2);
+    CV_Assert(points1.getMat().checkVector(2) == npoints);

-    // add extra keypoints
-
-    if (gridSize_.width > 0 && gridSize_.height > 0)
-    {
-        float dx = static_cast<float>(frame0.cols) / (gridSize_.width + 1);
-        float dy = static_cast<float>(frame0.rows) / (gridSize_.height + 1);
-        for (int x = 0; x < gridSize_.width; ++x)
-            for (int y = 0; y < gridSize_.height; ++y)
-                keypointsPrev_.push_back(KeyPoint((x+1)*dx, (y+1)*dy, 0.f));
-    }
-
-    // extract points from keypoints
-
-    pointsPrev_.resize(keypointsPrev_.size());
-    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
-        pointsPrev_[i] = keypointsPrev_[i].pt;
-
-    // find correspondences
-
-    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
-
-    // leave good correspondences only
-
-    pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
-    pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
-    for (size_t i = 0; i < points_.size(); ++i)
-    {
-        if (status_[i])
-        {
-            pointsPrevGood_.push_back(pointsPrev_[i]);
-            pointsGood_.push_back(points_[i]);
-        }
-    }
-
-    // perfrom outlier rejection
-
-    IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
-    if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
-    {
-        pointsPrev_.swap(pointsPrevGood_);
-        points_.swap(pointsGood_);
-
-        outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
-
-        pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
-        pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
-        for (size_t i = 0; i < points_.size(); ++i)
-        {
-            if (status_[i])
-            {
-                pointsPrevGood_.push_back(pointsPrev_[i]);
-                pointsGood_.push_back(points_[i]);
-            }
-        }
-    }
-
-    size_t npoints = pointsGood_.size();
-
     // find motion

     int ninliers = 0;
     Mat_<float> M;

-    if (motionModel_ != MM_HOMOGRAPHY)
+    if (motionModel() != MM_HOMOGRAPHY)
         M = estimateGlobalMotionRobust(
-                pointsPrevGood_, pointsGood_, motionModel_, ransacParams_, 0, &ninliers);
+                points0, points1, motionModel(), ransacParams_, 0, &ninliers);
     else
     {
         vector<uchar> mask;
-        M = findHomography(pointsPrevGood_, pointsGood_, mask, CV_RANSAC, ransacParams_.thresh);
+        M = findHomography(points0, points1, mask, CV_RANSAC, ransacParams_.thresh);
-        for (size_t i = 0; i < npoints; ++i)
-            if (mask[i]) ninliers++;
-    }
-
-    // check if we're confident enough in estimated motion
-
-    if (ok) *ok = true;
-    if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
-    {
-        M = Mat::eye(3, 3, CV_32F);
-        if (ok) *ok = false;
-    }
-
-    return M;
-}
-
-
-#if HAVE_OPENCV_GPU
-RansacMotionEstimatorGpu::RansacMotionEstimatorGpu(MotionModel model)
-    : GlobalMotionEstimatorBase(model)
-{
-    CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
-    setRansacParams(RansacParams::default2dMotion(model));
-    setOutlierRejector(new NullOutlierRejector());
-    setMinInlierRatio(0.1f);
-}
-
-
-Mat RansacMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
-{
-    frame0_.upload(frame0);
-    frame1_.upload(frame1);
-    return estimate(frame0_, frame1_, ok);
-}
-
-
-Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
-{
-    // convert frame to gray if it's color
-
-    gpu::GpuMat grayFrame0;
-    if (frame0.channels() == 1)
-        grayFrame0 = frame0;
-    else
-    {
-        gpu::cvtColor(frame0_, grayFrame0_, CV_BGR2GRAY);
-        grayFrame0 = grayFrame0_;
-    }
-
-    // find keypoints
-
-    detector_(grayFrame0, pointsPrev_);
-
-    // find correspondences
-
-    optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
-
-    // leave good correspondences only
-
-    gpu::compactPoints(pointsPrev_, points_, status_);
-
-    pointsPrev_.download(hostPointsPrev_);
-    points_.download(hostPoints_);
-
-    // perfrom outlier rejection
-
-    IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
-    if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
-    {
-        outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
-
-        hostPointsPrevTmp_.clear(); hostPointsPrevTmp_.reserve(hostPoints_.cols);
-        hostPointsTmp_.clear(); hostPointsTmp_.reserve(hostPoints_.cols);
-
-        for (int i = 0; i < hostPoints_.cols; ++i)
-        {
-            if (rejectionStatus_[i])
-            {
-                hostPointsPrevTmp_.push_back(hostPointsPrev_.at<Point2f>(0,i));
-                hostPointsTmp_.push_back(hostPoints_.at<Point2f>(0,i));
-            }
-        }
-
-        hostPointsPrev_ = Mat(1, hostPointsPrevTmp_.size(), CV_32FC2, &hostPointsPrevTmp_[0]);
-        hostPoints_ = Mat(1, hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
-    }
-
-    // find motion
-
-    int npoints = hostPoints_.cols;
-    int ninliers = 0;
-    Mat_<float> M;
-
-    if (motionModel_ != MM_HOMOGRAPHY)
-        M = estimateGlobalMotionRobust(
-                hostPointsPrev_, hostPoints_, motionModel_, ransacParams_, 0, &ninliers);
-    else
-    {
-        vector<uchar> mask;
-        M = findHomography(hostPointsPrev_, hostPoints_, mask, CV_RANSAC, ransacParams_.thresh);
         for (int i = 0; i < npoints; ++i)
             if (mask[i]) ninliers++;
     }
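Note (not part of the commit): the reworked MotionEstimatorRansacL2::estimate above no longer touches frames at all; it accepts any InputArray of 2D points, for example std::vector<cv::Point2f>. A small self-contained check of that call path; the point values and the pure-translation setup are invented for the example:

    #include <vector>
    #include "opencv2/core/core.hpp"
    #include "opencv2/videostab/global_motion.hpp"

    int main()
    {
        using namespace cv;
        using namespace cv::videostab;

        // Matched point sets related by a pure shift of (5, -3) pixels.
        std::vector<Point2f> points0, points1;
        for (int y = 0; y < 5; ++y)
            for (int x = 0; x < 5; ++x)
            {
                Point2f p(10.f * x, 10.f * y);
                points0.push_back(p);
                points1.push_back(p + Point2f(5.f, -3.f));
            }

        MotionEstimatorRansacL2 est(MM_TRANSLATION);
        bool ok = false;
        Mat M = est.estimate(points0, points1, &ok);

        // Expect roughly M(0,2) == 5 and M(1,2) == -3 when ok is true.
        return ok ? 0 : 1;
    }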
@@ -650,73 +445,23 @@ Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::Gpu

     return M;
 }
-#endif // #if HAVE_OPENCV_GPU
-
-
-LpBasedMotionEstimator::LpBasedMotionEstimator(MotionModel model)
-    : GlobalMotionEstimatorBase(model)
+MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
+    : MotionEstimatorBase(model)
 {
-    setDetector(new GoodFeaturesToTrackDetector());
-    setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
-    setOutlierRejector(new NullOutlierRejector());
 }


 // TODO will estimation of all motions as one LP problem be faster?
-Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
 {
-    // find keypoints
+    CV_Assert(points0.type() == points1.type());
+    const int npoints = points0.getMat().checkVector(2);
+    CV_Assert(points1.getMat().checkVector(2) == npoints);

-    detector_->detect(frame0, keypointsPrev_);
+    const Point2f *points0_ = points0.getMat().ptr<Point2f>();
+    const Point2f *points1_ = points1.getMat().ptr<Point2f>();
-    // extract points from keypoints
-
-    pointsPrev_.resize(keypointsPrev_.size());
-    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
-        pointsPrev_[i] = keypointsPrev_[i].pt;
-
-    // find correspondences
-
-    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
-
-    // leave good correspondences only
-
-    pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
-    pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
-    for (size_t i = 0; i < points_.size(); ++i)
-    {
-        if (status_[i])
-        {
-            pointsPrevGood_.push_back(pointsPrev_[i]);
-            pointsGood_.push_back(points_[i]);
-        }
-    }
-
-    // perfrom outlier rejection
-
-    IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
-    if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
-    {
-        pointsPrev_.swap(pointsPrevGood_);
-        points_.swap(pointsGood_);
-
-        outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
-
-        pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
-        pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
-        for (size_t i = 0; i < points_.size(); ++i)
-        {
-            if (status_[i])
-            {
-                pointsPrevGood_.push_back(pointsPrev_[i]);
-                pointsGood_.push_back(points_[i]);
-            }
-        }
-    }
-
-    // prepare LP problem
-
 #ifndef HAVE_CLP

@@ -726,23 +471,23 @@ Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool

 #else

-    CV_Assert(motionModel_ <= MM_AFFINE && motionModel_ != MM_RIGID);
+    CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);

-    int npoints = static_cast<int>(pointsGood_.size());
+    // prepare LP problem
+
     int ncols = 6 + 2*npoints;
     int nrows = 4*npoints;

-    if (motionModel_ == MM_SIMILARITY)
+    if (motionModel() == MM_SIMILARITY)
         nrows += 2;
-    else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
+    else if (motionModel() == MM_TRANSLATION_AND_SCALE)
         nrows += 3;
-    else if (motionModel_ == MM_TRANSLATION)
+    else if (motionModel() == MM_TRANSLATION)
         nrows += 4;

     rows_.clear();
     cols_.clear();
     elems_.clear();

     obj_.assign(ncols, 0);
     collb_.assign(ncols, -INF);
     colub_.assign(ncols, INF);
|
|||||||
|
|
||||||
for (int i = 0; i < npoints; ++i, r += 4)
|
for (int i = 0; i < npoints; ++i, r += 4)
|
||||||
{
|
{
|
||||||
p0 = pointsPrevGood_[i];
|
p0 = points0_[i];
|
||||||
p1 = pointsGood_[i];
|
p1 = points1_[i];
|
||||||
|
|
||||||
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
|
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
|
||||||
rowub_[r] = p1.x;
|
rowub_[r] = p1.x;
|
||||||
@@ -783,18 +528,18 @@ Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool
         rowlb_[r+3] = p1.y;
     }

-    if (motionModel_ == MM_SIMILARITY)
+    if (motionModel() == MM_SIMILARITY)
     {
         set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
         set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
     }
-    else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
+    else if (motionModel() == MM_TRANSLATION_AND_SCALE)
     {
         set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
         set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
         set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
     }
-    else if (motionModel_ == MM_TRANSLATION)
+    else if (motionModel() == MM_TRANSLATION)
     {
         set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
         set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
@@ -834,6 +579,187 @@ Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool
 }


+FromFileMotionReader::FromFileMotionReader(const string &path)
+    : ImageMotionEstimatorBase(MM_UNKNOWN)
+{
+    file_.open(path.c_str());
+    CV_Assert(file_.is_open());
+}
+
+
+Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
+{
+    Mat_<float> M(3, 3);
+    bool ok_;
+    file_ >> M(0,0) >> M(0,1) >> M(0,2)
+          >> M(1,0) >> M(1,1) >> M(1,2)
+          >> M(2,0) >> M(2,1) >> M(2,2) >> ok_;
+    if (ok) *ok = ok_;
+    return M;
+}
+
+
+ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<ImageMotionEstimatorBase> estimator)
+    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
+{
+    file_.open(path.c_str());
+    CV_Assert(file_.is_open());
+}
+
+
+Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+{
+    bool ok_;
+    Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
+    file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
+          << M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
+          << M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << endl;
+    if (ok) *ok = ok_;
+    return M;
+}
+
+
+KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
+    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
+{
+    setDetector(new GoodFeaturesToTrackDetector());
+    setOpticalFlowEstimator(new SparsePyrLkOptFlowEstimator());
+    setOutlierRejector(new NullOutlierRejector());
+}
+
+
+Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+{
+    // find keypoints
+    detector_->detect(frame0, keypointsPrev_);
+
+    // extract points from keypoints
+    pointsPrev_.resize(keypointsPrev_.size());
+    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
+        pointsPrev_[i] = keypointsPrev_[i].pt;
+
+    // find correspondences
+    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
+
+    // leave good correspondences only
+
+    pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
+    pointsGood_.clear(); pointsGood_.reserve(points_.size());
+
+    for (size_t i = 0; i < points_.size(); ++i)
+    {
+        if (status_[i])
+        {
+            pointsPrevGood_.push_back(pointsPrev_[i]);
+            pointsGood_.push_back(points_[i]);
+        }
+    }
+
+    // perform outlier rejection
+
+    IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
+    if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
+    {
+        pointsPrev_.swap(pointsPrevGood_);
+        points_.swap(pointsGood_);
+
+        outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
+
+        pointsPrevGood_.clear();
+        pointsPrevGood_.reserve(points_.size());
+
+        pointsGood_.clear();
+        pointsGood_.reserve(points_.size());
+
+        for (size_t i = 0; i < points_.size(); ++i)
+        {
+            if (status_[i])
+            {
+                pointsPrevGood_.push_back(pointsPrev_[i]);
+                pointsGood_.push_back(points_[i]);
+            }
+        }
+    }
+
+    // estimate motion
+    return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
+}
+
+
+#if HAVE_OPENCV_GPU
+KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
+    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
+{
+    CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
+    setOutlierRejector(new NullOutlierRejector());
+}
+
+
+Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+{
+    frame0_.upload(frame0);
+    frame1_.upload(frame1);
+    return estimate(frame0_, frame1_, ok);
+}
+
+
+Mat KeypointBasedMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
+{
+    // convert frame to gray if it's color
+
+    gpu::GpuMat grayFrame0;
+    if (frame0.channels() == 1)
+        grayFrame0 = frame0;
+    else
+    {
+        gpu::cvtColor(frame0_, grayFrame0_, CV_BGR2GRAY);
+        grayFrame0 = grayFrame0_;
+    }
+
+    // find keypoints
+    detector_(grayFrame0, pointsPrev_);
+
+    // find correspondences
+    optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
+
+    // leave good correspondences only
+    gpu::compactPoints(pointsPrev_, points_, status_);
+
+    pointsPrev_.download(hostPointsPrev_);
+    points_.download(hostPoints_);
+
+    // perform outlier rejection
+
+    IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
+    if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
+    {
+        outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
+
+        hostPointsPrevTmp_.clear();
+        hostPointsPrevTmp_.reserve(hostPoints_.cols);
+
+        hostPointsTmp_.clear();
+        hostPointsTmp_.reserve(hostPoints_.cols);
+
+        for (int i = 0; i < hostPoints_.cols; ++i)
+        {
+            if (rejectionStatus_[i])
+            {
+                hostPointsPrevTmp_.push_back(hostPointsPrev_.at<Point2f>(0,i));
+                hostPointsTmp_.push_back(hostPoints_.at<Point2f>(0,i));
+            }
+        }
+
+        hostPointsPrev_ = Mat(1, hostPointsPrevTmp_.size(), CV_32FC2, &hostPointsPrevTmp_[0]);
+        hostPoints_ = Mat(1, hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
+    }
+
+    // estimate motion
+    return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
+}
+#endif // HAVE_OPENCV_GPU
+
+
 Mat getMotion(int from, int to, const vector<Mat> &motions)
 {
     Mat M = Mat::eye(3, 3, CV_32F);
@@ -58,7 +58,7 @@ StabilizerBase::StabilizerBase()
 {
     setLog(new LogToStdout());
     setFrameSource(new NullFrameSource());
-    setMotionEstimator(new RansacMotionEstimator());
+    setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2()));
     setDeblurer(new NullDeblurer());
     setInpainter(new NullInpainter());
     setRadius(15);
@@ -53,9 +53,7 @@ namespace videostab

 WobbleSuppressorBase::WobbleSuppressorBase() : motions_(0), stabilizationMotions_(0)
 {
-    RansacMotionEstimator *est = new RansacMotionEstimator();
-    est->setMotionModel(MM_HOMOGRAPHY);
-    est->setRansacParams(RansacParams::default2dMotion(MM_HOMOGRAPHY));
+    setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2(MM_HOMOGRAPHY)));
 }


@@ -85,8 +85,6 @@ void printHelp()
         "  Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
         "  --nkps=<int_number>\n"
         "  Number of keypoints to find in each frame. The default is 1000.\n"
-        "  --extra-kps=<int_number>\n"
-        "  Extra keypoint grid size for motion estimation. The default is 0.\n"
         "  --local-outlier-rejection=(yes|no)\n"
         "  Perform local outlier rejection. The default is no.\n\n"
         "  -sm, --save-motions=(<file_path>|no)\n"
@@ -154,8 +152,6 @@ void printHelp()
         "  Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
         "  --ws-nkps=<int_number>\n"
         "  Number of keypoints to find in each frame. The default is 1000.\n"
-        "  --ws-extra-kps=<int_number>\n"
-        "  Extra keypoint grid size for motion estimation. The default is 0.\n"
         "  --ws-local-outlier-rejection=(yes|no)\n"
         "  Perform local outlier rejection. The default is no.\n\n"
         "  -sm2, --save-motions2=(<file_path>|no)\n"
@@ -181,24 +177,22 @@ class IMotionEstimatorBuilder
 {
 public:
     virtual ~IMotionEstimatorBuilder() {}
-    virtual Ptr<GlobalMotionEstimatorBase> build() = 0;
+    virtual Ptr<ImageMotionEstimatorBase> build() = 0;
 protected:
     IMotionEstimatorBuilder(CommandLineParser &cmd) : cmd(cmd) {}
     CommandLineParser cmd;
 };


-class RansacMotionEstimatorBuilder : public IMotionEstimatorBuilder
+class MotionEstimatorRansacL2Builder : public IMotionEstimatorBuilder
 {
 public:
-    RansacMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "")
-        : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
+    MotionEstimatorRansacL2Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
+        : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}

-    virtual Ptr<GlobalMotionEstimatorBase> build()
+    virtual Ptr<ImageMotionEstimatorBase> build()
     {
-        RansacMotionEstimator *est = new RansacMotionEstimator(motionModel(arg(prefix + "model")));
+        MotionEstimatorRansacL2 *est = new MotionEstimatorRansacL2(motionModel(arg(prefix + "model")));

-        est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
-
         RansacParams ransac = est->ransacParams();
         if (arg(prefix + "subset") != "auto")
@@ -208,97 +202,76 @@ public:
         ransac.eps = argf(prefix + "outlier-ratio");
         est->setRansacParams(ransac);

-        est->setGridSize(Size(argi(prefix + "extra-kps"), argi(prefix + "extra-kps")));
+        est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));

         Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
         if (arg(prefix + "local-outlier-rejection") == "yes")
         {
-            TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
-            RansacParams ransacParams = tor->ransacParams();
+            TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+            RansacParams ransacParams = tblor->ransacParams();
             if (arg(prefix + "thresh") != "auto")
                 ransacParams.thresh = argf(prefix + "thresh");
-            tor->setRansacParams(ransacParams);
-            outlierRejector = tor;
+            tblor->setRansacParams(ransacParams);
+            outlierRejector = tblor;
         }
-        est->setOutlierRejector(outlierRejector);
-
-        est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
-
-        return est;
-    }
-private:
-    string prefix;
-};
-

 #if HAVE_OPENCV_GPU
-class RansacMotionEstimatorBuilderGpu : public IMotionEstimatorBuilder
-{
-public:
-    RansacMotionEstimatorBuilderGpu(CommandLineParser &cmd, const string &prefix = "")
-        : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
-
-    virtual Ptr<GlobalMotionEstimatorBase> build()
-    {
-        RansacMotionEstimatorGpu *est = new RansacMotionEstimatorGpu(motionModel(arg(prefix + "model")));
-
-        RansacParams ransac = est->ransacParams();
-        if (arg(prefix + "subset") != "auto")
-            ransac.size = argi(prefix + "subset");
-        if (arg(prefix + "thresh") != "auto")
-            ransac.thresh = argi(prefix + "thresh");
-        ransac.eps = argf(prefix + "outlier-ratio");
-        est->setRansacParams(ransac);
-
-        Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
-        if (arg(prefix + "local-outlier-rejection") == "yes")
+        if (gpu)
         {
-            TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
-            RansacParams ransacParams = tor->ransacParams();
-            if (arg(prefix + "thresh") != "auto")
-                ransacParams.thresh = argf(prefix + "thresh");
-            tor->setRansacParams(ransacParams);
-            outlierRejector = tor;
+            KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+            kbest->setOutlierRejector(outlierRejector);
+            return kbest;
         }
-        est->setOutlierRejector(outlierRejector);
-
-        est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
-
-        return est;
-    }
-private:
-    string prefix;
-};
 #endif

+        KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
+        kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+        kbest->setOutlierRejector(outlierRejector);
+        return kbest;
+    }
+private:
+    bool gpu;
+    string prefix;
+};
+
-class LpBasedMotionEstimatorBuilder : public IMotionEstimatorBuilder
+class MotionEstimatorL1Builder : public IMotionEstimatorBuilder
 {
 public:
-    LpBasedMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "")
-        : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
+    MotionEstimatorL1Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
+        : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}

-    virtual Ptr<GlobalMotionEstimatorBase> build()
+    virtual Ptr<ImageMotionEstimatorBase> build()
     {
-        LpBasedMotionEstimator *est = new LpBasedMotionEstimator(motionModel(arg(prefix + "model")));
+        MotionEstimatorL1 *est = new MotionEstimatorL1(motionModel(arg(prefix + "model")));

-        est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
-
         Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
         if (arg(prefix + "local-outlier-rejection") == "yes")
         {
-            TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
-            RansacParams ransacParams = tor->ransacParams();
+            TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+            RansacParams ransacParams = tblor->ransacParams();
             if (arg(prefix + "thresh") != "auto")
                 ransacParams.thresh = argf(prefix + "thresh");
-            tor->setRansacParams(ransacParams);
-            outlierRejector = tor;
+            tblor->setRansacParams(ransacParams);
+            outlierRejector = tblor;
         }
-        est->setOutlierRejector(outlierRejector);

-        return est;
+#if HAVE_OPENCV_GPU
+        if (gpu)
+        {
+            KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+            kbest->setOutlierRejector(outlierRejector);
+            return kbest;
+        }
+#endif
+
+        KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
+        kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+        kbest->setOutlierRejector(outlierRejector);
+        return kbest;
     }
 private:
+    bool gpu;
     string prefix;
 };

@@ -399,40 +372,16 @@ int main(int argc, const char **argv)
         // prepare motion estimation builders

         Ptr<IMotionEstimatorBuilder> motionEstBuilder;
-#if HAVE_OPENCV_GPU
-        if (arg("gpu") == "yes")
-        {
-            if (arg("lin-prog-motion-est") == "yes")
-                motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
-            else
-                motionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd);
-        }
+        if (arg("lin-prog-motion-est") == "yes")
+            motionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes");
         else
-#endif
-        {
-            if (arg("lin-prog-motion-est") == "yes")
-                motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
-            else
-                motionEstBuilder = new RansacMotionEstimatorBuilder(cmd);
-        }
+            motionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes");

         Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
-#if HAVE_OPENCV_GPU
-        if (arg("gpu") == "yes")
-        {
-            if (arg("ws-lp") == "yes")
-                wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
-            else
-                wsMotionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd, "ws-");
-        }
+        if (arg("ws-lp") == "yes")
+            wsMotionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-");
         else
-#endif
-        {
-            if (arg("ws-lp") == "yes")
-                wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
-            else
-                wsMotionEstBuilder = new RansacMotionEstimatorBuilder(cmd, "ws-");
-        }
+            wsMotionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-");

         // determine whether we must use one pass or two pass stabilizer
         bool isTwoPass =