Added GPU version of PyrLK based global motion estimator (videostab)

This commit is contained in:
Alexey Spizhevoy 2012-04-18 13:23:41 +00:00
parent 1351f4c8ef
commit 1569c1ed52
13 changed files with 600 additions and 95 deletions

View File

@ -2090,6 +2090,10 @@ private:
std::auto_ptr<Impl> impl_;
};
//! removes points (CV_32FC2, single row matrix) with zero mask value
CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask);
} // namespace gpu
} // namespace cv

View File

@ -0,0 +1,65 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "thrust/device_ptr.h"
#include "thrust/remove.h"
#include "thrust/functional.h"
#include "internal_shared.hpp"
using namespace thrust;
namespace cv { namespace gpu { namespace device {
int compactPoints(int N, float *points0, float *points1, const uchar *mask)
{
thrust::device_ptr<float2> dpoints0((float2*)points0);
thrust::device_ptr<float2> dpoints1((float2*)points1);
thrust::device_ptr<const uchar> dmask(mask);
return thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)),
thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)),
dmask, thrust::not1(thrust::identity<uchar>()))
- make_zip_iterator(make_tuple(dpoints0, dpoints1));
}
}}}
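
For reference, a minimal CPU-side sketch of what the Thrust call above computes (illustrative only, not part of the commit): every point pair whose mask byte is zero is dropped from both arrays in lockstep, and the number of surviving pairs is returned.

// Hypothetical CPU analogue of the Thrust-based compactPoints kernel above
// (assumes opencv2/core/core.hpp for cv::Point2f).
static int compactPointsCpu(int n, cv::Point2f *p0, cv::Point2f *p1, const uchar *mask)
{
    int k = 0;
    for (int i = 0; i < n; ++i)
    {
        if (mask[i])
        {
            p0[k] = p0[i]; // keep the i-th pair by moving it to slot k
            p1[k] = p1[i];
            ++k;
        }
    }
    return k; // number of point pairs kept
}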

View File

@ -0,0 +1,75 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
#ifndef HAVE_CUDA
void cv::gpu::compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }
#else
namespace cv { namespace gpu { namespace device {
int compactPoints(int N, float *points0, float *points1, const uchar *mask);
}}}
void cv::gpu::compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
{
CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);
int npoints = points0.cols;
int remaining = cv::gpu::device::compactPoints(
npoints, (float*)points0.data, (float*)points1.data, mask.data);
points0 = points0.colRange(0, remaining);
points1 = points1.colRange(0, remaining);
}
#endif
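
A minimal host-side usage sketch for the new API (illustrative only, assuming a CUDA-enabled build; the inputs follow the constraints asserted above: single-row CV_32FC2 point matrices and a single-row CV_8U mask; hostPoints0, hostPoints1 and hostMask are assumed cv::Mat inputs):

// Hypothetical caller: compact two tracked point sets by a status mask.
cv::gpu::GpuMat dpoints0, dpoints1, dmask;
dpoints0.upload(hostPoints0);   // 1xN CV_32FC2
dpoints1.upload(hostPoints1);   // 1xN CV_32FC2
dmask.upload(hostMask);         // 1xN CV_8U
cv::gpu::compactPoints(dpoints0, dpoints1, dmask); // both point sets now hold only masked-in columns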

View File

@ -0,0 +1,87 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <iostream>
using namespace std;
using namespace cv;
struct CompactPoints : testing::TestWithParam<gpu::DeviceInfo>
{
virtual void SetUp() { gpu::setDevice(GetParam().deviceID()); }
};
TEST_P(CompactPoints, CanCompactizeSmallInput)
{
Mat src0(1, 3, CV_32FC2);
src0.at<Point2f>(0,0) = Point2f(0,0);
src0.at<Point2f>(0,1) = Point2f(0,1);
src0.at<Point2f>(0,2) = Point2f(0,2);
Mat src1(1, 3, CV_32FC2);
src1.at<Point2f>(0,0) = Point2f(1,0);
src1.at<Point2f>(0,1) = Point2f(1,1);
src1.at<Point2f>(0,2) = Point2f(1,2);
Mat mask(1, 3, CV_8U);
mask.at<uchar>(0,0) = 1;
mask.at<uchar>(0,1) = 0;
mask.at<uchar>(0,2) = 1;
gpu::GpuMat dsrc0(src0), dsrc1(src1), dmask(mask);
gpu::compactPoints(dsrc0, dsrc1, dmask);
dsrc0.download(src0);
dsrc1.download(src1);
ASSERT_EQ(2, src0.cols);
ASSERT_EQ(2, src1.cols);
ASSERT_TRUE(src0.at<Point2f>(0,0) == Point2f(0,0));
ASSERT_TRUE(src0.at<Point2f>(0,1) == Point2f(0,2));
ASSERT_TRUE(src1.at<Point2f>(0,0) == Point2f(1,0));
ASSERT_TRUE(src1.at<Point2f>(0,1) == Point2f(1,2));
}
INSTANTIATE_TEST_CASE_P(GPU_GlobalMotion, CompactPoints, ALL_DEVICES);

View File

@ -50,6 +50,10 @@
#include "opencv2/features2d/features2d.hpp" #include "opencv2/features2d/features2d.hpp"
#include "opencv2/videostab/optical_flow.hpp" #include "opencv2/videostab/optical_flow.hpp"
#if HAVE_OPENCV_GPU
#include "opencv2/gpu/gpu.hpp"
#endif
namespace cv
{
namespace videostab
@ -161,14 +165,43 @@ private:
Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
RansacParams ransacParams_;
float minInlierRatio_;
Size gridSize_;
std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_;
std::vector<Point2f> pointsPrev_, points_;
std::vector<Point2f> pointsPrevGood_, pointsGood_;
float minInlierRatio_;
Size gridSize_;
};
#if HAVE_OPENCV_GPU
class CV_EXPORTS PyrLkRobustMotionEstimatorGpu : public GlobalMotionEstimatorBase
{
public:
PyrLkRobustMotionEstimatorGpu(MotionModel model = MM_AFFINE);
void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
Mat estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok = 0);
private:
gpu::GoodFeaturesToTrackDetector_GPU detector_;
SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
RansacParams ransacParams_;
float minInlierRatio_;
gpu::GpuMat frame0_, grayFrame0_, frame1_;
gpu::GpuMat pointsPrev_, points_;
Mat hostPointsPrev_, hostPoints_;
gpu::GpuMat status_;
};
#endif
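
A short usage sketch of the new estimator (a sketch only, assuming a build with HAVE_OPENCV_GPU and at least one CUDA device; frame0 and frame1 are assumed BGR or grayscale cv::Mat frames):

using namespace cv;
using namespace cv::videostab;

PyrLkRobustMotionEstimatorGpu est(MM_AFFINE); // the Mat overload uploads frames internally
est.setMinInlierRatio(0.1f);
bool ok = false;
Mat M = est.estimate(frame0, frame1, &ok);    // 3x3 CV_32F motion; identity when the inlier ratio is too low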
CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
} // namespace videostab

View File

@ -47,7 +47,7 @@
#include "opencv2/opencv_modules.hpp" #include "opencv2/opencv_modules.hpp"
#if HAVE_OPENCV_GPU #if HAVE_OPENCV_GPU
#include "opencv2/gpu/gpu.hpp" #include "opencv2/gpu/gpu.hpp"
#endif #endif
namespace cv namespace cv
@ -99,6 +99,27 @@ public:
};
#if HAVE_OPENCV_GPU
class CV_EXPORTS SparsePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
{
public:
SparsePyrLkOptFlowEstimatorGpu();
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors);
void run(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, const gpu::GpuMat &points0, gpu::GpuMat &points1,
gpu::GpuMat &status, gpu::GpuMat &errors);
void run(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, const gpu::GpuMat &points0, gpu::GpuMat &points1,
gpu::GpuMat &status);
private:
gpu::PyrLKOpticalFlow optFlowEstimator_;
gpu::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_;
};
class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
{
@ -108,6 +129,7 @@ public:
virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors);
private:
gpu::PyrLKOpticalFlow optFlowEstimator_;
gpu::GpuMat frame0_, frame1_, flowX_, flowY_, errors_;

View File

@ -241,13 +241,11 @@ Mat estimateGlobalMotionLeastSquares(
Mat estimateGlobalMotionRobust(
int npoints, const Point2f *points0, const Point2f *points1, int model,
const RansacParams &params, float *rmse, int *ninliers)
{
CV_Assert(model <= MM_AFFINE);
CV_Assert(points0.size() == points1.size());
const int npoints = static_cast<int>(points0.size());
const int niters = static_cast<int>(ceil(log(1 - params.prob) /
log(1 - pow(1 - params.eps, params.size))));
@ -382,11 +380,7 @@ PyrLkRobustMotionEstimator::PyrLkRobustMotionEstimator(MotionModel model)
setDetector(new GoodFeaturesToTrackDetector());
setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
setMotionModel(model);
setRansacParams(RansacParams::default2dMotion(model));
RansacParams ransac = RansacParams::default2dMotion(model);
ransac.size *= 2; // we use more points than needed, but result looks better
setRansacParams(ransac);
setMinInlierRatio(0.1f);
setGridSize(Size(0,0));
}
@ -394,35 +388,37 @@ PyrLkRobustMotionEstimator::PyrLkRobustMotionEstimator(MotionModel model)
Mat PyrLkRobustMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
// find keypoints
detector_->detect(frame0, keypointsPrev_);
// add extra keypoints
if (gridSize_.width > 0 && gridSize_.height > 0)
{
float dx = static_cast<float>(frame0.cols) / (gridSize_.width + 1);
float dy = static_cast<float>(frame0.rows) / (gridSize_.height + 1);
for (int x = 0; x < gridSize_.width; ++x)
for (int y = 0; y < gridSize_.height; ++y)
keypointsPrev_.push_back(KeyPoint((x+1)*dx, (y+1)*dy, 0.f));
}
// extract points from keypoints
/*Mat img;
drawKeypoints(frame0, keypointsPrev_, img);
imshow("frame0_keypoints", img);
waitKey(3);*/
pointsPrev_.resize(keypointsPrev_.size());
for (size_t i = 0; i < keypointsPrev_.size(); ++i)
pointsPrev_[i] = keypointsPrev_[i].pt;
// find correspondences
optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
// leave good correspondences only
pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
pointsGood_.clear(); pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
if (status_[i])
{
@ -431,24 +427,29 @@ Mat PyrLkRobustMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, b
}
}
size_t npoints = pointsGood_.size();
// find motion
int ninliers = 0;
Mat_<float> M;
if (motionModel_ != MM_HOMOGRAPHY)
M = estimateGlobalMotionRobust(
npoints, &pointsPrevGood_[0], &pointsGood_[0], motionModel_,
ransacParams_, 0, &ninliers);
else
{
vector<uchar> mask;
M = findHomography(pointsPrevGood_, pointsGood_, mask, CV_RANSAC, ransacParams_.thresh);
ninliers = 0;
for (size_t i = 0; i < pointsGood_.size(); ++i)
if (mask[i]) ninliers++;
}
// check if we're confident enough in estimated motion
if (ok) *ok = true;
if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
{
M = Mat::eye(3, 3, CV_32F);
if (ok) *ok = false;
@ -458,6 +459,85 @@ Mat PyrLkRobustMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, b
}
#if HAVE_OPENCV_GPU
PyrLkRobustMotionEstimatorGpu::PyrLkRobustMotionEstimatorGpu(MotionModel model)
{
CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
setMotionModel(model);
setRansacParams(RansacParams::default2dMotion(model));
setMinInlierRatio(0.1f);
}
Mat PyrLkRobustMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
frame0_.upload(frame0);
frame1_.upload(frame1);
return estimate(frame0_, frame1_, ok);
}
Mat PyrLkRobustMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
{
// convert frame to gray if it's color
gpu::GpuMat grayFrame0;
if (frame0.channels() == 1)
grayFrame0 = frame0;
else
{
gpu::cvtColor(frame0_, grayFrame0_, CV_BGR2GRAY);
grayFrame0 = grayFrame0_;
}
// find keypoints
detector_(grayFrame0, pointsPrev_);
// find correspondences
optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
// leave good correspondences only
gpu::compactPoints(pointsPrev_, points_, status_);
pointsPrev_.download(hostPointsPrev_);
points_.download(hostPoints_);
int npoints = hostPointsPrev_.cols;
// find motion
int ninliers = 0;
Mat_<float> M;
if (motionModel_ != MM_HOMOGRAPHY)
M = estimateGlobalMotionRobust(
npoints, hostPointsPrev_.ptr<Point2f>(0), hostPoints_.ptr<Point2f>(), motionModel_,
ransacParams_, 0, &ninliers);
else
{
vector<uchar> mask;
M = findHomography(hostPointsPrev_, hostPoints_, mask, CV_RANSAC, ransacParams_.thresh);
for (int i = 0; i < npoints; ++i)
if (mask[i]) ninliers++;
}
// check if we're confident enough in estimated motion
if (ok) *ok = true;
if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
{
M = Mat::eye(3, 3, CV_32F);
if (ok) *ok = false;
}
return M;
}
#endif // #if HAVE_OPENCV_GPU
Mat getMotion(int from, int to, const vector<Mat> &motions)
{
Mat M = Mat::eye(3, 3, CV_32F);

View File

@ -520,7 +520,6 @@ void completeFrameAccordingToFlow(
Mat_<uchar> flowMask_(flowMask), mask1_(mask1), mask0_(mask0);
Mat_<float> flowX_(flowX), flowY_(flowY);
//int count = 0;
for (int y0 = 0; y0 < frame0.rows; ++y0)
{
for (int x0 = 0; x0 < frame0.cols; ++x0)
@ -535,12 +534,10 @@ void completeFrameAccordingToFlow(
{
frame0.at<Point3_<uchar> >(y0,x0) = frame1.at<Point3_<uchar> >(y1,x1);
mask0_(y0,x0) = 255;
//count++;
}
}
}
}
//cout << count << endl;
}
} // namespace videostab } // namespace videostab

View File

@ -261,9 +261,6 @@ void LpMotionStabilizer::stabilize(
rowlb_.assign(nrows, -INF);
rowub_.assign(nrows, INF);
vector<CoinShallowPackedVector> packedRows;
packedRows.reserve(nrows);
int r = 0;
// frame corners

View File

@ -61,6 +61,53 @@ void SparsePyrLkOptFlowEstimator::run(
#if HAVE_OPENCV_GPU
SparsePyrLkOptFlowEstimatorGpu::SparsePyrLkOptFlowEstimatorGpu()
{
CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
}
void SparsePyrLkOptFlowEstimatorGpu::run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors)
{
frame0_.upload(frame0.getMat());
frame1_.upload(frame1.getMat());
points0_.upload(points0.getMat());
if (errors.needed())
{
run(frame0_, frame1_, points0_, points1_, status_, errors_);
errors_.download(errors.getMatRef());
}
else
run(frame0_, frame1_, points0_, points1_, status_);
points1_.download(points1.getMatRef());
status_.download(status.getMatRef());
}
void SparsePyrLkOptFlowEstimatorGpu::run(
const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, const gpu::GpuMat &points0,
gpu::GpuMat &points1, gpu::GpuMat &status, gpu::GpuMat &errors)
{
optFlowEstimator_.winSize = winSize_;
optFlowEstimator_.maxLevel = maxLevel_;
optFlowEstimator_.sparse(frame0, frame1, points0, points1, status, &errors);
}
void SparsePyrLkOptFlowEstimatorGpu::run(
const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, const gpu::GpuMat &points0,
gpu::GpuMat &points1, gpu::GpuMat &status)
{
optFlowEstimator_.winSize = winSize_;
optFlowEstimator_.maxLevel = maxLevel_;
optFlowEstimator_.sparse(frame0, frame1, points0, points1, status);
}
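
A brief sketch of the device-resident path added above (illustrative only; gray0, gray1 and points0 are assumed to already live on the GPU as 8-bit frames and a 1xN CV_32FC2 row of points):

SparsePyrLkOptFlowEstimatorGpu lk;
gpu::GpuMat points1, status;
lk.run(gray0, gray1, points0, points1, status); // no error output needed, so the overload without errors is used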
DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu()
{
CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
@ -76,6 +123,7 @@ void DensePyrLkOptFlowEstimatorGpu::run(
optFlowEstimator_.winSize = winSize_;
optFlowEstimator_.maxLevel = maxLevel_;
if (errors.needed())
{
optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
@ -87,8 +135,7 @@ void DensePyrLkOptFlowEstimatorGpu::run(
flowX_.download(flowX.getMatRef());
flowY_.download(flowY.getMatRef());
}
#endif // #if HAVE_OPENCV_GPU
} // namespace videostab
} // namespace cv

View File

@ -49,6 +49,7 @@
#include <stdexcept>
#include <iostream>
#include <ctime>
#include "opencv2/core/core.hpp" #include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc/imgproc.hpp"

View File

@ -321,6 +321,7 @@ void TwoPassStabilizer::runPrePassIfNecessary()
{
if (!isPrePassDone_)
{
clock_t startTime = clock();
log_->print("first pass: estimating motions");
Mat prevFrame, frame;
@ -346,6 +347,13 @@ void TwoPassStabilizer::runPrePassIfNecessary()
else
motions2_.push_back(motions_.back());
}
if (ok)
{
if (ok2) log_->print(".");
else log_->print("?");
}
else log_->print("x");
}
else
{
@ -356,13 +364,6 @@ void TwoPassStabilizer::runPrePassIfNecessary()
prevFrame = frame;
frameCount_++;
if (ok)
{
if (ok2) log_->print(".");
else log_->print("?");
}
else log_->print("x");
}
// add aux. motions
@ -419,6 +420,9 @@ void TwoPassStabilizer::runPrePassIfNecessary()
isPrePassDone_ = true;
frameSource_->reset();
clock_t elapsedTime = clock() - startTime;
log_->print("first pass time: %.3f sec\n", static_cast<double>(elapsedTime) / CLOCKS_PER_SEC);
}
}

View File

@ -148,6 +148,8 @@ void printHelp()
" Save motions estimated for wobble suppression. The default is no.\n" " Save motions estimated for wobble suppression. The default is no.\n"
" -lm2, --load-motions2=(<file_path>|no)\n" " -lm2, --load-motions2=(<file_path>|no)\n"
" Load motions for wobble suppression from file. The default is no.\n\n" " Load motions for wobble suppression from file. The default is no.\n\n"
" --gpu=(yes|no)\n"
" Use GPU optimization whenever possible. The default is no.\n\n"
" -o, --output=(no|<file_path>)\n" " -o, --output=(no|<file_path>)\n"
" Set output file path explicitely. The default is stabilized.avi.\n" " Set output file path explicitely. The default is stabilized.avi.\n"
" --fps=(<float_number>|auto)\n" " --fps=(<float_number>|auto)\n"
@ -206,6 +208,7 @@ int main(int argc, const char **argv)
"{ | ws-extra-kps | 0 | }" "{ | ws-extra-kps | 0 | }"
"{ sm2 | save-motions2 | no | }" "{ sm2 | save-motions2 | no | }"
"{ lm2 | load-motions2 | no | }" "{ lm2 | load-motions2 | no | }"
"{ | gpu | no }"
"{ o | output | stabilized.avi | }" "{ o | output | stabilized.avi | }"
"{ | fps | auto | }" "{ | fps | auto | }"
"{ q | quiet | false | }" "{ q | quiet | false | }"
@ -220,6 +223,17 @@ int main(int argc, const char **argv)
return 0;
}
#ifdef HAVE_OPENCV_GPU
if (arg("gpu") == "yes")
{
cout << "initializing GPU..."; cout.flush();
Mat hostTmp = Mat::zeros(1, 1, CV_32F);
gpu::GpuMat deviceTmp;
deviceTmp.upload(hostTmp);
cout << endl;
}
#endif
string inputPath = arg("1");
if (inputPath.empty()) throw runtime_error("specify video file path");
@ -258,6 +272,8 @@ int main(int argc, const char **argv)
twoPassStabilizer->setWobbleSuppressor(ws);
ws->setPeriod(argi("ws-period"));
if (arg("gpu") == "no")
{
PyrLkRobustMotionEstimator *est = 0;
if (arg("ws-model") == "transl")
@ -277,18 +293,56 @@ int main(int argc, const char **argv)
}
est->setDetector(new GoodFeaturesToTrackDetector(argi("ws-nkps")));
RansacParams ransac = est->ransacParams();
if (arg("ws-subset") != "auto") if (arg("ws-subset") != "auto") ransac.size = argi("ws-subset");
ransac.size = argi("ws-subset"); if (arg("ws-thresh") != "auto") ransac.thresh = argi("ws-thresh");
if (arg("ws-thresh") != "auto")
ransac.thresh = argi("ws-thresh");
ransac.eps = argf("ws-outlier-ratio"); ransac.eps = argf("ws-outlier-ratio");
est->setRansacParams(ransac); est->setRansacParams(ransac);
est->setMinInlierRatio(argf("ws-min-inlier-ratio")); est->setMinInlierRatio(argf("ws-min-inlier-ratio"));
est->setGridSize(Size(argi("ws-extra-kps"), argi("ws-extra-kps"))); est->setGridSize(Size(argi("ws-extra-kps"), argi("ws-extra-kps")));
ws->setMotionEstimator(est); ws->setMotionEstimator(est);
}
else if (arg("gpu") == "yes")
{
#ifdef HAVE_OPENCV_GPU
PyrLkRobustMotionEstimatorGpu *est = 0;
if (arg("ws-model") == "transl")
est = new PyrLkRobustMotionEstimatorGpu(MM_TRANSLATION);
else if (arg("ws-model") == "transl_and_scale")
est = new PyrLkRobustMotionEstimatorGpu(MM_TRANSLATION_AND_SCALE);
else if (arg("ws-model") == "linear_sim")
est = new PyrLkRobustMotionEstimatorGpu(MM_LINEAR_SIMILARITY);
else if (arg("ws-model") == "affine")
est = new PyrLkRobustMotionEstimatorGpu(MM_AFFINE);
else if (arg("ws-model") == "homography")
est = new PyrLkRobustMotionEstimatorGpu(MM_HOMOGRAPHY);
else
{
delete est;
throw runtime_error("unknown wobble suppression motion model: " + arg("ws-model"));
}
RansacParams ransac = est->ransacParams();
if (arg("ws-subset") != "auto") ransac.size = argi("ws-subset");
if (arg("ws-thresh") != "auto") ransac.thresh = argi("ws-thresh");
ransac.eps = argf("ws-outlier-ratio");
est->setRansacParams(ransac);
est->setMinInlierRatio(argf("ws-min-inlier-ratio"));
ws->setMotionEstimator(est);
#else
throw runtime_error("OpenCV is built without GPU support");
#endif
}
else
{
throw runtime_error("bad gpu optimization argument value: " + arg("gpu"));
}
MotionModel model = ws->motionEstimator()->motionModel();
if (arg("load-motions2") != "no") if (arg("load-motions2") != "no")
{ {
ws->setMotionEstimator(new FromFileMotionReader(arg("load-motions2"))); ws->setMotionEstimator(new FromFileMotionReader(arg("load-motions2")));
@ -314,6 +368,8 @@ int main(int argc, const char **argv)
stabilizer->setFrameSource(source);
stabilizedFrames = dynamic_cast<IFrameSource*>(stabilizer);
if (arg("gpu") == "no")
{
PyrLkRobustMotionEstimator *est = 0;
if (arg("model") == "transl")
@ -334,15 +390,52 @@ int main(int argc, const char **argv)
est->setDetector(new GoodFeaturesToTrackDetector(argi("nkps")));
RansacParams ransac = est->ransacParams();
if (arg("subset") != "auto") if (arg("subset") != "auto") ransac.size = argi("subset");
ransac.size = argi("subset"); if (arg("thresh") != "auto") ransac.thresh = argi("thresh");
if (arg("thresh") != "auto")
ransac.thresh = argi("thresh");
ransac.eps = argf("outlier-ratio"); ransac.eps = argf("outlier-ratio");
est->setRansacParams(ransac); est->setRansacParams(ransac);
est->setMinInlierRatio(argf("min-inlier-ratio")); est->setMinInlierRatio(argf("min-inlier-ratio"));
est->setGridSize(Size(argi("extra-kps"), argi("extra-kps"))); est->setGridSize(Size(argi("extra-kps"), argi("extra-kps")));
stabilizer->setMotionEstimator(est); stabilizer->setMotionEstimator(est);
}
else if (arg("gpu") == "yes")
{
#ifdef HAVE_OPENCV_GPU
PyrLkRobustMotionEstimatorGpu *est = 0;
if (arg("ws-model") == "transl")
est = new PyrLkRobustMotionEstimatorGpu(MM_TRANSLATION);
else if (arg("ws-model") == "transl_and_scale")
est = new PyrLkRobustMotionEstimatorGpu(MM_TRANSLATION_AND_SCALE);
else if (arg("ws-model") == "linear_sim")
est = new PyrLkRobustMotionEstimatorGpu(MM_LINEAR_SIMILARITY);
else if (arg("ws-model") == "affine")
est = new PyrLkRobustMotionEstimatorGpu(MM_AFFINE);
else if (arg("ws-model") == "homography")
est = new PyrLkRobustMotionEstimatorGpu(MM_HOMOGRAPHY);
else
{
delete est;
throw runtime_error("unknown wobble suppression motion model: " + arg("ws-model"));
}
RansacParams ransac = est->ransacParams();
if (arg("ws-subset") != "auto") ransac.size = argi("ws-subset");
if (arg("ws-thresh") != "auto") ransac.thresh = argi("ws-thresh");
ransac.eps = argf("ws-outlier-ratio");
est->setRansacParams(ransac);
est->setMinInlierRatio(argf("ws-min-inlier-ratio"));
stabilizer->setMotionEstimator(est);
#else
throw runtime_error("OpenCV is built without GPU support");
#endif
}
else
{
throw runtime_error("bad gpu optimization argument value: " + arg("gpu"));
}
MotionModel model = stabilizer->motionEstimator()->motionModel();
if (arg("load-motions") != "no")