Add interactive calibration app

Vladislav Sovrasov 2016-04-08 16:30:56 +03:00
parent ec63343f34
commit 5a0c04409b
25 changed files with 3422 additions and 0 deletions

View File

@@ -4,3 +4,4 @@ link_libraries(${OPENCV_LINKER_LIBS})
add_subdirectory(traincascade)
add_subdirectory(createsamples)
add_subdirectory(annotation)
add_subdirectory(interactive-calibration)

View File

@@ -0,0 +1,48 @@
set(OPENCV_INTERACTIVECALIBRATION_DEPS opencv_core opencv_aruco opencv_highgui opencv_calib3d opencv_videoio)
ocv_check_dependencies(${OPENCV_INTERACTIVECALIBRATION_DEPS})
if(NOT OCV_DEPENDENCIES_FOUND)
return()
endif()
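# Optional: use LAPACK-backed linear solvers for covariance estimation (see the USE_LAPACK guards in cvCalibrationFork.cpp)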
find_package(LAPACK)
if(LAPACK_FOUND)
find_file(LAPACK_HEADER "lapacke.h")
if(LAPACK_HEADER)
add_definitions(-DUSE_LAPACK)
link_libraries(${LAPACK_LIBRARIES})
endif()
endif()
project(interactive-calibration)
set(the_target opencv_interactive-calibration)
ocv_target_include_directories(${the_target} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_target_include_modules_recurse(${the_target} ${OPENCV_INTERACTIVECALIBRATION_DEPS})
file(GLOB SRCS *.cpp)
file(GLOB HDRS *.h*)
set(interactive-calibration_files ${SRCS} ${HDRS})
ocv_add_executable(${the_target} ${interactive-calibration_files})
ocv_target_link_libraries(${the_target} ${OPENCV_INTERACTIVECALIBRATION_DEPS})
set_target_properties(${the_target} PROPERTIES
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
INSTALL_NAME_DIR lib
OUTPUT_NAME "opencv_interactive-calibration")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${the_target} PROPERTIES FOLDER "applications")
endif()
if(INSTALL_CREATE_DISTRIB)
if(BUILD_SHARED_LIBS)
install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
endif()
else()
install(TARGETS ${the_target} OPTIONAL RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()

View File

@@ -0,0 +1,118 @@
#ifndef CALIB_COMMON_HPP
#define CALIB_COMMON_HPP
#include <memory>
#include <opencv2/core.hpp>
#include <vector>
#include <string>
namespace calib
{
#define OVERLAY_DELAY 1000
#define IMAGE_MAX_WIDTH 1280
#define IMAGE_MAX_HEIGHT 960
bool showOverlayMessage(const std::string& message);
enum InputType { Video, Pictures };
enum InputVideoSource { Camera, File };
enum TemplateType { AcirclesGrid, Chessboard, chAruco, DoubleAcirclesGrid };
static const std::string mainWindowName = "Calibration";
static const std::string gridWindowName = "Board locations";
static const std::string consoleHelp = "Hot keys:\nesc - exit application\n"
"s - save current data to .xml file\n"
"r - delete last frame\n"
"u - enable/disable applying undistortion"
"d - delete all frames\n"
"v - switch visualization";
static const double sigmaMult = 1.96;
struct calibrationData
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
cv::Mat perViewErrors;
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
double totalAvgErr;
cv::Size imageSize;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{
imageSize = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
}
};
struct cameraParameters
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
double avgError;
cameraParameters(){}
cameraParameters(cv::Mat& _cameraMatrix, cv::Mat& _distCoeffs, cv::Mat& _stdDeviations, double _avgError = 0) :
cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), stdDeviations(_stdDeviations), avgError(_avgError)
{}
};
struct captureParameters
{
InputType captureMethod;
InputVideoSource source;
TemplateType board;
cv::Size boardSize;
int charucoDictName;
int calibrationStep;
float charucoSquareLenght, charucoMarkerSize;
float captureDelay;
float squareSize;
float templDst;
std::string videoFileName;
bool flipVertical;
int camID;
int fps;
cv::Size cameraResolution;
int maxFramesNum;
int minFramesNum;
captureParameters()
{
calibrationStep = 1;
captureDelay = 500.f;
maxFramesNum = 30;
minFramesNum = 10;
fps = 30;
cameraResolution = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
}
};
struct internalParameters
{
double solverEps;
int solverMaxIters;
bool fastSolving;
double filterAlpha;
internalParameters()
{
solverEps = 1e-7;
solverMaxIters = 30;
fastSolving = false;
filterAlpha = 0.1;
}
};
}
#endif

View File

@@ -0,0 +1,327 @@
#include "calibController.hpp"
#include <algorithm>
#include <cmath>
#include <ctime>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
double calib::calibController::estimateCoverageQuality()
{
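// Split the image into a 10x10 grid of cells and count detected points per cell;
// quality = mean/std of the counts, so higher values mean more uniform coverage.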
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibData->imagePoints.begin(); it != mCalibData->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(std::vector<cv::Mat>::iterator it = mCalibData->allCharucoCorners.begin(); it != mCalibData->allCharucoCorners.end(); ++it)
for(int l = 0; l < (*it).size[0]; l++) {
int i = (int)((*it).at<float>(l, 0) / xGridStep);
int j = (int)((*it).at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibController::calibController()
{
mCalibFlags = 0;
}
calib::calibController::calibController(cv::Ptr<calib::calibrationData> data, int initialFlags, bool autoTuning, int minFramesNum) :
mCalibData(data)
{
mCalibFlags = initialFlags;
mNeedTuning = autoTuning;
mMinFramesNum = minFramesNum;
mConfIntervalsState = false;
mCoverageQualityState = false;
}
void calib::calibController::updateState()
{
if(mCalibData->cameraMatrix.total()) {
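// Focal length and principal point are treated as converged when the 95% confidence
// half-interval is below 5% of the estimate; distortion coefficients only need to
// exceed their own standard deviation.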
const double relErrEps = 0.05;
bool fConfState = false, cConfState = false, dConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(0) / mCalibData->cameraMatrix.at<double>(0,0) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(1) / mCalibData->cameraMatrix.at<double>(1,1) < relErrEps)
fConfState = true;
if(sigmaMult*mCalibData->stdDeviations.at<double>(2) / mCalibData->cameraMatrix.at<double>(0,2) < relErrEps &&
sigmaMult*mCalibData->stdDeviations.at<double>(3) / mCalibData->cameraMatrix.at<double>(1,2) < relErrEps)
cConfState = true;
for(int i = 0; i < 5; i++)
if(mCalibData->stdDeviations.at<double>(4+i) / fabs(mCalibData->distCoeffs.at<double>(i)) > 1)
dConfState = false;
mConfIntervalsState = fConfState && cConfState && dConfState;
}
if(getFramesNumberState())
mCoverageQualityState = estimateCoverageQuality() > 1.8;
if (getFramesNumberState() && mNeedTuning) {
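// Auto-tuning: fix parameters that are statistically indistinguishable from their
// constrained values (fx == fy, zero tangential or radial terms) to simplify the model.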
if( !(mCalibFlags & cv::CALIB_FIX_ASPECT_RATIO) &&
mCalibData->cameraMatrix.total()) {
double fDiff = fabs(mCalibData->cameraMatrix.at<double>(0,0) -
mCalibData->cameraMatrix.at<double>(1,1));
if (fDiff < 3*mCalibData->stdDeviations.at<double>(0) &&
fDiff < 3*mCalibData->stdDeviations.at<double>(1)) {
mCalibFlags |= cv::CALIB_FIX_ASPECT_RATIO;
mCalibData->cameraMatrix.at<double>(0,0) =
mCalibData->cameraMatrix.at<double>(1,1);
}
}
if(!(mCalibFlags & cv::CALIB_ZERO_TANGENT_DIST)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(2)) < eps &&
fabs(mCalibData->distCoeffs.at<double>(3)) < eps)
mCalibFlags |= cv::CALIB_ZERO_TANGENT_DIST;
}
if(!(mCalibFlags & cv::CALIB_FIX_K1)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(0)) < eps)
mCalibFlags |= cv::CALIB_FIX_K1;
}
if(!(mCalibFlags & cv::CALIB_FIX_K2)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(1)) < eps)
mCalibFlags |= cv::CALIB_FIX_K2;
}
if(!(mCalibFlags & cv::CALIB_FIX_K3)) {
const double eps = 0.005;
if(fabs(mCalibData->distCoeffs.at<double>(4)) < eps)
mCalibFlags |= cv::CALIB_FIX_K3;
}
}
}
bool calib::calibController::getCommonCalibrationState() const
{
int rating = (int)getFramesNumberState() + (int)getConfidenceIntrervalsState() +
(int)getRMSState() + (int)mCoverageQualityState;
return rating == 4;
}
bool calib::calibController::getFramesNumberState() const
{
return std::max(mCalibData->imagePoints.size(), mCalibData->allCharucoCorners.size()) > mMinFramesNum;
}
bool calib::calibController::getConfidenceIntrervalsState() const
{
return mConfIntervalsState;
}
bool calib::calibController::getRMSState() const
{
return mCalibData->totalAvgErr < 0.5;
}
int calib::calibController::getNewFlags() const
{
return mCalibFlags;
}
//////////////////// calibDataController
double calib::calibDataController::estimateGridSubsetQuality(size_t excludedIndex)
{
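// Same coverage metric as calibController::estimateCoverageQuality(), but with frame
// excludedIndex left out (passing an out-of-range index includes all frames).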
int gridSize = 10;
int xGridStep = mCalibData->imageSize.width / gridSize;
int yGridStep = mCalibData->imageSize.height / gridSize;
std::vector<int> pointsInCell(gridSize*gridSize);
std::fill(pointsInCell.begin(), pointsInCell.end(), 0);
for(size_t k = 0; k < mCalibData->imagePoints.size(); k++)
if(k != excludedIndex)
for(std::vector<cv::Point2f>::iterator pointIt = mCalibData->imagePoints[k].begin(); pointIt != mCalibData->imagePoints[k].end(); ++pointIt) {
int i = (int)((*pointIt).x / xGridStep);
int j = (int)((*pointIt).y / yGridStep);
pointsInCell[i*gridSize + j]++;
}
for(size_t k = 0; k < mCalibData->allCharucoCorners.size(); k++)
if(k != excludedIndex)
for(int l = 0; l < mCalibData->allCharucoCorners[k].size[0]; l++) {
int i = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 0) / xGridStep);
int j = (int)(mCalibData->allCharucoCorners[k].at<float>(l, 1) / yGridStep);
pointsInCell[i*gridSize + j]++;
}
cv::Mat mean, stdDev;
cv::meanStdDev(pointsInCell, mean, stdDev);
return mean.at<double>(0) / (stdDev.at<double>(0) + 1e-7);
}
calib::calibDataController::calibDataController(cv::Ptr<calib::calibrationData> data, int maxFrames, double convParameter) :
mCalibData(data), mParamsFileName("CamParams.xml")
{
mMaxFramesNum = maxFrames;
mAlpha = convParameter;
}
calib::calibDataController::calibDataController()
{
}
void calib::calibDataController::filterFrames()
{
size_t numberOfFrames = std::max(mCalibData->allCharucoIds.size(), mCalibData->imagePoints.size());
CV_Assert(numberOfFrames == mCalibData->perViewErrors.total());
if(numberOfFrames >= mMaxFramesNum) {
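// The worst frame maximizes a convex combination of its reprojection error
// (weight mAlpha) and the coverage gained by dropping it (weight 1 - mAlpha).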
double worstValue = -HUGE_VAL, maxQuality = estimateGridSubsetQuality(numberOfFrames);
size_t worstElemIndex = 0;
for(size_t i = 0; i < numberOfFrames; i++) {
double gridQDelta = estimateGridSubsetQuality(i) - maxQuality;
double currentValue = mCalibData->perViewErrors.at<double>((int)i)*mAlpha + gridQDelta*(1. - mAlpha);
if(currentValue > worstValue) {
worstValue = currentValue;
worstElemIndex = i;
}
}
showOverlayMessage(cv::format("Frame %d is worst", (int)worstElemIndex + 1));
if(mCalibData->imagePoints.size()) {
mCalibData->imagePoints.erase(mCalibData->imagePoints.begin() + worstElemIndex);
mCalibData->objectPoints.erase(mCalibData->objectPoints.begin() + worstElemIndex);
}
else {
mCalibData->allCharucoCorners.erase(mCalibData->allCharucoCorners.begin() + worstElemIndex);
mCalibData->allCharucoIds.erase(mCalibData->allCharucoIds.begin() + worstElemIndex);
}
cv::Mat newErrorsVec = cv::Mat((int)numberOfFrames - 1, 1, CV_64F);
std::copy(mCalibData->perViewErrors.ptr<double>(0),
mCalibData->perViewErrors.ptr<double>((int)worstElemIndex), newErrorsVec.ptr<double>(0));
std::copy(mCalibData->perViewErrors.ptr<double>((int)worstElemIndex + 1), mCalibData->perViewErrors.ptr<double>((int)numberOfFrames),
newErrorsVec.ptr<double>((int)worstElemIndex));
mCalibData->perViewErrors = newErrorsVec;
}
}
void calib::calibDataController::setParametersFileName(const std::string &name)
{
mParamsFileName = name;
}
void calib::calibDataController::deleteLastFrame()
{
if( !mCalibData->imagePoints.empty()) {
mCalibData->imagePoints.pop_back();
mCalibData->objectPoints.pop_back();
}
if (!mCalibData->allCharucoCorners.empty()) {
mCalibData->allCharucoCorners.pop_back();
mCalibData->allCharucoIds.pop_back();
}
if(!mParamsStack.empty()) {
mCalibData->cameraMatrix = (mParamsStack.top()).cameraMatrix;
mCalibData->distCoeffs = (mParamsStack.top()).distCoeffs;
mCalibData->stdDeviations = (mParamsStack.top()).stdDeviations;
mCalibData->totalAvgErr = (mParamsStack.top()).avgError;
mParamsStack.pop();
}
}
void calib::calibDataController::rememberCurrentParameters()
{
cv::Mat oldCameraMat, oldDistcoeefs, oldStdDevs;
mCalibData->cameraMatrix.copyTo(oldCameraMat);
mCalibData->distCoeffs.copyTo(oldDistcoeefs);
mCalibData->stdDeviations.copyTo(oldStdDevs);
mParamsStack.push(cameraParameters(oldCameraMat, oldDistcoeefs, oldStdDevs, mCalibData->totalAvgErr));
}
void calib::calibDataController::deleteAllData()
{
mCalibData->imagePoints.clear();
mCalibData->objectPoints.clear();
mCalibData->allCharucoCorners.clear();
mCalibData->allCharucoIds.clear();
mCalibData->cameraMatrix = mCalibData->distCoeffs = cv::Mat();
mParamsStack = std::stack<cameraParameters>();
rememberCurrentParameters();
}
bool calib::calibDataController::saveCurrentCameraParameters() const
{
bool success = false;
if(mCalibData->cameraMatrix.total()) {
cv::FileStorage parametersWriter(mParamsFileName, cv::FileStorage::WRITE);
if(parametersWriter.isOpened()) {
time_t rawtime;
time(&rawtime);
char buf[256];
strftime(buf, sizeof(buf)-1, "%c", localtime(&rawtime));
parametersWriter << "calibrationDate" << buf;
parametersWriter << "framesCount" << std::max((int)mCalibData->objectPoints.size(), (int)mCalibData->allCharucoCorners.size());
parametersWriter << "cameraResolution" << mCalibData->imageSize;
parametersWriter << "cameraMatrix" << mCalibData->cameraMatrix;
parametersWriter << "cameraMatrix_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(0, 4));
parametersWriter << "dist_coeffs" << mCalibData->distCoeffs;
parametersWriter << "dist_coeffs_std_dev" << mCalibData->stdDeviations.rowRange(cv::Range(4, 9));
parametersWriter << "avg_reprojection_error" << mCalibData->totalAvgErr;
parametersWriter.release();
success = true;
}
}
return success;
}
void calib::calibDataController::printParametersToConsole(std::ostream &output) const
{
const char* border = "---------------------------------------------------";
output << border << std::endl;
output << "Frames used for calibration: " << std::max(mCalibData->objectPoints.size(), mCalibData->allCharucoCorners.size())
<< " \t RMS = " << mCalibData->totalAvgErr << std::endl;
if(mCalibData->cameraMatrix.at<double>(0,0) == mCalibData->cameraMatrix.at<double>(1,1))
output << "F = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
else
output << "Fx = " << mCalibData->cameraMatrix.at<double>(0,0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(0) << " \t "
<< "Fy = " << mCalibData->cameraMatrix.at<double>(1,1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(1) << std::endl;
output << "Cx = " << mCalibData->cameraMatrix.at<double>(0,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(2) << " \t"
<< "Cy = " << mCalibData->cameraMatrix.at<double>(1,2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(3) << std::endl;
output << "K1 = " << mCalibData->distCoeffs.at<double>(0) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(4) << std::endl;
output << "K2 = " << mCalibData->distCoeffs.at<double>(1) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(5) << std::endl;
output << "K3 = " << mCalibData->distCoeffs.at<double>(4) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(8) << std::endl;
output << "TD1 = " << mCalibData->distCoeffs.at<double>(2) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(6) << std::endl;
output << "TD2 = " << mCalibData->distCoeffs.at<double>(3) << " +- " << sigmaMult*mCalibData->stdDeviations.at<double>(7) << std::endl;
}
void calib::calibDataController::updateUndistortMap()
{
cv::initUndistortRectifyMap(mCalibData->cameraMatrix, mCalibData->distCoeffs, cv::noArray(),
cv::getOptimalNewCameraMatrix(mCalibData->cameraMatrix, mCalibData->distCoeffs, mCalibData->imageSize, 0.0, mCalibData->imageSize),
mCalibData->imageSize, CV_16SC2, mCalibData->undistMap1, mCalibData->undistMap2);
}

View File

@@ -0,0 +1,64 @@
#ifndef CALIB_CONTROLLER_HPP
#define CALIB_CONTROLLER_HPP
#include "calibCommon.hpp"
#include <stack>
#include <string>
#include <ostream>
namespace calib {
class calibController
{
protected:
cv::Ptr<calibrationData> mCalibData;
int mCalibFlags;
unsigned mMinFramesNum;
bool mNeedTuning;
bool mConfIntervalsState;
bool mCoverageQualityState;
double estimateCoverageQuality();
public:
calibController();
calibController(cv::Ptr<calibrationData> data, int initialFlags, bool autoTuning,
int minFramesNum);
void updateState();
bool getCommonCalibrationState() const;
bool getFramesNumberState() const;
bool getConfidenceIntrervalsState() const;
bool getRMSState() const;
bool getPointsCoverageState() const;
int getNewFlags() const;
};
class calibDataController
{
protected:
cv::Ptr<calibrationData> mCalibData;
std::stack<cameraParameters> mParamsStack;
std::string mParamsFileName;
unsigned mMaxFramesNum;
double mAlpha;
double estimateGridSubsetQuality(size_t excludedIndex);
public:
calibDataController(cv::Ptr<calibrationData> data, int maxFrames, double convParameter);
calibDataController();
void filterFrames();
void setParametersFileName(const std::string& name);
void deleteLastFrame();
void rememberCurrentParameters();
void deleteAllData();
bool saveCurrentCameraParameters() const;
void printParametersToConsole(std::ostream &output) const;
void updateUndistortMap();
};
}
#endif

View File

@@ -0,0 +1,91 @@
#include "calibPipeline.hpp"
#include <opencv2/highgui.hpp>
#include <stdexcept>
using namespace calib;
#define CAP_DELAY 10
cv::Size CalibPipeline::getCameraResolution()
{
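// Probe the maximum supported resolution: request an oversized frame and let the driver clamp it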
mCapture.set(cv::CAP_PROP_FRAME_WIDTH, 10000);
mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, 10000);
int w = (int)mCapture.get(cv::CAP_PROP_FRAME_WIDTH);
int h = (int)mCapture.get(cv::CAP_PROP_FRAME_HEIGHT);
return cv::Size(w,h);
}
CalibPipeline::CalibPipeline(captureParameters params) :
mCaptureParams(params)
{
}
PipelineExitStatus CalibPipeline::start(std::vector<cv::Ptr<FrameProcessor> > processors)
{
if(mCaptureParams.source == Camera && !mCapture.isOpened())
{
mCapture.open(mCaptureParams.camID);
cv::Size maxRes = getCameraResolution();
cv::Size neededRes = mCaptureParams.cameraResolution;
if(maxRes.width < neededRes.width) {
double aR = (double)maxRes.width / maxRes.height;
mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.width);
mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.width/aR);
}
else if(maxRes.height < neededRes.height) {
double aR = (double)maxRes.width / maxRes.height;
mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.height);
mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.height*aR);
}
else {
mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.height);
mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.width);
}
mCapture.set(cv::CAP_PROP_AUTOFOCUS, 0);
}
else if (mCaptureParams.source == File && !mCapture.isOpened())
mCapture.open(mCaptureParams.videoFileName);
mImageSize = cv::Size((int)mCapture.get(cv::CAP_PROP_FRAME_WIDTH), (int)mCapture.get(cv::CAP_PROP_FRAME_HEIGHT));
if(!mCapture.isOpened())
throw std::runtime_error("Unable to open video source");
cv::Mat frame, processedFrame;
while(mCapture.grab()) {
mCapture.retrieve(frame);
if(mCaptureParams.flipVertical)
cv::flip(frame, frame, -1);
frame.copyTo(processedFrame);
for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
processedFrame = (*it)->processFrame(processedFrame);
cv::imshow(mainWindowName, processedFrame);
int key = cv::waitKey(CAP_DELAY);
if(key == 27) // esc
return Finished;
else if (key == 114) // r
return DeleteLastFrame;
else if (key == 100) // d
return DeleteAllFrames;
else if (key == 115) // s
return SaveCurrentData;
else if (key == 117) // u
return SwitchUndistort;
else if (key == 118) // v
return SwitchVisualisation;
for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
if((*it)->isProcessed())
return Calibrate;
}
return Finished;
}
cv::Size CalibPipeline::getImageSize() const
{
return mImageSize;
}

View File

@@ -0,0 +1,39 @@
#ifndef CALIB_PIPELINE_HPP
#define CALIB_PIPELINE_HPP
#include <vector>
#include <opencv2/highgui.hpp>
#include "calibCommon.hpp"
#include "frameProcessor.hpp"
namespace calib
{
enum PipelineExitStatus { Finished,
DeleteLastFrame,
Calibrate,
DeleteAllFrames,
SaveCurrentData,
SwitchUndistort,
SwitchVisualisation
};
class CalibPipeline
{
protected:
captureParameters mCaptureParams;
cv::Size mImageSize;
cv::VideoCapture mCapture;
cv::Size getCameraResolution();
public:
CalibPipeline(captureParameters params);
PipelineExitStatus start(std::vector<cv::Ptr<FrameProcessor> > processors);
cv::Size getImageSize() const;
};
}
#endif

View File

@@ -0,0 +1,824 @@
#include <opencv2/calib3d.hpp>
#include "linalg.hpp"
#include "cvCalibrationFork.hpp"
using namespace cv;
static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>& cols,
const std::vector<uchar>& rows);
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12, 12x1, 1x14 or 14x1 floating-point vector";
static void cvEvaluateJtJ2(CvMat* _JtJ,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
const CvMat* object_points,
const CvMat* param,
const CvMat* npoints,
int flags, int NINTRINSIC, double aspectRatio)
{
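// Rebuilds the full JtJ (normal equations) matrix at the final parameter estimate;
// its pseudo-inverse yields the parameter covariance used for the standard deviations.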
int i, pos, ni, total = 0, npstep = 0, maxPoints = 0;
npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
int nimages = npoints->rows*npoints->cols;
for( i = 0; i < nimages; i++ )
{
ni = npoints->data.i[i*npstep];
if( ni < 4 )
{
CV_Error_( CV_StsOutOfRange, ("The number of points in the view #%d is < 4", i));
}
maxPoints = MAX( maxPoints, ni );
total += ni;
}
Mat _Ji( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0));
Mat _Je( maxPoints*2, 6, CV_64FC1 );
Mat _err( maxPoints*2, 1, CV_64FC1 );
Mat _m( 1, total, CV_64FC2 );
const Mat matM = cvarrToMat(object_points);
cvZero(_JtJ);
for(i = 0, pos = 0; i < nimages; i++, pos += ni )
{
CvMat _ri, _ti;
ni = npoints->data.i[i*npstep];
cvGetRows( param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
cvGetRows( param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
CvMat _Mi(matM.colRange(pos, pos + ni));
CvMat _mi(_m.colRange(pos, pos + ni));
_Je.resize(ni*2); _Ji.resize(ni*2); _err.resize(ni*2);
CvMat _dpdr(_Je.colRange(0, 3));
CvMat _dpdt(_Je.colRange(3, 6));
CvMat _dpdf(_Ji.colRange(0, 2));
CvMat _dpdc(_Ji.colRange(2, 4));
CvMat _dpdk(_Ji.colRange(4, NINTRINSIC));
CvMat _mp(_err.reshape(2, 1));
cvProjectPoints2( &_Mi, &_ri, &_ti, camera_matrix, distortion_coeffs, &_mp, &_dpdr, &_dpdt,
(flags & CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
(flags & CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
(flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
cvSub( &_mp, &_mi, &_mp );
Mat JtJ(cvarrToMat(_JtJ));
// see HZ: (A6.14) for details on the structure of the Jacobian
JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji;
JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) = _Je.t() * _Je;
JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) = _Ji.t() * _Je;
}
}
double cvfork::cvCalibrateCamera2( const CvMat* objectPoints,
const CvMat* imagePoints, const CvMat* npoints,
CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
CvMat* rvecs, CvMat* tvecs, CvMat* stdDevs, CvMat* perViewErrors, int flags, CvTermCriteria termCrit )
{
const int NINTRINSIC = CV_CALIB_NINTRINSIC;
double reprojErr = 0;
Matx33d A;
double k[14] = {0};
CvMat matA = cvMat(3, 3, CV_64F, A.val), _k;
int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
double aspectRatio = 0.;
// 0. check the parameters & allocate buffers
if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
!CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
CV_Error( CV_StsBadArg, "One of required vector arguments is not a valid matrix" );
if( imageSize.width <= 0 || imageSize.height <= 0 )
CV_Error( CV_StsOutOfRange, "image width and height must be positive" );
if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
(npoints->rows != 1 && npoints->cols != 1) )
CV_Error( CV_StsUnsupportedFormat,
"the array of point counters must be 1-dimensional integer vector" );
if(flags & CV_CALIB_TILTED_MODEL)
{
//when the tilted sensor model is used the distortion coefficients matrix must have 14 parameters
if (distCoeffs->cols*distCoeffs->rows != 14)
CV_Error( CV_StsBadArg, "The tilted sensor model must have 14 parameters in the distortion matrix" );
}
else
{
//when the thin prism model is used the distortion coefficients matrix must have 12 parameters
if(flags & CV_CALIB_THIN_PRISM_MODEL)
if (distCoeffs->cols*distCoeffs->rows != 12)
CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
}
nimages = npoints->rows*npoints->cols;
npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
if( rvecs )
{
cn = CV_MAT_CN(rvecs->type);
if( !CV_IS_MAT(rvecs) ||
(CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
(rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
CV_Error( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
"1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
}
if( tvecs )
{
cn = CV_MAT_CN(tvecs->type);
if( !CV_IS_MAT(tvecs) ||
(CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
(tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
CV_Error( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
"1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
}
if( stdDevs )
{
cn = CV_MAT_CN(stdDevs->type);
if( !CV_IS_MAT(stdDevs) ||
(CV_MAT_DEPTH(stdDevs->type) != CV_32F && CV_MAT_DEPTH(stdDevs->type) != CV_64F) ||
((stdDevs->rows != (nimages*6 + NINTRINSIC) || stdDevs->cols*cn != 1) &&
(stdDevs->rows != 1 || stdDevs->cols != (nimages*6 + NINTRINSIC) || cn != 1)) )
CV_Error( CV_StsBadArg, "the output array of standard deviations vectors must be 1-channel "
"1x(n*6 + NINTRINSIC) or (n*6 + NINTRINSIC)x1 array, where n is the number of views" );
}
if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
CV_Error( CV_StsBadArg,
"Intrinsic parameters must be 3x3 floating-point matrix" );
if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
(distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
(distCoeffs->cols*distCoeffs->rows != 4 &&
distCoeffs->cols*distCoeffs->rows != 5 &&
distCoeffs->cols*distCoeffs->rows != 8 &&
distCoeffs->cols*distCoeffs->rows != 12 &&
distCoeffs->cols*distCoeffs->rows != 14) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
for( i = 0; i < nimages; i++ )
{
ni = npoints->data.i[i*npstep];
if( ni < 4 )
{
CV_Error_( CV_StsOutOfRange, ("The number of points in the view #%d is < 4", i));
}
maxPoints = MAX( maxPoints, ni );
total += ni;
}
Mat matM( 1, total, CV_64FC3 );
Mat _m( 1, total, CV_64FC2 );
if(CV_MAT_CN(objectPoints->type) == 3) {
cvarrToMat(objectPoints).convertTo(matM, CV_64F);
} else {
convertPointsHomogeneous(cvarrToMat(objectPoints), matM);
}
if(CV_MAT_CN(imagePoints->type) == 2) {
cvarrToMat(imagePoints).convertTo(_m, CV_64F);
} else {
convertPointsHomogeneous(cvarrToMat(imagePoints), _m);
}
nparams = NINTRINSIC + nimages*6;
Mat _Ji( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0));
Mat _Je( maxPoints*2, 6, CV_64FC1 );
Mat _err( maxPoints*2, 1, CV_64FC1 );
_k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
{
if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
flags |= CALIB_FIX_K3;
flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
}
const double minValidAspectRatio = 0.01;
const double maxValidAspectRatio = 100.0;
// 1. initialize intrinsic parameters & LM solver
if( flags & CALIB_USE_INTRINSIC_GUESS )
{
cvConvert( cameraMatrix, &matA );
if( A(0, 0) <= 0 || A(1, 1) <= 0 )
CV_Error( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
if( A(0, 2) < 0 || A(0, 2) >= imageSize.width ||
A(1, 2) < 0 || A(1, 2) >= imageSize.height )
CV_Error( CV_StsOutOfRange, "Principal point must be within the image" );
if( fabs(A(0, 1)) > 1e-5 )
CV_Error( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
if( fabs(A(1, 0)) > 1e-5 || fabs(A(2, 0)) > 1e-5 ||
fabs(A(2, 1)) > 1e-5 || fabs(A(2,2)-1) > 1e-5 )
CV_Error( CV_StsOutOfRange,
"The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
A(0, 1) = A(1, 0) = A(2, 0) = A(2, 1) = 0.;
A(2, 2) = 1.;
if( flags & CALIB_FIX_ASPECT_RATIO )
{
aspectRatio = A(0, 0)/A(1, 1);
if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
CV_Error( CV_StsOutOfRange,
"The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
}
cvConvert( distCoeffs, &_k );
}
else
{
Scalar mean, sdv;
meanStdDev(matM, mean, sdv);
if( fabs(mean[2]) > 1e-5 || fabs(sdv[2]) > 1e-5 )
CV_Error( CV_StsBadArg,
"For non-planar calibration rigs the initial intrinsic matrix must be specified" );
for( i = 0; i < total; i++ )
matM.at<Point3d>(i).z = 0.;
if( flags & CALIB_FIX_ASPECT_RATIO )
{
aspectRatio = cvmGet(cameraMatrix,0,0);
aspectRatio /= cvmGet(cameraMatrix,1,1);
if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
CV_Error( CV_StsOutOfRange,
"The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
}
CvMat _matM(matM), m(_m);
cvInitIntrinsicParams2D( &_matM, &m, npoints, imageSize, &matA, aspectRatio );
}
//CvLevMarq solver( nparams, 0, termCrit );
cvfork::CvLevMarqFork solver( nparams, 0, termCrit );
Mat allErrors(1, total, CV_64FC2);
if(flags & CALIB_USE_LU) {
solver.solveMethod = DECOMP_LU;
}
else if(flags & CALIB_USE_QR)
solver.solveMethod = DECOMP_QR;
{
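// Parameter vector layout: [fx fy cx cy k1 k2 p1 p2 k3 k4 k5 k6 s1 s2 s3 s4 tauX tauY]
// (NINTRINSIC = 18 entries), followed by 6 extrinsic parameters (rvec, tvec) per view.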
double* param = solver.param->data.db;
uchar* mask = solver.mask->data.ptr;
param[0] = A(0, 0); param[1] = A(1, 1); param[2] = A(0, 2); param[3] = A(1, 2);
std::copy(k, k + 14, param + 4);
if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
mask[0] = mask[1] = 0;
if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
mask[2] = mask[3] = 0;
if( flags & CV_CALIB_ZERO_TANGENT_DIST )
{
param[6] = param[7] = 0;
mask[6] = mask[7] = 0;
}
if( !(flags & CALIB_RATIONAL_MODEL) )
flags |= CALIB_FIX_K4 + CALIB_FIX_K5 + CALIB_FIX_K6;
if( !(flags & CV_CALIB_THIN_PRISM_MODEL))
flags |= CALIB_FIX_S1_S2_S3_S4;
if( !(flags & CV_CALIB_TILTED_MODEL))
flags |= CALIB_FIX_TAUX_TAUY;
mask[ 4] = !(flags & CALIB_FIX_K1);
mask[ 5] = !(flags & CALIB_FIX_K2);
mask[ 8] = !(flags & CALIB_FIX_K3);
mask[ 9] = !(flags & CALIB_FIX_K4);
mask[10] = !(flags & CALIB_FIX_K5);
mask[11] = !(flags & CALIB_FIX_K6);
if(flags & CALIB_FIX_S1_S2_S3_S4)
{
mask[12] = 0;
mask[13] = 0;
mask[14] = 0;
mask[15] = 0;
}
if(flags & CALIB_FIX_TAUX_TAUY)
{
mask[16] = 0;
mask[17] = 0;
}
}
// 2. initialize extrinsic parameters
for( i = 0, pos = 0; i < nimages; i++, pos += ni )
{
CvMat _ri, _ti;
ni = npoints->data.i[i*npstep];
cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
CvMat _Mi(matM.colRange(pos, pos + ni));
CvMat _mi(_m.colRange(pos, pos + ni));
cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
}
// 3. run the optimization
for(;;)
{
const CvMat* _param = 0;
CvMat *_JtJ = 0, *_JtErr = 0;
double* _errNorm = 0;
bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;
if( flags & CALIB_FIX_ASPECT_RATIO )
{
param[0] = param[1]*aspectRatio;
pparam[0] = pparam[1]*aspectRatio;
}
A(0, 0) = param[0]; A(1, 1) = param[1]; A(0, 2) = param[2]; A(1, 2) = param[3];
std::copy(param + 4, param + 4 + 14, k);
if( !proceed ) {
//do errors estimation
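// sigma^2 is the residual variance of the final fit; per-parameter standard deviations
// are sqrt(diag((JtJ)^-1) * sigma^2), computed over the non-fixed parameters only.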
if(stdDevs) {
Ptr<CvMat> JtJ(cvCreateMat(nparams, nparams, CV_64F));
CvMat cvMatM(matM);
cvEvaluateJtJ2(JtJ, &matA, &_k, &cvMatM, solver.param, npoints, flags, NINTRINSIC, aspectRatio);
Mat mask = cvarrToMat(solver.mask);
int nparams_nz = countNonZero(mask);
Mat JtJinv, JtJN;
JtJN.create(nparams_nz, nparams_nz, CV_64F);
subMatrix(cvarrToMat(JtJ), JtJN, mask, mask);
completeSymm(JtJN, false);
#ifndef USE_LAPACK
cv::invert(JtJN, JtJinv, DECOMP_SVD);
#else
cvfork::invert(JtJN, JtJinv, DECOMP_SVD);
#endif
double sigma2 = norm(allErrors, NORM_L2SQR) / (total - nparams_nz);
Mat stdDevsM = cvarrToMat(stdDevs);
int j = 0;
for (int s = 0; s < nparams; s++)
if(mask.data[s]) {
stdDevsM.at<double>(s) = std::sqrt(JtJinv.at<double>(j,j)*sigma2);
j++;
}
else
stdDevsM.at<double>(s) = 0;
}
break;
}
reprojErr = 0;
for( i = 0, pos = 0; i < nimages; i++, pos += ni )
{
CvMat _ri, _ti;
ni = npoints->data.i[i*npstep];
cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
CvMat _Mi(matM.colRange(pos, pos + ni));
CvMat _mi(_m.colRange(pos, pos + ni));
CvMat _me(allErrors.colRange(pos, pos + ni));
_Je.resize(ni*2); _Ji.resize(ni*2); _err.resize(ni*2);
CvMat _dpdr(_Je.colRange(0, 3));
CvMat _dpdt(_Je.colRange(3, 6));
CvMat _dpdf(_Ji.colRange(0, 2));
CvMat _dpdc(_Ji.colRange(2, 4));
CvMat _dpdk(_Ji.colRange(4, NINTRINSIC));
CvMat _mp(_err.reshape(2, 1));
if( solver.state == CvLevMarq::CALC_J )
{
cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
(flags & CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
(flags & CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
(flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
}
else
cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp );
cvSub( &_mp, &_mi, &_mp );
if( solver.state == CvLevMarq::CALC_J )
{
Mat JtJ(cvarrToMat(_JtJ)), JtErr(cvarrToMat(_JtErr));
// see HZ: (A6.14) for details on the structure of the Jacobian
JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji;
JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) = _Je.t() * _Je;
JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) = _Ji.t() * _Je;
JtErr.rowRange(0, NINTRINSIC) += _Ji.t() * _err;
JtErr.rowRange(NINTRINSIC + i * 6, NINTRINSIC + (i + 1) * 6) = _Je.t() * _err;
}
if (stdDevs || perViewErrors)
cvCopy(&_mp, &_me);
reprojErr += norm(_err, NORM_L2SQR);
}
if( _errNorm )
*_errNorm = reprojErr;
}
// 4. store the results
cvConvert( &matA, cameraMatrix );
cvConvert( &_k, distCoeffs );
for( i = 0, pos = 0; i < nimages; i++)
{
CvMat src, dst;
if( perViewErrors )
{
ni = npoints->data.i[i*npstep];
perViewErrors->data.db[i] = std::sqrt(cv::norm(allErrors.colRange(pos, pos + ni), NORM_L2SQR) / ni);
pos+=ni;
}
if( rvecs )
{
src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
{
dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
rvecs->data.ptr + rvecs->step*i );
cvRodrigues2( &src, &matA );
cvConvert( &matA, &dst );
}
else
{
dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
rvecs->data.ptr + rvecs->step*i );
cvConvert( &src, &dst );
}
}
if( tvecs )
{
src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
dst = cvMat( 3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 ?
tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
tvecs->data.ptr + tvecs->step*i );
cvConvert( &src, &dst );
}
}
return std::sqrt(reprojErr/total);
}
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
{
Mat cameraMatrix = Mat::eye(3, 3, rtype);
if( cameraMatrix0.size() == cameraMatrix.size() )
cameraMatrix0.convertTo(cameraMatrix, rtype);
return cameraMatrix;
}
static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
{
Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 14) : Size(14, 1), rtype);
if( distCoeffs0.size() == Size(1, 4) ||
distCoeffs0.size() == Size(1, 5) ||
distCoeffs0.size() == Size(1, 8) ||
distCoeffs0.size() == Size(1, 12) ||
distCoeffs0.size() == Size(1, 14) ||
distCoeffs0.size() == Size(4, 1) ||
distCoeffs0.size() == Size(5, 1) ||
distCoeffs0.size() == Size(8, 1) ||
distCoeffs0.size() == Size(12, 1) ||
distCoeffs0.size() == Size(14, 1) )
{
Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
distCoeffs0.convertTo(dstCoeffs, rtype);
}
return distCoeffs;
}
static void collectCalibrationData( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
Mat& npoints )
{
int nimages = (int)objectPoints.total();
int i, j = 0, ni = 0, total = 0;
CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
(!imgPtMat2 || nimages == (int)imagePoints2.total()));
for( i = 0; i < nimages; i++ )
{
ni = objectPoints.getMat(i).checkVector(3, CV_32F);
if( ni <= 0 )
CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
int ni1 = imagePoints1.getMat(i).checkVector(2, CV_32F);
if( ni1 <= 0 )
CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
CV_Assert( ni == ni1 );
total += ni;
}
npoints.create(1, (int)nimages, CV_32S);
objPtMat.create(1, (int)total, CV_32FC3);
imgPtMat1.create(1, (int)total, CV_32FC2);
Point2f* imgPtData2 = 0;
if( imgPtMat2 )
{
imgPtMat2->create(1, (int)total, CV_32FC2);
imgPtData2 = imgPtMat2->ptr<Point2f>();
}
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
for( i = 0; i < nimages; i++, j += ni )
{
Mat objpt = objectPoints.getMat(i);
Mat imgpt1 = imagePoints1.getMat(i);
ni = objpt.checkVector(3, CV_32F);
npoints.at<int>(i) = ni;
memcpy( objPtData + j, objpt.ptr(), ni*sizeof(objPtData[0]) );
memcpy( imgPtData1 + j, imgpt1.ptr(), ni*sizeof(imgPtData1[0]) );
if( imgPtData2 )
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
memcpy( imgPtData2 + j, imgpt2.ptr(), ni*sizeof(imgPtData2[0]) );
}
}
}
double cvfork::calibrateCamera(InputArrayOfArrays _objectPoints,
InputArrayOfArrays _imagePoints,
Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, OutputArray _stdDeviations, OutputArray _perViewErrors, int flags, TermCriteria criteria )
{
int rtype = CV_64F;
Mat cameraMatrix = _cameraMatrix.getMat();
cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
Mat distCoeffs = _distCoeffs.getMat();
distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
if( !(flags & CALIB_RATIONAL_MODEL) &&
(!(flags & CALIB_THIN_PRISM_MODEL)) &&
(!(flags & CALIB_TILTED_MODEL)))
distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
int nimages = int(_objectPoints.total());
CV_Assert( nimages > 0 );
Mat objPt, imgPt, npoints, rvecM, tvecM, stdDeviationsM, errorsM;
bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed(),
stddev_needed = _stdDeviations.needed(), errors_needed = _perViewErrors.needed();
bool rvecs_mat_vec = _rvecs.isMatVector();
bool tvecs_mat_vec = _tvecs.isMatVector();
if( rvecs_needed ) {
_rvecs.create(nimages, 1, CV_64FC3);
if(rvecs_mat_vec)
rvecM.create(nimages, 3, CV_64F);
else
rvecM = _rvecs.getMat();
}
if( tvecs_needed ) {
_tvecs.create(nimages, 1, CV_64FC3);
if(tvecs_mat_vec)
tvecM.create(nimages, 3, CV_64F);
else
tvecM = _tvecs.getMat();
}
if( stddev_needed ) {
_stdDeviations.create(nimages*6 + CV_CALIB_NINTRINSIC, 1, CV_64F);
stdDeviationsM = _stdDeviations.getMat();
}
if( errors_needed) {
_perViewErrors.create(nimages, 1, CV_64F);
errorsM = _perViewErrors.getMat();
}
collectCalibrationData( _objectPoints, _imagePoints, noArray(),
objPt, imgPt, 0, npoints );
CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
CvMat c_rvecM = rvecM, c_tvecM = tvecM, c_stdDev = stdDeviationsM, c_errors = errorsM;
double reprojErr = cvfork::cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
&c_cameraMatrix, &c_distCoeffs,
rvecs_needed ? &c_rvecM : NULL,
tvecs_needed ? &c_tvecM : NULL,
stddev_needed ? &c_stdDev : NULL,
errors_needed ? &c_errors : NULL, flags, criteria );
// overly complicated and inefficient rvec/ tvec handling to support vector<Mat>
for(int i = 0; i < nimages; i++ )
{
if( rvecs_needed && rvecs_mat_vec)
{
_rvecs.create(3, 1, CV_64F, i, true);
Mat rv = _rvecs.getMat(i);
memcpy(rv.ptr(), rvecM.ptr(i), 3*sizeof(double));
}
if( tvecs_needed && tvecs_mat_vec)
{
_tvecs.create(3, 1, CV_64F, i, true);
Mat tv = _tvecs.getMat(i);
memcpy(tv.ptr(), tvecM.ptr(i), 3*sizeof(double));
}
}
cameraMatrix.copyTo(_cameraMatrix);
distCoeffs.copyTo(_distCoeffs);
return reprojErr;
}
double cvfork::calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds,
Ptr<aruco::CharucoBoard> &_board, Size imageSize,
InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, OutputArray _stdDeviations, OutputArray _perViewErrors,
int flags, TermCriteria criteria) {
CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total()));
// Join object points of charuco corners in a single vector for calibrateCamera() function
std::vector< std::vector< Point3f > > allObjPoints;
allObjPoints.resize(_charucoIds.total());
for(unsigned int i = 0; i < _charucoIds.total(); i++) {
unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total();
CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total()); //actually nCorners must be > 3 for calibration
allObjPoints[i].reserve(nCorners);
for(unsigned int j = 0; j < nCorners; j++) {
int pointId = _charucoIds.getMat(i).ptr< int >(0)[j];
CV_Assert(pointId >= 0 && pointId < (int)_board->chessboardCorners.size());
allObjPoints[i].push_back(_board->chessboardCorners[pointId]);
}
}
return cvfork::calibrateCamera(allObjPoints, _charucoCorners, imageSize, _cameraMatrix, _distCoeffs,
_rvecs, _tvecs, _stdDeviations, _perViewErrors, flags, criteria);
}
static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>& cols,
const std::vector<uchar>& rows) {
int nonzeros_cols = cv::countNonZero(cols);
cv::Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
for (int i = 0, j = 0; i < (int)cols.size(); i++)
{
if (cols[i])
{
src.col(i).copyTo(tmp.col(j++));
}
}
int nonzeros_rows = cv::countNonZero(rows);
dst.create(nonzeros_rows, nonzeros_cols, CV_64FC1);
for (int i = 0, j = 0; i < (int)rows.size(); i++)
{
if (rows[i])
{
tmp.row(i).copyTo(dst.row(j++));
}
}
}
void cvfork::CvLevMarqFork::step()
{
using namespace cv;
const double LOG10 = log(10.);
double lambda = exp(lambdaLg10*LOG10);
int nparams = param->rows;
Mat _JtJ = cvarrToMat(JtJ);
Mat _mask = cvarrToMat(mask);
int nparams_nz = countNonZero(_mask);
if(!JtJN || JtJN->rows != nparams_nz) {
// prevent re-allocation in every step
JtJN.reset(cvCreateMat( nparams_nz, nparams_nz, CV_64F ));
JtJV.reset(cvCreateMat( nparams_nz, 1, CV_64F ));
JtJW.reset(cvCreateMat( nparams_nz, 1, CV_64F ));
}
Mat _JtJN = cvarrToMat(JtJN);
Mat _JtErr = cvarrToMat(JtJV);
Mat_<double> nonzero_param = cvarrToMat(JtJW);
subMatrix(cvarrToMat(JtErr), _JtErr, std::vector<uchar>(1, 1), _mask);
subMatrix(_JtJ, _JtJN, _mask, _mask);
if( !err )
completeSymm( _JtJN, completeSymmFlag );
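// Marquardt-style damping: scale the diagonal by (1 + lambda) instead of adding
// lambda to it (the disabled Levenberg variant below).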
#if 1
_JtJN.diag() *= 1. + lambda;
#else
_JtJN.diag() += lambda;
#endif
#ifndef USE_LAPACK
cv::solve(_JtJN, _JtErr, nonzero_param, solveMethod);
#else
cvfork::solve(_JtJN, _JtErr, nonzero_param, solveMethod);
#endif
int j = 0;
for( int i = 0; i < nparams; i++ )
param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? nonzero_param(j++) : 0);
}
cvfork::CvLevMarqFork::CvLevMarqFork(int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag)
{
init(nparams, nerrs, criteria0, _completeSymmFlag);
}
cvfork::CvLevMarqFork::~CvLevMarqFork()
{
clear();
}
bool cvfork::CvLevMarqFork::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm )
{
CV_Assert( !err );
if( state == DONE )
{
_param = param;
return false;
}
if( state == STARTED )
{
_param = param;
cvZero( JtJ );
cvZero( JtErr );
errNorm = 0;
_JtJ = JtJ;
_JtErr = JtErr;
_errNorm = &errNorm;
state = CALC_J;
return true;
}
if( state == CALC_J )
{
cvCopy( param, prevParam );
step();
_param = param;
prevErrNorm = errNorm;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
assert( state == CHECK_ERR );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
{
step();
_param = param;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( ++iters >= criteria.max_iter ||
cvNorm(param, prevParam, CV_RELATIVE_L2) < criteria.epsilon )
{
//printf("iters %i\n", iters);
_param = param;
state = DONE;
return false;
}
prevErrNorm = errNorm;
cvZero( JtJ );
cvZero( JtErr );
_param = param;
_JtJ = JtJ;
_JtErr = JtErr;
state = CALC_J;
return true;
}

View File

@@ -0,0 +1,56 @@
#ifndef CV_CALIBRATION_FORK_HPP
#define CV_CALIBRATION_FORK_HPP
#include <opencv2/core.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/calib3d/calib3d_c.h>
namespace cvfork
{
using namespace cv;
#define CV_CALIB_NINTRINSIC 18
#define CALIB_USE_QR (1 << 18)
double calibrateCamera(InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviations,
OutputArray perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
double cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
CvMat* stdDeviations_vector CV_DEFAULT(NULL),
CvMat* perViewErrors_vector CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds,
Ptr<aruco::CharucoBoard> &_board, Size imageSize,
InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, OutputArray _stdDeviations, OutputArray _perViewErrors,
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
class CvLevMarqFork : public CvLevMarq
{
public:
CvLevMarqFork( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm );
void step();
~CvLevMarqFork();
};
}
#endif

View File

@@ -0,0 +1,14 @@
<?xml version="1.0"?>
<opencv_storage>
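<!-- Values map to the fields of captureParameters/internalParameters in calibCommon.hpp:
calibration_step is the number of captured frames between calibration runs,
frame_filter_conv_param is the filtering weight (mAlpha) used by calibDataController::filterFrames() -->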
<charuco_dict>0</charuco_dict>
<charuco_square_lenght>200</charuco_square_lenght>
<charuco_marker_size>100</charuco_marker_size>
<calibration_step>1</calibration_step>
<max_frames_num>30</max_frames_num>
<min_frames_num>10</min_frames_num>
<solver_eps>1e-7</solver_eps>
<solver_max_iters>30</solver_max_iters>
<fast_solver>0</fast_solver>
<frame_filter_conv_param>0.1</frame_filter_conv_param>
<camera_resolution>800 600</camera_resolution>
</opencv_storage>

View File

@@ -0,0 +1,518 @@
#include "frameProcessor.hpp"
#include "rotationConverters.hpp"
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <string>
#include <algorithm>
#include <limits>
using namespace calib;
#define VIDEO_TEXT_SIZE 4
#define POINT_SIZE 5
static cv::SimpleBlobDetector::Params getDetectorParams()
{
cv::SimpleBlobDetector::Params detectorParams;
detectorParams.thresholdStep = 40;
detectorParams.minThreshold = 20;
detectorParams.maxThreshold = 500;
detectorParams.minRepeatability = 2;
detectorParams.minDistBetweenBlobs = 5;
detectorParams.filterByColor = true;
detectorParams.blobColor = 0;
detectorParams.filterByArea = true;
detectorParams.minArea = 5;
detectorParams.maxArea = 5000;
detectorParams.filterByCircularity = false;
detectorParams.minCircularity = 0.8f;
detectorParams.maxCircularity = std::numeric_limits<float>::max();
detectorParams.filterByInertia = true;
detectorParams.minInertiaRatio = 0.1f;
detectorParams.maxInertiaRatio = std::numeric_limits<float>::max();
detectorParams.filterByConvexity = true;
detectorParams.minConvexity = 0.8f;
detectorParams.maxConvexity = std::numeric_limits<float>::max();
return detectorParams;
}
FrameProcessor::~FrameProcessor()
{
}
bool CalibProcessor::detectAndParseChessboard(const cv::Mat &frame)
{
int chessBoardFlags = cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_FAST_CHECK;
bool isTemplateFound = cv::findChessboardCorners(frame, mBoardSize, mCurrentImagePoints, chessBoardFlags);
if (isTemplateFound) {
cv::Mat viewGray;
cv::cvtColor(frame, viewGray, cv::COLOR_BGR2GRAY);
cv::cornerSubPix(viewGray, mCurrentImagePoints, cv::Size(11,11),
cv::Size(-1,-1), cv::TermCriteria( cv::TermCriteria::EPS+cv::TermCriteria::COUNT, 30, 0.1 ));
cv::drawChessboardCorners(frame, mBoardSize, cv::Mat(mCurrentImagePoints), isTemplateFound);
mTemplateLocations.insert(mTemplateLocations.begin(), mCurrentImagePoints[0]);
}
return isTemplateFound;
}
bool CalibProcessor::detectAndParseChAruco(const cv::Mat &frame)
{
cv::Ptr<cv::aruco::Board> board = mCharucoBoard.staticCast<cv::aruco::Board>();
std::vector<std::vector<cv::Point2f> > corners, rejected;
std::vector<int> ids;
cv::aruco::detectMarkers(frame, mArucoDictionary, corners, ids, cv::aruco::DetectorParameters::create(), rejected);
cv::aruco::refineDetectedMarkers(frame, board, corners, ids, rejected);
cv::Mat currentCharucoCorners, currentCharucoIds;
if(ids.size() > 0) {
cv::aruco::interpolateCornersCharuco(corners, ids, frame, mCharucoBoard, currentCharucoCorners,
currentCharucoIds);
cv::aruco::drawDetectedMarkers(frame, corners);
}
if(currentCharucoCorners.total() > 3) {
float centerX = 0, centerY = 0;
for (int i = 0; i < currentCharucoCorners.size[0]; i++) {
centerX += currentCharucoCorners.at<float>(i, 0);
centerY += currentCharucoCorners.at<float>(i, 1);
}
centerX /= currentCharucoCorners.size[0];
centerY /= currentCharucoCorners.size[0];
//cv::circle(frame, cv::Point2f(centerX, centerY), 10, cv::Scalar(0, 255, 0), 10);
mTemplateLocations.insert(mTemplateLocations.begin(), cv::Point2f(centerX, centerY));
cv::aruco::drawDetectedCornersCharuco(frame, currentCharucoCorners, currentCharucoIds);
mCurrentCharucoCorners = currentCharucoCorners;
mCurrentCharucoIds = currentCharucoIds;
return true;
}
return false;
}
bool CalibProcessor::detectAndParseACircles(const cv::Mat &frame)
{
bool isTemplateFound = findCirclesGrid(frame, mBoardSize, mCurrentImagePoints, cv::CALIB_CB_ASYMMETRIC_GRID, mBlobDetectorPtr);
if(isTemplateFound) {
mTemplateLocations.insert(mTemplateLocations.begin(), mCurrentImagePoints[0]);
cv::drawChessboardCorners(frame, mBoardSize, cv::Mat(mCurrentImagePoints), isTemplateFound);
}
return isTemplateFound;
}
bool CalibProcessor::detectAndParseDualACircles(const cv::Mat &frame)
{
std::vector<cv::Point2f> blackPointbuf;
cv::Mat invertedView;
cv::bitwise_not(frame, invertedView);
bool isWhiteGridFound = cv::findCirclesGrid(frame, mBoardSize, mCurrentImagePoints, cv::CALIB_CB_ASYMMETRIC_GRID, mBlobDetectorPtr);
if(!isWhiteGridFound)
return false;
bool isBlackGridFound = cv::findCirclesGrid(invertedView, mBoardSize, blackPointbuf, cv::CALIB_CB_ASYMMETRIC_GRID, mBlobDetectorPtr);
if(!isBlackGridFound)
{
mCurrentImagePoints.clear();
return false;
}
cv::drawChessboardCorners(frame, mBoardSize, cv::Mat(mCurrentImagePoints), isWhiteGridFound);
cv::drawChessboardCorners(frame, mBoardSize, cv::Mat(blackPointbuf), isBlackGridFound);
mCurrentImagePoints.insert(mCurrentImagePoints.end(), blackPointbuf.begin(), blackPointbuf.end());
mTemplateLocations.insert(mTemplateLocations.begin(), mCurrentImagePoints[0]);
return true;
}
void CalibProcessor::saveFrameData()
{
std::vector<cv::Point3f> objectPoints;
switch(mBoardType)
{
case Chessboard:
objectPoints.reserve(mBoardSize.height*mBoardSize.width);
for( int i = 0; i < mBoardSize.height; ++i )
for( int j = 0; j < mBoardSize.width; ++j )
objectPoints.push_back(cv::Point3f(j*mSquareSize, i*mSquareSize, 0));
mCalibData->imagePoints.push_back(mCurrentImagePoints);
mCalibData->objectPoints.push_back(objectPoints);
break;
case chAruco:
mCalibData->allCharucoCorners.push_back(mCurrentCharucoCorners);
mCalibData->allCharucoIds.push_back(mCurrentCharucoIds);
break;
case AcirclesGrid:
objectPoints.reserve(mBoardSize.height*mBoardSize.width);
for( int i = 0; i < mBoardSize.height; i++ )
for( int j = 0; j < mBoardSize.width; j++ )
objectPoints.push_back(cv::Point3f((2*j + i % 2)*mSquareSize, i*mSquareSize, 0));
mCalibData->imagePoints.push_back(mCurrentImagePoints);
mCalibData->objectPoints.push_back(objectPoints);
break;
case DoubleAcirclesGrid:
{
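// Both point sets are negated and centered on (gridCenterX, gridCenterY),
// the middle of the combined white/black template.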
float gridCenterX = (2*((float)mBoardSize.width - 1) + 1)*mSquareSize + mTemplDist / 2;
float gridCenterY = (mBoardSize.height - 1)*mSquareSize / 2;
objectPoints.reserve(2*mBoardSize.height*mBoardSize.width);
//white part
for( int i = 0; i < mBoardSize.height; i++ )
for( int j = 0; j < mBoardSize.width; j++ )
objectPoints.push_back(
cv::Point3f(-float((2*j + i % 2)*mSquareSize + mTemplDist +
(2*(mBoardSize.width - 1) + 1)*mSquareSize - gridCenterX),
-float(i*mSquareSize) - gridCenterY,
0));
//black part
for( int i = 0; i < mBoardSize.height; i++ )
for( int j = 0; j < mBoardSize.width; j++ )
objectPoints.push_back(cv::Point3f(-float((2*j + i % 2)*mSquareSize - gridCenterX),
-float(i*mSquareSize) - gridCenterY, 0));
mCalibData->imagePoints.push_back(mCurrentImagePoints);
mCalibData->objectPoints.push_back(objectPoints);
}
break;
}
}
void CalibProcessor::showCaptureMessage(const cv::Mat& frame, const std::string &message)
{
cv::Point textOrigin(100, 100);
double textSize = VIDEO_TEXT_SIZE * frame.cols / (double) IMAGE_MAX_WIDTH;
cv::bitwise_not(frame, frame);
cv::putText(frame, message, textOrigin, 1, textSize, cv::Scalar(0,0,255), 2, cv::LINE_AA);
cv::imshow(mainWindowName, frame);
cv::waitKey(300);
}
bool CalibProcessor::checkLastFrame()
{
bool isFrameBad = false;
cv::Mat tmpCamMatrix;
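// Frames seen at too steep an angle are rejected: estimate the board pose with
// solvePnP and compare its Euler angles against badAngleThresh (in degrees).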
const double badAngleThresh = 40;
if(!mCalibData->cameraMatrix.total()) {
tmpCamMatrix = cv::Mat::eye(3, 3, CV_64F);
tmpCamMatrix.at<double>(0,0) = 20000;
tmpCamMatrix.at<double>(1,1) = 20000;
tmpCamMatrix.at<double>(0,2) = mCalibData->imageSize.width/2;
tmpCamMatrix.at<double>(1,2) = mCalibData->imageSize.height/2;
}
else
mCalibData->cameraMatrix.copyTo(tmpCamMatrix);
if(mBoardType != chAruco) {
cv::Mat r, t, angles;
cv::solvePnP(mCalibData->objectPoints.back(), mCurrentImagePoints, tmpCamMatrix, mCalibData->distCoeffs, r, t);
RodriguesToEuler(r, angles, CALIB_DEGREES);
if(fabs(angles.at<double>(0)) > badAngleThresh || fabs(angles.at<double>(1)) > badAngleThresh) {
mCalibData->objectPoints.pop_back();
mCalibData->imagePoints.pop_back();
isFrameBad = true;
}
}
else {
cv::Mat r, t, angles;
std::vector<cv::Point3f> allObjPoints;
allObjPoints.reserve(mCurrentCharucoIds.total());
for(size_t i = 0; i < mCurrentCharucoIds.total(); i++) {
int pointID = mCurrentCharucoIds.at<int>((int)i);
CV_Assert(pointID >= 0 && pointID < (int)mCharucoBoard->chessboardCorners.size());
allObjPoints.push_back(mCharucoBoard->chessboardCorners[pointID]);
}
cv::solvePnP(allObjPoints, mCurrentCharucoCorners, tmpCamMatrix, mCalibData->distCoeffs, r, t);
RodriguesToEuler(r, angles, CALIB_DEGREES);
if(180.0 - fabs(angles.at<double>(0)) > badAngleThresh || fabs(angles.at<double>(1)) > badAngleThresh) {
isFrameBad = true;
mCalibData->allCharucoCorners.pop_back();
mCalibData->allCharucoIds.pop_back();
}
}
return isFrameBad;
}
CalibProcessor::CalibProcessor(cv::Ptr<calibrationData> data, captureParameters &capParams) :
mCalibData(data), mBoardType(capParams.board), mBoardSize(capParams.boardSize)
{
mCapuredFrames = 0;
mNeededFramesNum = capParams.calibrationStep;
mDelayBetweenCaptures = static_cast<int>(capParams.captureDelay * capParams.fps);
mMaxTemplateOffset = std::sqrt(std::pow(mCalibData->imageSize.height, 2) +
std::pow(mCalibData->imageSize.width, 2)) / 20.0;
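// the template may drift at most 1/20 of the image diagonal during the capture delay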
mSquareSize = capParams.squareSize;
mTemplDist = capParams.templDst;
switch(mBoardType)
{
case chAruco:
mArucoDictionary = cv::aruco::getPredefinedDictionary(
cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
mCharucoBoard = cv::aruco::CharucoBoard::create(mBoardSize.width, mBoardSize.height, capParams.charucoSquareLenght,
capParams.charucoMarkerSize, mArucoDictionary);
break;
case AcirclesGrid:
mBlobDetectorPtr = cv::SimpleBlobDetector::create();
break;
case DoubleAcirclesGrid:
mBlobDetectorPtr = cv::SimpleBlobDetector::create(getDetectorParams());
break;
case Chessboard:
break;
}
}
cv::Mat CalibProcessor::processFrame(const cv::Mat &frame)
{
cv::Mat frameCopy;
frame.copyTo(frameCopy);
bool isTemplateFound = false;
mCurrentImagePoints.clear();
switch(mBoardType)
{
case Chessboard:
isTemplateFound = detectAndParseChessboard(frameCopy);
break;
case chAruco:
isTemplateFound = detectAndParseChAruco(frameCopy);
break;
case AcirclesGrid:
isTemplateFound = detectAndParseACircles(frameCopy);
break;
case DoubleAcirclesGrid:
isTemplateFound = detectAndParseDualACircles(frameCopy);
break;
}
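// auto-capture: fire only when the template has stayed almost static for the whole capture delay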
if(mTemplateLocations.size() > mDelayBetweenCaptures)
mTemplateLocations.pop_back();
if(mTemplateLocations.size() == mDelayBetweenCaptures && isTemplateFound) {
if(cv::norm(mTemplateLocations.front() - mTemplateLocations.back()) < mMaxTemplateOffset) {
saveFrameData();
bool isFrameBad = checkLastFrame();
if (!isFrameBad) {
std::string displayMessage = cv::format("Frame # %d captured", std::max(mCalibData->imagePoints.size(),
mCalibData->allCharucoCorners.size()));
if(!showOverlayMessage(displayMessage))
showCaptureMessage(frame, displayMessage);
mCapuredFrames++;
}
else {
std::string displayMessage = "Frame rejected";
if(!showOverlayMessage(displayMessage))
showCaptureMessage(frame, displayMessage);
}
mTemplateLocations.clear();
mTemplateLocations.reserve(mDelayBetweenCaptures);
}
}
return frameCopy;
}
bool CalibProcessor::isProcessed() const
{
return mCapturedFrames >= mNeededFramesNum;
}
void CalibProcessor::resetState()
{
mCapturedFrames = 0;
mTemplateLocations.clear();
}
CalibProcessor::~CalibProcessor()
{
}
////////////////////////////////////////////
void ShowProcessor::drawBoard(cv::Mat &img, cv::InputArray points)
{
cv::Mat tmpView = cv::Mat::zeros(img.rows, img.cols, CV_8UC3);
std::vector<cv::Point2f> templateHull;
std::vector<cv::Point> poly;
cv::convexHull(points, templateHull);
poly.resize(templateHull.size());
for(size_t i=0; i<templateHull.size();i++)
poly[i] = cv::Point((int)(templateHull[i].x*mGridViewScale), (int)(templateHull[i].y*mGridViewScale));
cv::fillConvexPoly(tmpView, poly, cv::Scalar(0, 255, 0), cv::LINE_AA);
cv::addWeighted(tmpView, .2, img, 1, 0, img);
}
void ShowProcessor::drawGridPoints(const cv::Mat &frame)
{
if(mBoardType != chAruco)
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibdata->imagePoints.begin(); it != mCalibdata->imagePoints.end(); ++it)
for(std::vector<cv::Point2f>::iterator pointIt = (*it).begin(); pointIt != (*it).end(); ++pointIt)
cv::circle(frame, *pointIt, POINT_SIZE, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);
else
for(std::vector<cv::Mat>::iterator it = mCalibdata->allCharucoCorners.begin(); it != mCalibdata->allCharucoCorners.end(); ++it)
for(int i = 0; i < (*it).size[0]; i++)
cv::circle(frame, cv::Point((int)(*it).at<float>(i, 0), (int)(*it).at<float>(i, 1)),
POINT_SIZE, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);
}
ShowProcessor::ShowProcessor(cv::Ptr<calibrationData> data, cv::Ptr<calibController> controller, TemplateType board) :
mCalibdata(data), mController(controller), mBoardType(board)
{
mNeedUndistort = true;
mVisMode = Grid;
mGridViewScale = 0.5;
mTextSize = VIDEO_TEXT_SIZE;
}
cv::Mat ShowProcessor::processFrame(const cv::Mat &frame)
{
if(mCalibdata->cameraMatrix.size[0] && mCalibdata->distCoeffs.size[0]) {
mTextSize = VIDEO_TEXT_SIZE * (double) frame.cols / IMAGE_MAX_WIDTH;
cv::Scalar textColor = cv::Scalar(0,0,255);
cv::Mat frameCopy;
if (mNeedUndistort && mController->getFramesNumberState()) {
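// draw the captured points on the input frame first, so that cv::remap shows them undistorted as well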
if(mVisMode == Grid)
drawGridPoints(frame);
cv::remap(frame, frameCopy, mCalibdata->undistMap1, mCalibdata->undistMap2, cv::INTER_LINEAR);
int baseLine = 100;
cv::Size textSize = cv::getTextSize("Undistorted view", 1, mTextSize, 2, &baseLine);
cv::Point textOrigin(baseLine, frame.rows - (int)(2.5*textSize.height));
cv::putText(frameCopy, "Undistorted view", textOrigin, 1, mTextSize, textColor, 2, cv::LINE_AA);
}
else {
frame.copyTo(frameCopy);
if(mVisMode == Grid)
drawGridPoints(frameCopy);
}
std::string displayMessage;
if(mCalibdata->stdDeviations.at<double>(0) == 0)
displayMessage = cv::format("F = %d RMS = %.3f", (int)mCalibdata->cameraMatrix.at<double>(0,0), mCalibdata->totalAvgErr);
else
displayMessage = cv::format("Fx = %d Fy = %d RMS = %.3f", (int)mCalibdata->cameraMatrix.at<double>(0,0),
(int)mCalibdata->cameraMatrix.at<double>(1,1), mCalibdata->totalAvgErr);
if(mController->getRMSState() && mController->getFramesNumberState())
displayMessage.append(" OK");
int baseLine = 100;
cv::Size textSize = cv::getTextSize(displayMessage, 1, mTextSize - 1, 2, &baseLine);
cv::Point textOrigin = cv::Point(baseLine, 2*textSize.height);
cv::putText(frameCopy, displayMessage, textOrigin, 1, mTextSize - 1, textColor, 2, cv::LINE_AA);
if(mCalibdata->stdDeviations.at<double>(0) == 0)
displayMessage = cv::format("DF = %.2f", mCalibdata->stdDeviations.at<double>(1)*sigmaMult);
else
displayMessage = cv::format("DFx = %.2f DFy = %.2f", mCalibdata->stdDeviations.at<double>(0)*sigmaMult,
mCalibdata->stdDeviations.at<double>(1)*sigmaMult);
if(mController->getConfidenceIntrervalsState() && mController->getFramesNumberState())
displayMessage.append(" OK");
cv::putText(frameCopy, displayMessage, cv::Point(baseLine, 4*textSize.height), 1, mTextSize - 1, textColor, 2, cv::LINE_AA);
if(mController->getCommonCalibrationState()) {
displayMessage = cv::format("Calibration is done");
cv::putText(frameCopy, displayMessage, cv::Point(baseLine, 6*textSize.height), 1, mTextSize - 1, textColor, 2, cv::LINE_AA);
}
int calibFlags = mController->getNewFlags();
displayMessage = "";
if(!(calibFlags & cv::CALIB_FIX_ASPECT_RATIO))
displayMessage.append(cv::format("AR=%.3f ", mCalibdata->cameraMatrix.at<double>(0,0)/mCalibdata->cameraMatrix.at<double>(1,1)));
if(calibFlags & cv::CALIB_ZERO_TANGENT_DIST)
displayMessage.append("TD=0 ");
displayMessage.append(cv::format("K1=%.2f K2=%.2f K3=%.2f", mCalibdata->distCoeffs.at<double>(0), mCalibdata->distCoeffs.at<double>(1),
mCalibdata->distCoeffs.at<double>(4)));
cv::putText(frameCopy, displayMessage, cv::Point(baseLine, frameCopy.rows - (int)(1.5*textSize.height)),
1, mTextSize - 1, textColor, 2, cv::LINE_AA);
return frameCopy;
}
return frame;
}
bool ShowProcessor::isProcessed() const
{
return false;
}
void ShowProcessor::resetState()
{
}
void ShowProcessor::setVisualizationMode(visualisationMode mode)
{
mVisMode = mode;
}
void ShowProcessor::switchVisualizationMode()
{
if(mVisMode == Grid) {
mVisMode = Window;
updateBoardsView();
}
else {
mVisMode = Grid;
cv::destroyWindow(gridWindowName);
}
}
void ShowProcessor::clearBoardsView()
{
cv::imshow(gridWindowName, cv::Mat());
}
void ShowProcessor::updateBoardsView()
{
if(mVisMode == Window) {
cv::Size originSize = mCalibdata->imageSize;
cv::Mat altGridView = cv::Mat::zeros((int)(originSize.height*mGridViewScale), (int)(originSize.width*mGridViewScale), CV_8UC3);
if(mBoardType != chAruco)
for(std::vector<std::vector<cv::Point2f> >::iterator it = mCalibdata->imagePoints.begin(); it != mCalibdata->imagePoints.end(); ++it)
if(mBoardType != DoubleAcirclesGrid)
drawBoard(altGridView, *it);
else {
size_t pointsNum = (*it).size()/2;
std::vector<cv::Point2f> points(pointsNum);
std::copy((*it).begin(), (*it).begin() + pointsNum, points.begin());
drawBoard(altGridView, points);
std::copy((*it).begin() + pointsNum, (*it).begin() + 2*pointsNum, points.begin());
drawBoard(altGridView, points);
}
else
for(std::vector<cv::Mat>::iterator it = mCalibdata->allCharucoCorners.begin(); it != mCalibdata->allCharucoCorners.end(); ++it)
drawBoard(altGridView, *it);
cv::imshow(gridWindowName, altGridView);
}
}
void ShowProcessor::switchUndistort()
{
mNeedUndistort = !mNeedUndistort;
}
void ShowProcessor::setUndistort(bool isEnabled)
{
mNeedUndistort = isEnabled;
}
ShowProcessor::~ShowProcessor()
{
}

View File

@ -0,0 +1,95 @@
#ifndef FRAME_PROCESSOR_HPP
#define FRAME_PROCESSOR_HPP
#include <opencv2/core.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/calib3d.hpp>
#include "calibCommon.hpp"
#include "calibController.hpp"
namespace calib
{
class FrameProcessor
{
protected:
public:
virtual ~FrameProcessor();
virtual cv::Mat processFrame(const cv::Mat& frame) = 0;
virtual bool isProcessed() const = 0;
virtual void resetState() = 0;
};
class CalibProcessor : public FrameProcessor
{
protected:
cv::Ptr<calibrationData> mCalibData;
TemplateType mBoardType;
cv::Size mBoardSize;
std::vector<cv::Point2f> mTemplateLocations;
std::vector<cv::Point2f> mCurrentImagePoints;
cv::Mat mCurrentCharucoCorners;
cv::Mat mCurrentCharucoIds;
cv::Ptr<cv::SimpleBlobDetector> mBlobDetectorPtr;
cv::Ptr<cv::aruco::Dictionary> mArucoDictionary;
cv::Ptr<cv::aruco::CharucoBoard> mCharucoBoard;
int mNeededFramesNum;
unsigned mDelayBetweenCaptures;
int mCapturedFrames;
double mMaxTemplateOffset;
float mSquareSize;
float mTemplDist;
bool detectAndParseChessboard(const cv::Mat& frame);
bool detectAndParseChAruco(const cv::Mat& frame);
bool detectAndParseACircles(const cv::Mat& frame);
bool detectAndParseDualACircles(const cv::Mat& frame);
void saveFrameData();
void showCaptureMessage(const cv::Mat &frame, const std::string& message);
bool checkLastFrame();
public:
CalibProcessor(cv::Ptr<calibrationData> data, captureParameters& capParams);
virtual cv::Mat processFrame(const cv::Mat& frame);
virtual bool isProcessed() const;
virtual void resetState();
~CalibProcessor();
};
enum visualisationMode {Grid, Window};
class ShowProcessor : public FrameProcessor
{
protected:
cv::Ptr<calibrationData> mCalibdata;
cv::Ptr<calibController> mController;
TemplateType mBoardType;
visualisationMode mVisMode;
bool mNeedUndistort;
double mGridViewScale;
double mTextSize;
void drawBoard(cv::Mat& img, cv::InputArray points);
void drawGridPoints(const cv::Mat& frame);
public:
ShowProcessor(cv::Ptr<calibrationData> data, cv::Ptr<calibController> controller, TemplateType board);
virtual cv::Mat processFrame(const cv::Mat& frame);
virtual bool isProcessed() const;
virtual void resetState();
void setVisualizationMode(visualisationMode mode);
void switchVisualizationMode();
void clearBoardsView();
void updateBoardsView();
void switchUndistort();
void setUndistort(bool isEnabled);
~ShowProcessor();
};
}
#endif

View File

@ -0,0 +1,491 @@
#include "linalg.hpp"
#ifdef USE_LAPACK
typedef int integer;
#include <lapacke.h>
#include <cassert>
using namespace cv;
bool cvfork::solve(InputArray _src, const InputArray _src2arg, OutputArray _dst, int method )
{
bool result = true;
Mat src = _src.getMat(), _src2 = _src2arg.getMat();
int type = src.type();
bool is_normal = (method & DECOMP_NORMAL) != 0;
CV_Assert( type == _src2.type() && (type == CV_32F || type == CV_64F) );
method &= ~DECOMP_NORMAL;
CV_Assert( (method != DECOMP_LU && method != DECOMP_CHOLESKY) ||
is_normal || src.rows == src.cols );
double rcond=-1, s1=0, work1=0, *work=0, *s=0;
float frcond=-1, fs1=0, fwork1=0, *fwork=0, *fs=0;
integer m = src.rows, m_ = m, n = src.cols, mn = std::max(m,n),
nm = std::min(m, n), nb = _src2.cols, lwork=-1, liwork=0, iwork1=0,
lda = m, ldx = mn, info=0, rank=0, *iwork=0;
int elem_size = CV_ELEM_SIZE(type);
bool copy_rhs=false;
int buf_size=0;
AutoBuffer<uchar> buffer;
uchar* ptr;
char N[] = {'N', '\0'}, L[] = {'L', '\0'};
Mat src2 = _src2;
_dst.create( src.cols, src2.cols, src.type() );
Mat dst = _dst.getMat();
if( m <= n )
is_normal = false;
else if( is_normal )
m_ = n;
buf_size += (is_normal ? n*n : m*n)*elem_size;
if( m_ != n || nb > 1 || !dst.isContinuous() )
{
copy_rhs = true;
if( is_normal )
buf_size += n*nb*elem_size;
else
buf_size += mn*nb*elem_size;
}
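// the first LAPACK calls below use lwork = -1: a workspace size query whose
// optimal size is returned in work1/fwork1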
if( method == DECOMP_SVD || method == DECOMP_EIG )
{
integer nlvl = cvRound(std::log(std::max(std::min(m_,n)/25., 1.))/CV_LOG2) + 1;
liwork = std::min(m_,n)*(3*std::max(nlvl,(integer)0) + 11);
if( type == CV_32F )
sgelsd_(&m_, &n, &nb, (float*)src.data, &lda, (float*)dst.data, &ldx,
&fs1, &frcond, &rank, &fwork1, &lwork, &iwork1, &info);
else
dgelsd_(&m_, &n, &nb, (double*)src.data, &lda, (double*)dst.data, &ldx,
&s1, &rcond, &rank, &work1, &lwork, &iwork1, &info );
buf_size += nm*elem_size + (liwork + 1)*sizeof(integer);
}
else if( method == DECOMP_QR )
{
if( type == CV_32F )
sgels_(N, &m_, &n, &nb, (float*)src.data, &lda,
(float*)dst.data, &ldx, &fwork1, &lwork, &info );
else
dgels_(N, &m_, &n, &nb, (double*)src.data, &lda,
(double*)dst.data, &ldx, &work1, &lwork, &info );
}
else if( method == DECOMP_LU )
{
buf_size += (n+1)*sizeof(integer);
}
else if( method == DECOMP_CHOLESKY )
;
else
CV_Error( Error::StsBadArg, "Unknown method" );
assert(info == 0);
lwork = cvRound(type == CV_32F ? (double)fwork1 : work1);
buf_size += lwork*elem_size;
buffer.allocate(buf_size);
ptr = (uchar*)buffer;
Mat at(n, m_, type, ptr);
ptr += n*m_*elem_size;
if( method == DECOMP_CHOLESKY || method == DECOMP_EIG )
src.copyTo(at);
else if( !is_normal )
transpose(src, at);
else
mulTransposed(src, at, true);
Mat xt;
if( !is_normal )
{
if( copy_rhs )
{
Mat temp(nb, mn, type, ptr);
ptr += nb*mn*elem_size;
Mat bt = temp.colRange(0, m);
xt = temp.colRange(0, n);
transpose(src2, bt);
}
else
{
src2.copyTo(dst);
xt = Mat(1, n, type, dst.data);
}
}
else
{
if( copy_rhs )
{
xt = Mat(nb, n, type, ptr);
ptr += nb*n*elem_size;
}
else
xt = Mat(1, n, type, dst.data);
// (a'*b)' = b'*a
gemm( src2, src, 1, Mat(), 0, xt, GEMM_1_T );
}
lda = (int)(at.step ? at.step/elem_size : at.cols);
ldx = (int)(xt.step ? xt.step/elem_size : (!is_normal && copy_rhs ? mn : n));
if( method == DECOMP_SVD || method == DECOMP_EIG )
{
if( type == CV_32F )
{
fs = (float*)ptr;
ptr += nm*elem_size;
fwork = (float*)ptr;
ptr += lwork*elem_size;
iwork = (integer*)alignPtr(ptr, sizeof(integer));
sgelsd_(&m_, &n, &nb, (float*)at.data, &lda, (float*)xt.data, &ldx,
fs, &frcond, &rank, fwork, &lwork, iwork, &info);
}
else
{
s = (double*)ptr;
ptr += nm*elem_size;
work = (double*)ptr;
ptr += lwork*elem_size;
iwork = (integer*)alignPtr(ptr, sizeof(integer));
dgelsd_(&m_, &n, &nb, (double*)at.data, &lda, (double*)xt.data, &ldx,
s, &rcond, &rank, work, &lwork, iwork, &info);
}
}
else if( method == DECOMP_QR )
{
if( type == CV_32F )
{
fwork = (float*)ptr;
sgels_(N, &m_, &n, &nb, (float*)at.data, &lda,
(float*)xt.data, &ldx, fwork, &lwork, &info);
}
else
{
work = (double*)ptr;
dgels_(N, &m_, &n, &nb, (double*)at.data, &lda,
(double*)xt.data, &ldx, work, &lwork, &info);
}
}
else if( method == DECOMP_CHOLESKY || (method == DECOMP_LU && is_normal) )
{
if( type == CV_32F )
{
spotrf_(L, &n, (float*)at.data, &lda, &info);
if(info==0)
spotrs_(L, &n, &nb, (float*)at.data, &lda, (float*)xt.data, &ldx, &info);
}
else
{
dpotrf_(L, &n, (double*)at.data, &lda, &info);
if(info==0)
dpotrs_(L, &n, &nb, (double*)at.data, &lda, (double*)xt.data, &ldx, &info);
}
}
else if( method == DECOMP_LU )
{
iwork = (integer*)alignPtr(ptr, sizeof(integer));
if( type == CV_32F )
sgesv_(&n, &nb, (float*)at.data, &lda, iwork, (float*)xt.data, &ldx, &info );
else
dgesv_(&n, &nb, (double*)at.data, &lda, iwork, (double*)xt.data, &ldx, &info );
}
else
assert(0);
result = info == 0;
if( !result )
dst = Scalar(0);
else if( xt.data != dst.data )
transpose( xt, dst );
return result;
}
static void _SVDcompute( const InputArray _aarr, OutputArray _w,
OutputArray _u, OutputArray _vt, int flags = 0)
{
Mat a = _aarr.getMat(), u, vt;
integer m = a.rows, n = a.cols, mn = std::max(m, n), nm = std::min(m, n);
int type = a.type(), elem_size = (int)a.elemSize();
bool compute_uv = _u.needed() || _vt.needed();
if( flags & SVD::NO_UV )
{
_u.release();
_vt.release();
compute_uv = false;
}
if( compute_uv )
{
_u.create( (int)m, (int)((flags & SVD::FULL_UV) ? m : nm), type );
_vt.create( (int)((flags & SVD::FULL_UV) ? n : nm), n, type );
u = _u.getMat();
vt = _vt.getMat();
}
_w.create(nm, 1, type, -1, true);
Mat _a = a, w = _w.getMat();
CV_Assert( w.isContinuous() );
int work_ofs=0, iwork_ofs=0, buf_size = 0;
bool temp_a = false;
double u1=0, v1=0, work1=0;
float uf1=0, vf1=0, workf1=0;
integer lda, ldu, ldv, lwork=-1, iwork1=0, info=0;
char mode[] = {compute_uv ? 'S' : 'N', '\0'};
if( m != n && compute_uv && (flags & SVD::FULL_UV) )
mode[0] = 'A';
if( !(flags & SVD::MODIFY_A) )
{
if( mode[0] == 'N' || mode[0] == 'A' )
temp_a = true;
else if( compute_uv && (a.size() == vt.size() || a.size() == u.size()) && mode[0] == 'S' )
mode[0] = 'O';
}
lda = a.cols;
ldv = ldu = mn;
if( type == CV_32F )
{
sgesdd_(mode, &n, &m, (float*)a.data, &lda, (float*)w.data,
&vf1, &ldv, &uf1, &ldu, &workf1, &lwork, &iwork1, &info );
lwork = cvRound(workf1);
}
else
{
dgesdd_(mode, &n, &m, (double*)a.data, &lda, (double*)w.data,
&v1, &ldv, &u1, &ldu, &work1, &lwork, &iwork1, &info );
lwork = cvRound(work1);
}
assert(info == 0);
if( temp_a )
{
buf_size += n*m*elem_size;
}
work_ofs = buf_size;
buf_size += lwork*elem_size;
buf_size = alignSize(buf_size, sizeof(integer));
iwork_ofs = buf_size;
buf_size += 8*nm*sizeof(integer);
AutoBuffer<uchar> buf(buf_size);
uchar* buffer = (uchar*)buf;
if( temp_a )
{
_a = Mat(a.rows, a.cols, type, buffer );
a.copyTo(_a);
}
if( !(flags & SVD::MODIFY_A) && !temp_a )
{
if( compute_uv && a.size() == vt.size() )
{
a.copyTo(vt);
_a = vt;
}
else if( compute_uv && a.size() == u.size() )
{
a.copyTo(u);
_a = u;
}
}
if( compute_uv )
{
ldv = (int)(vt.step ? vt.step/elem_size : vt.cols);
ldu = (int)(u.step ? u.step/elem_size : u.cols);
}
lda = (int)(_a.step ? _a.step/elem_size : _a.cols);
if( type == CV_32F )
{
sgesdd_(mode, &n, &m, _a.ptr<float>(), &lda, w.ptr<float>(),
vt.data ? vt.ptr<float>() : (float*)&v1, &ldv,
u.data ? u.ptr<float>() : (float*)&u1, &ldu,
(float*)(buffer + work_ofs), &lwork,
(integer*)(buffer + iwork_ofs), &info );
}
else
{
dgesdd_(mode, &n, &m, _a.ptr<double>(), &lda, w.ptr<double>(),
vt.data ? vt.ptr<double>() : &v1, &ldv,
u.data ? u.ptr<double>() : &u1, &ldu,
(double*)(buffer + work_ofs), &lwork,
(integer*)(buffer + iwork_ofs), &info );
}
CV_Assert(info >= 0);
if(info != 0)
{
if( u.data )
u = Scalar(0.);
if( vt.data )
vt = Scalar(0.);
w = Scalar(0.);
}
}
//////////////////////////////////////////////////////////
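// MatrAXPY: for each of the m rows performs y_row += a[i]*x_row, with the inner loop unrolled by 4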
template<typename T1, typename T2, typename T3> static void
MatrAXPY( int m, int n, const T1* x, int dx,
const T2* a, int inca, T3* y, int dy )
{
int i, j;
for( i = 0; i < m; i++, x += dx, y += dy )
{
T2 s = a[i*inca];
for( j = 0; j <= n - 4; j += 4 )
{
T3 t0 = (T3)(y[j] + s*x[j]);
T3 t1 = (T3)(y[j+1] + s*x[j+1]);
y[j] = t0;
y[j+1] = t1;
t0 = (T3)(y[j+2] + s*x[j+2]);
t1 = (T3)(y[j+3] + s*x[j+3]);
y[j+2] = t0;
y[j+3] = t1;
}
for( ; j < n; j++ )
y[j] = (T3)(y[j] + s*x[j]);
}
}
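// SVD back substitution: x = v * diag(1/w) * u^T * b; singular values below
// the relative threshold are treated as zeros (pseudo-inverse behaviour)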
template<typename T> static void
SVBkSb( int m, int n, const T* w, int incw,
const T* u, int ldu, int uT,
const T* v, int ldv, int vT,
const T* b, int ldb, int nb,
T* x, int ldx, double* buffer, T eps )
{
double threshold = 0;
int udelta0 = uT ? ldu : 1, udelta1 = uT ? 1 : ldu;
int vdelta0 = vT ? ldv : 1, vdelta1 = vT ? 1 : ldv;
int i, j, nm = std::min(m, n);
if( !b )
nb = m;
for( i = 0; i < n; i++ )
for( j = 0; j < nb; j++ )
x[i*ldx + j] = 0;
for( i = 0; i < nm; i++ )
threshold += w[i*incw];
threshold *= eps;
// v * inv(w) * uT * b
for( i = 0; i < nm; i++, u += udelta0, v += vdelta0 )
{
double wi = w[i*incw];
if( wi <= threshold )
continue;
wi = 1/wi;
if( nb == 1 )
{
double s = 0;
if( b )
for( j = 0; j < m; j++ )
s += u[j*udelta1]*b[j*ldb];
else
s = u[0];
s *= wi;
for( j = 0; j < n; j++ )
x[j*ldx] = (T)(x[j*ldx] + s*v[j*vdelta1]);
}
else
{
if( b )
{
for( j = 0; j < nb; j++ )
buffer[j] = 0;
MatrAXPY( m, nb, b, ldb, u, udelta1, buffer, 0 );
for( j = 0; j < nb; j++ )
buffer[j] *= wi;
}
else
{
for( j = 0; j < nb; j++ )
buffer[j] = u[j*udelta1]*wi;
}
MatrAXPY( n, nb, buffer, 0, v, vdelta1, x, ldx );
}
}
}
static void _backSubst( const InputArray _w, const InputArray _u, const InputArray _vt,
const InputArray _rhs, OutputArray _dst )
{
Mat w = _w.getMat(), u = _u.getMat(), vt = _vt.getMat(), rhs = _rhs.getMat();
int type = w.type(), esz = (int)w.elemSize();
int m = u.rows, n = vt.cols, nb = rhs.data ? rhs.cols : m;
AutoBuffer<double> buffer(nb);
CV_Assert( u.data && vt.data && w.data );
CV_Assert( rhs.data == 0 || (rhs.type() == type && rhs.rows == m) );
_dst.create( n, nb, type );
Mat dst = _dst.getMat();
if( type == CV_32F )
SVBkSb(m, n, (float*)w.data, 1, (float*)u.data, (int)(u.step/esz), false,
(float*)vt.data, (int)(vt.step/esz), true, (float*)rhs.data, (int)(rhs.step/esz),
nb, (float*)dst.data, (int)(dst.step/esz), buffer, 10*FLT_EPSILON );
else if( type == CV_64F )
SVBkSb(m, n, (double*)w.data, 1, (double*)u.data, (int)(u.step/esz), false,
(double*)vt.data, (int)(vt.step/esz), true, (double*)rhs.data, (int)(rhs.step/esz),
nb, (double*)dst.data, (int)(dst.step/esz), buffer, 2*DBL_EPSILON );
else
CV_Error( Error::StsUnsupportedFormat, "" );
}
///////////////////////////////////////////
#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x]
#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x]
#define Df( y, x ) ((float*)(dstdata + y*dststep))[x]
#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x]
double cvfork::invert( InputArray _src, OutputArray _dst, int method )
{
Mat src = _src.getMat();
int type = src.type();
CV_Assert(type == CV_32F || type == CV_64F);
size_t esz = CV_ELEM_SIZE(type);
int m = src.rows, n = src.cols;
if( method == DECOMP_SVD )
{
int nm = std::min(m, n);
AutoBuffer<uchar> _buf((m*nm + nm + nm*n)*esz + sizeof(double));
uchar* buf = alignPtr((uchar*)_buf, (int)esz);
Mat u(m, nm, type, buf);
Mat w(nm, 1, type, u.ptr() + m*nm*esz);
Mat vt(nm, n, type, w.ptr() + nm*esz);
_SVDcompute(src, w, u, vt);
_backSubst(w, u, vt, Mat(), _dst);
return type == CV_32F ?
(w.ptr<float>()[0] >= FLT_EPSILON ?
w.ptr<float>()[n-1]/w.ptr<float>()[0] : 0) :
(w.ptr<double>()[0] >= DBL_EPSILON ?
w.ptr<double>()[n-1]/w.ptr<double>()[0] : 0);
}
return 0;
}
#endif //USE_LAPACK

View File

@ -0,0 +1,13 @@
#ifndef LINALG_HPP
#define LINALG_HPP
#include <opencv2/core.hpp>
namespace cvfork {
double invert( cv::InputArray _src, cv::OutputArray _dst, int method );
bool solve(cv::InputArray _src, cv::InputArray _src2arg, cv::OutputArray _dst, int method );
}
#endif

View File

@ -0,0 +1,210 @@
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/cvconfig.h>
#include <opencv2/highgui.hpp>
#include <string>
#include <vector>
#include <stdexcept>
#include <algorithm>
#include <iostream>
#include "calibCommon.hpp"
#include "calibPipeline.hpp"
#include "frameProcessor.hpp"
#include "cvCalibrationFork.hpp"
#include "calibController.hpp"
#include "parametersController.hpp"
#include "rotationConverters.hpp"
using namespace calib;
const std::string keys =
"{v | | Input from video file }"
"{ci | 0 | Default camera id }"
"{flip | false | Vertical flip of input frames }"
"{t | circles | Template for calibration (circles, chessboard, dualCircles, chAruco) }"
"{sz | 16.3 | Distance between two nearest centers of circles or squares on calibration board}"
"{dst | 295 | Distance between white and black parts of daulCircles template}"
"{w | | Width of template (in corners or circles)}"
"{h | | Height of template (in corners or circles)}"
"{of | cameraParameters.xml | Output file name}"
"{ft | true | Auto tuning of calibration flags}"
"{vis | grid | Captured boards visualisation (grid, window)}"
"{d | 0.8 | Min delay between captures}"
"{pf | defaultConfig.xml| Advanced application parameters}"
"{help | | Print help}";
bool calib::showOverlayMessage(const std::string& message)
{
#ifdef HAVE_QT
cv::displayOverlay(mainWindowName, message, OVERLAY_DELAY);
return true;
#else
std::cout << message << std::endl;
return false;
#endif
}
static void deleteButton(int state, void* data)
{
state++; //to avoid gcc warnings
(static_cast<cv::Ptr<calibDataController>*>(data))->get()->deleteLastFrame();
calib::showOverlayMessage("Last frame deleted");
}
static void deleteAllButton(int state, void* data)
{
state++;
(static_cast<cv::Ptr<calibDataController>*>(data))->get()->deleteAllData();
calib::showOverlayMessage("All frames deleted");
}
static void saveCurrentParamsButton(int state, void* data)
{
state++;
if((static_cast<cv::Ptr<calibDataController>*>(data))->get()->saveCurrentCameraParameters())
calib::showOverlayMessage("Calibration parameters saved");
}
#ifdef HAVE_QT
static void switchVisualizationModeButton(int state, void* data)
{
state++;
ShowProcessor* processor = static_cast<ShowProcessor*>(((cv::Ptr<FrameProcessor>*)data)->get());
processor->switchVisualizationMode();
}
static void undistortButton(int state, void* data)
{
ShowProcessor* processor = static_cast<ShowProcessor*>(((cv::Ptr<FrameProcessor>*)data)->get());
processor->setUndistort(static_cast<bool>(state));
calib::showOverlayMessage(std::string("Undistort is ") +
(static_cast<bool>(state) ? std::string("on") : std::string("off")));
}
#endif //HAVE_QT
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv, keys);
if(parser.has("help")) {
parser.printMessage();
return 0;
}
std::cout << consoleHelp << std::endl;
parametersController paramsController;
if(!paramsController.loadFromParser(parser))
return 0;
captureParameters capParams = paramsController.getCaptureParameters();
internalParameters intParams = paramsController.getInternalParameters();
cv::TermCriteria solverTermCrit = cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS,
intParams.solverMaxIters, intParams.solverEps);
cv::Ptr<calibrationData> globalData(new calibrationData);
if(!parser.has("v")) globalData->imageSize = capParams.cameraResolution;
int calibrationFlags = 0;
if(intParams.fastSolving) calibrationFlags |= CALIB_USE_QR;
cv::Ptr<calibController> controller(new calibController(globalData, calibrationFlags,
parser.get<bool>("ft"), capParams.minFramesNum));
cv::Ptr<calibDataController> dataController(new calibDataController(globalData, capParams.maxFramesNum,
intParams.filterAlpha));
dataController->setParametersFileName(parser.get<std::string>("of"));
cv::Ptr<FrameProcessor> capProcessor, showProcessor;
capProcessor = cv::Ptr<FrameProcessor>(new CalibProcessor(globalData, capParams));
showProcessor = cv::Ptr<FrameProcessor>(new ShowProcessor(globalData, controller, capParams.board));
if(parser.get<std::string>("vis").find("window") == 0) {
static_cast<ShowProcessor*>(showProcessor.get())->setVisualizationMode(Window);
cv::namedWindow(gridWindowName);
cv::moveWindow(gridWindowName, 1280, 500);
}
cv::Ptr<CalibPipeline> pipeline(new CalibPipeline(capParams));
std::vector<cv::Ptr<FrameProcessor> > processors;
processors.push_back(capProcessor);
processors.push_back(showProcessor);
cv::namedWindow(mainWindowName);
cv::moveWindow(mainWindowName, 10, 10);
#ifdef HAVE_QT
cv::createButton("Delete last frame", deleteButton, &dataController, cv::QT_PUSH_BUTTON);
cv::createButton("Delete all frames", deleteAllButton, &dataController, cv::QT_PUSH_BUTTON);
cv::createButton("Undistort", undistortButton, &showProcessor, cv::QT_CHECKBOX, false);
cv::createButton("Save current parameters", saveCurrentParamsButton, &dataController, cv::QT_PUSH_BUTTON);
cv::createButton("Switch visualisation mode", switchVisualizationModeButton, &showProcessor, cv::QT_PUSH_BUTTON);
#endif //HAVE_QT
try {
bool pipelineFinished = false;
while(!pipelineFinished)
{
PipelineExitStatus exitStatus = pipeline->start(processors);
if (exitStatus == Finished) {
if(controller->getCommonCalibrationState())
saveCurrentParamsButton(0, &dataController);
pipelineFinished = true;
continue;
}
else if (exitStatus == Calibrate) {
dataController->rememberCurrentParameters();
globalData->imageSize = pipeline->getImageSize();
calibrationFlags = controller->getNewFlags();
if(capParams.board != chAruco) {
globalData->totalAvgErr =
cvfork::calibrateCamera(globalData->objectPoints, globalData->imagePoints,
globalData->imageSize, globalData->cameraMatrix,
globalData->distCoeffs, cv::noArray(), cv::noArray(),
globalData->stdDeviations, globalData->perViewErrors,
calibrationFlags, solverTermCrit);
}
else {
cv::Ptr<cv::aruco::Dictionary> dictionary =
cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
cv::Ptr<cv::aruco::CharucoBoard> charucoboard =
cv::aruco::CharucoBoard::create(capParams.boardSize.width, capParams.boardSize.height,
capParams.charucoSquareLenght, capParams.charucoMarkerSize, dictionary);
globalData->totalAvgErr =
cvfork::calibrateCameraCharuco(globalData->allCharucoCorners, globalData->allCharucoIds,
charucoboard, globalData->imageSize,
globalData->cameraMatrix, globalData->distCoeffs,
cv::noArray(), cv::noArray(), globalData->stdDeviations,
globalData->perViewErrors, calibrationFlags, solverTermCrit);
}
dataController->updateUndistortMap();
dataController->printParametersToConsole(std::cout);
controller->updateState();
for(int j = 0; j < capParams.calibrationStep; j++)
dataController->filterFrames();
static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
}
else if (exitStatus == DeleteLastFrame) {
deleteButton(0, &dataController);
static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
}
else if (exitStatus == DeleteAllFrames) {
deleteAllButton(0, &dataController);
static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
}
else if (exitStatus == SaveCurrentData) {
saveCurrentParamsButton(0, &dataController);
}
else if (exitStatus == SwitchUndistort)
static_cast<ShowProcessor*>(showProcessor.get())->switchUndistort();
else if (exitStatus == SwitchVisualisation)
static_cast<ShowProcessor*>(showProcessor.get())->switchVisualizationMode();
for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
(*it)->resetState();
}
}
catch (const std::runtime_error& exp) {
std::cout << exp.what() << std::endl;
}
return 0;
}

View File

@ -0,0 +1,138 @@
#include "parametersController.hpp"
#include <iostream>
template <typename T>
static bool readFromNode(cv::FileNode node, T& value)
{
if(!node.isNone()) {
node >> value;
return true;
}
else
return false;
}
static bool checkAssertion(bool value, const std::string& msg)
{
if(!value)
std::cerr << "Error: " << msg << std::endl;
return value;
}
bool calib::parametersController::loadFromFile(const std::string &inputFileName)
{
cv::FileStorage reader;
reader.open(inputFileName, cv::FileStorage::READ);
if(!reader.isOpened()) {
std::cerr << "Warning: Unable to open " << inputFileName <<
" Applicatioin stated with default advanced parameters" << std::endl;
return true;
}
readFromNode(reader["charuco_dict"], mCapParams.charucoDictName);
readFromNode(reader["charuco_square_lenght"], mCapParams.charucoSquareLenght);
readFromNode(reader["charuco_marker_size"], mCapParams.charucoMarkerSize);
readFromNode(reader["camera_resolution"], mCapParams.cameraResolution);
readFromNode(reader["calibration_step"], mCapParams.calibrationStep);
readFromNode(reader["max_frames_num"], mCapParams.maxFramesNum);
readFromNode(reader["min_frames_num"], mCapParams.minFramesNum);
readFromNode(reader["solver_eps"], mInternalParameters.solverEps);
readFromNode(reader["solver_max_iters"], mInternalParameters.solverMaxIters);
readFromNode(reader["fast_solver"], mInternalParameters.fastSolving);
readFromNode(reader["frame_filter_conv_param"], mInternalParameters.filterAlpha);
bool retValue =
checkAssertion(mCapParams.charucoDictName >= 0, "Dict name must be >= 0") &&
checkAssertion(mCapParams.charucoMarkerSize > 0, "Marker size must be positive") &&
checkAssertion(mCapParams.charucoSquareLenght > 0, "Square size must be positive") &&
checkAssertion(mCapParams.minFramesNum > 1, "Minimal number of frames for calibration must be greater than 1") &&
checkAssertion(mCapParams.calibrationStep > 0, "Calibration step must be positive") &&
checkAssertion(mCapParams.maxFramesNum > mCapParams.minFramesNum, "maxFramesNum must be greater than minFramesNum") &&
checkAssertion(mInternalParameters.solverEps > 0, "Solver precision must be positive") &&
checkAssertion(mInternalParameters.solverMaxIters > 0, "Max solver iterations number must be positive") &&
checkAssertion(mInternalParameters.filterAlpha >=0 && mInternalParameters.filterAlpha <=1 ,
"Frame filter convolution parameter must be in [0,1] interval") &&
checkAssertion(mCapParams.cameraResolution.width > 0 && mCapParams.cameraResolution.height > 0,
"Wrong camera resolution values");
reader.release();
return retValue;
}
calib::parametersController::parametersController()
{
}
calib::captureParameters calib::parametersController::getCaptureParameters() const
{
return mCapParams;
}
calib::internalParameters calib::parametersController::getInternalParameters() const
{
return mInternalParameters;
}
bool calib::parametersController::loadFromParser(cv::CommandLineParser &parser)
{
mCapParams.flipVertical = parser.get<bool>("flip");
mCapParams.captureDelay = parser.get<float>("d");
mCapParams.squareSize = parser.get<float>("sz");
mCapParams.templDst = parser.get<float>("dst");
if(!checkAssertion(mCapParams.squareSize > 0, "Distance between corners or circles must be positive"))
return false;
if(!checkAssertion(mCapParams.templDst > 0, "Distance between parts of dual template must be positive"))
return false;
if (parser.has("v")) {
mCapParams.source = File;
mCapParams.videoFileName = parser.get<std::string>("v");
}
else {
mCapParams.source = Camera;
mCapParams.camID = parser.get<int>("ci");
}
std::string templateType = parser.get<std::string>("t");
if(templateType.find("circles", 0) == 0) {
mCapParams.board = AcirclesGrid;
mCapParams.boardSize = cv::Size(4, 11);
}
else if(templateType.find("chessboard", 0) == 0) {
mCapParams.board = Chessboard;
mCapParams.boardSize = cv::Size(7, 7);
}
else if(templateType.find("dualcircles", 0) == 0) {
mCapParams.board = DoubleAcirclesGrid;
mCapParams.boardSize = cv::Size(4, 11);
}
else if(templateType.find("charuco", 0) == 0) {
mCapParams.board = chAruco;
mCapParams.boardSize = cv::Size(6, 8);
mCapParams.charucoDictName = 0;
mCapParams.charucoSquareLenght = 200;
mCapParams.charucoMarkerSize = 100;
}
else {
std::cerr << "Wrong template name\n";
return false;
}
if(parser.has("w") && parser.has("h")) {
mCapParams.boardSize = cv::Size(parser.get<int>("w"), parser.get<int>("h"));
if(!checkAssertion(mCapParams.boardSize.width > 0 && mCapParams.boardSize.height > 0,
"Board size must be positive"))
return false;
}
if(!checkAssertion(parser.get<std::string>("of").find(".xml") != std::string::npos,
"Wrong output file name: correct format is [name].xml"))
return false;
loadFromFile(parser.get<std::string>("pf"));
return true;
}

View File

@ -0,0 +1,29 @@
#ifndef PARAMETERS_CONTROLLER_HPP
#define PARAMETERS_CONTROLLER_HPP
#include <string>
#include <opencv2/core.hpp>
#include "calibCommon.hpp"
namespace calib {
class parametersController
{
protected:
captureParameters mCapParams;
internalParameters mInternalParameters;
bool loadFromFile(const std::string& inputFileName);
public:
parametersController();
parametersController(cv::Ptr<captureParameters> params);
captureParameters getCaptureParameters() const;
internalParameters getInternalParameters() const;
bool loadFromParser(cv::CommandLineParser& parser);
};
}
#endif

View File

@ -0,0 +1,121 @@
#include "rotationConverters.hpp"
#include <cmath>
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#define CALIB_PI 3.14159265358979323846
#define CALIB_PI_2 1.57079632679489661923
void calib::Euler(const cv::Mat& src, cv::Mat& dst, int argType)
{
if((src.rows == 3) && (src.cols == 3))
{
//convert rotation matrix to 3 angles (pitch, yaw, roll)
dst = cv::Mat(3, 1, CV_64F);
double pitch, yaw, roll;
if(src.at<double>(0,2) < -0.998)
{
pitch = -atan2(src.at<double>(1,0), src.at<double>(1,1));
yaw = -CALIB_PI_2;
roll = 0.;
}
else if(src.at<double>(0,2) > 0.998)
{
pitch = atan2(src.at<double>(1,0), src.at<double>(1,1));
yaw = CALIB_PI_2;
roll = 0.;
}
else
{
pitch = atan2(-src.at<double>(1,2), src.at<double>(2,2));
yaw = asin(src.at<double>(0,2));
roll = atan2(-src.at<double>(0,1), src.at<double>(0,0));
}
if(argType == CALIB_DEGREES)
{
pitch *= 180./CALIB_PI;
yaw *= 180./CALIB_PI;
roll *= 180./CALIB_PI;
}
else if(argType != CALIB_RADIANS)
CV_Error(cv::Error::StsBadFlag, "Invalid argument type");
dst.at<double>(0,0) = pitch;
dst.at<double>(1,0) = yaw;
dst.at<double>(2,0) = roll;
}
else if( (src.cols == 1 && src.rows == 3) ||
(src.cols == 3 && src.rows == 1 ) )
{
//convert a vector containing 3 angles (pitch, yaw, roll) to a rotation matrix
double pitch, yaw, roll;
if(src.cols == 1 && src.rows == 3)
{
pitch = src.at<double>(0,0);
yaw = src.at<double>(1,0);
roll = src.at<double>(2,0);
}
else{
pitch = src.at<double>(0,0);
yaw = src.at<double>(0,1);
roll = src.at<double>(0,2);
}
if(argType == CALIB_DEGREES)
{
pitch *= CALIB_PI / 180.;
yaw *= CALIB_PI / 180.;
roll *= CALIB_PI / 180.;
}
else if(argType != CALIB_RADIANS)
CV_Error(cv::Error::StsBadFlag, "Invalid argument type");
dst = cv::Mat(3, 3, CV_64F);
cv::Mat M(3, 3, CV_64F);
cv::Mat i = cv::Mat::eye(3, 3, CV_64F);
i.copyTo(dst);
i.copyTo(M);
double* pR = dst.ptr<double>();
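// dst accumulates R = Rx(pitch) * Ry(yaw) * Rz(roll); start with the rotation about the x axis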
pR[4] = cos(pitch);
pR[7] = sin(pitch);
pR[8] = pR[4];
pR[5] = -pR[7];
double* pM = M.ptr<double>();
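// rotation about the y axis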
pM[0] = cos(yaw);
pM[2] = sin(yaw);
pM[8] = pM[0];
pM[6] = -pM[2];
dst *= M;
i.copyTo(M);
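// rotation about the z axis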
pM[0] = cos(roll);
pM[3] = sin(roll);
pM[4] = pM[0];
pM[1] = -pM[3];
dst *= M;
}
else
CV_Error(cv::Error::StsBadFlag, "Input matrix must be 1x3, 3x1 or 3x3" );
}
void calib::RodriguesToEuler(const cv::Mat& src, cv::Mat& dst, int argType)
{
CV_Assert((src.cols == 1 && src.rows == 3) || (src.cols == 3 && src.rows == 1));
cv::Mat R;
cv::Rodrigues(src, R);
Euler(R, dst, argType);
}
void calib::EulerToRodrigues(const cv::Mat& src, cv::Mat& dst, int argType)
{
CV_Assert((src.cols == 1 && src.rows == 3) || (src.cols == 3 && src.rows == 1));
cv::Mat R;
Euler(src, R, argType);
cv::Rodrigues(R, dst);
}

View File

@ -0,0 +1,16 @@
#ifndef ROTATION_CONVERTERS_HPP
#define ROTATION_CONVERTERS_HPP
#include <opencv2/core.hpp>
namespace calib
{
#define CALIB_RADIANS 0
#define CALIB_DEGREES 1
void Euler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
void RodriguesToEuler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
void EulerToRodrigues(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
}
#endif

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

View File

@ -0,0 +1,198 @@
Interactive camera calibration application {#tutorial_interactive_calibration}
==============================
According to the classical calibration technique, the user must collect all the data first and only then run the @ref cv::calibrateCamera function
to obtain the camera parameters. If the average re-projection error is large or the estimated parameters seem to be wrong, the process of
selecting or collecting data and running @ref cv::calibrateCamera has to be repeated.
The interactive calibration process assumes that after each new data portion the user can see the results and error estimates, and can also
delete the last data portion; finally, when the dataset for calibration is big enough, the automatic data selection process starts.
Main application features
------
The sample application will:
- Determine the distortion matrix and confidence interval for each element
- Determine the camera matrix and confidence interval for each element
- Take input from camera or video file
- Read configuration from XML file
- Save the results into XML file
- Calculate re-projection error
- Reject pattern views taken at sharp angles to prevent the appearance of ill-conditioned Jacobian blocks
- Auto switch calibration flags (fix aspect ratio and elements of distortion matrix if needed)
- Auto detect when calibration is done by using several criteria
- Auto capture of static patterns (the user doesn't need to press any keys to capture a frame, just hold the pattern still for a second)
Supported patterns:
- Black-white chessboard
- Asymmetrical circle pattern
- Dual asymmetrical circle pattern
- chAruco (chessboard with Aruco markers)
Description of parameters
------
The application has two groups of parameters: primary (passed through the command line) and advanced (passed through an XML file).
### Primary parameters:
All of these parameters are passed to the application through the command line.
-[parameter]=[default value]: description
- -v=[filename]: get video from filename, default input -- camera with id=0
- -ci=[0]: get video from camera with specified id
- -flip=[false]: vertical flip of input frames
- -t=[circles]: pattern for calibration (circles, chessboard, dualCircles, chAruco)
- -sz=[16.3]: distance between two nearest centers of circles or squares on calibration board
- -dst=[295]: distance between the white and black parts of the dualCircles pattern
- -w=[width]: width of pattern (in corners or circles)
- -h=[height]: height of pattern (in corners or circles)
- -of=[camParams.xml]: output file name
- -ft=[true]: auto tuning of calibration flags
- -vis=[grid]: captured boards visualization (grid, window)
- -d=[0.8]: delay between captures in seconds
- -pf=[defaultConfig.xml]: advanced application parameters file
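For example, a chessboard calibration using the default camera might be started with a command like
`opencv_interactive-calibration -t=chessboard -w=9 -h=6` (the pattern dimensions here are only an
illustration; pass the dimensions of your own board).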
### Advanced parameters:
By default, the values of the advanced parameters are stored in defaultConfig.xml
@code{.xml}
<?xml version="1.0"?>
<opencv_storage>
<charuco_dict>0</charuco_dict>
<charuco_square_lenght>200</charuco_square_lenght>
<charuco_marker_size>100</charuco_marker_size>
<calibration_step>1</calibration_step>
<max_frames_num>30</max_frames_num>
<min_frames_num>10</min_frames_num>
<solver_eps>1e-7</solver_eps>
<solver_max_iters>30</solver_max_iters>
<fast_solver>0</fast_solver>
<frame_filter_conv_param>0.1</frame_filter_conv_param>
<camera_resolution>1280 720</camera_resolution>
</opencv_storage>
@endcode
- *charuco_dict*: name of the special dictionary which has been used for generation of the chAruco pattern
- *charuco_square_lenght*: size of a square on the chAruco board (in pixels)
- *charuco_marker_size*: size of the Aruco markers on the chAruco board (in pixels)
- *calibration_step*: interval in frames between launches of @ref cv::calibrateCamera
- *max_frames_num*: if the number of frames for calibration is greater than this value, the frame filter starts working.
After filtration the size of the calibration dataset equals *max_frames_num*
- *min_frames_num*: if the number of frames is greater than this value, auto flags tuning, the undistorted view and quality evaluation are turned on
- *solver_eps*: precision of the Levenberg-Marquardt solver in @ref cv::calibrateCamera
- *solver_max_iters*: iterations limit of the solver
- *fast_solver*: if this value is nonzero and LAPACK is found, QR decomposition is used instead of SVD in the solver.
QR is faster than SVD, but potentially less precise
- *frame_filter_conv_param*: parameter used in the linear convolution of the bi-criteria frame filter
- *camera_resolution*: resolution of the camera which is used for calibration
**Note:** *charuco_dict*, *charuco_square_lenght* and *charuco_marker_size* are used for chAruco pattern generation
(see Aruco module description for details: [Aruco tutorials](https://github.com/Itseez/opencv_contrib/tree/master/modules/aruco/tutorials))
Default chAruco pattern:
![](images/charuco_board.png)
Dual circles pattern
------
To make this pattern you need the standard OpenCV circles pattern and a binary inverted copy of it.
Place the two patterns on one plane so that all horizontal lines of circles in one pattern are
continuations of the corresponding lines in the other.
Measure the distance between the patterns as shown in the picture below and pass it as the **dst** command line parameter. Also measure the distance between the centers of the nearest circles and pass
this value as the **sz** command line parameter.
![](images/dualCircles.jpg)
This pattern is very sensitive to the quality of production and measurement.
Data filtration
------
When the size of the calibration dataset is greater than *max_frames_num*, the data
filter starts working. It tries to remove "bad" frames from the dataset: the filter removes the frame
on which \f$loss\_function\f$ takes its maximum.
\f[loss\_function(i)=\alpha RMS(i)+(1-\alpha)reducedGridQuality(i)\f]
**RMS** is the average re-projection error calculated for frame *i*, and **reducedGridQuality**
is the scene coverage quality evaluation without frame *i*. \f$\alpha\f$ equals
**frame_filter_conv_param**.
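The selection rule can be sketched in a few lines of C++ (an illustration only, not the application's
actual implementation; the input vectors are assumed to hold the per-frame values defined above):
@code{.cpp}
#include <cstddef>
#include <vector>

// returns the index of the frame with the maximal loss, i.e. the frame
// that the filter removes from the calibration dataset
static size_t worstFrameIndex(const std::vector<double>& rms,
                              const std::vector<double>& reducedGridQuality,
                              double alpha) // frame_filter_conv_param
{
    size_t worst = 0;
    double worstLoss = -1.;
    for (size_t i = 0; i < rms.size(); i++) {
        double loss = alpha*rms[i] + (1. - alpha)*reducedGridQuality[i];
        if (loss > worstLoss) {
            worstLoss = loss;
            worst = i;
        }
    }
    return worst;
}
@endcode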
Calibration process
------
To start calibration just run the application. Place the pattern in front of the camera and hold it still in some pose.
After that, wait for the capture (a message like "Frame #i captured" will be shown).
The current focal distance and re-projection error will be shown on the main screen. Move the pattern to the next position and repeat the procedure. Try to cover the image plane
uniformly and don't show the pattern at sharp angles to the image plane.
![](images/screen_charuco.jpg)
If the calibration seems to be successful (the confidence intervals and the average re-projection
error are small, and the frame coverage quality and the number of pattern views are big enough), the
application will show a message like the one on the screen below.
![](images/screen_finish.jpg)
Hot keys:
- Esc -- exit application
- s -- save current data to XML file
- r -- delete last frame
- d -- delete all frames
- u -- enable/disable applying undistortion
- v -- switch visualization mode
Results
------
As a result you will get the camera parameters and confidence intervals for them.
Example of an output XML file:
@code{.xml}
<?xml version="1.0"?>
<opencv_storage>
<calibrationDate>"Thu 07 Apr 2016 04:23:03 PM MSK"</calibrationDate>
<framesCount>21</framesCount>
<cameraResolution>
1280 720</cameraResolution>
<cameraMatrix type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
1.2519588293098975e+03 0. 6.6684948780852471e+02 0.
1.2519588293098975e+03 3.6298123112613683e+02 0. 0. 1.</data></cameraMatrix>
<cameraMatrix_std_dev type_id="opencv-matrix">
<rows>4</rows>
<cols>1</cols>
<dt>d</dt>
<data>
0. 1.2887048808572649e+01 2.8536856683866230e+00
2.8341737483430314e+00</data></cameraMatrix_std_dev>
<dist_coeffs type_id="opencv-matrix">
<rows>1</rows>
<cols>5</cols>
<dt>d</dt>
<data>
1.3569117181595716e-01 -8.2513063822554633e-01 0. 0.
1.6412101575010554e+00</data></dist_coeffs>
<dist_coeffs_std_dev type_id="opencv-matrix">
<rows>5</rows>
<cols>1</cols>
<dt>d</dt>
<data>
1.5570675523402111e-02 8.7229075437543435e-02 0. 0.
1.8382427901856876e-01</data></dist_coeffs_std_dev>
<avg_reprojection_error>4.2691743074130178e-01</avg_reprojection_error>
</opencv_storage>
@endcode
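The saved parameters can be loaded back with cv::FileStorage; a minimal sketch (the key names follow
the example file above):
@code{.cpp}
#include <opencv2/core.hpp>

int main()
{
    cv::FileStorage fs("cameraParameters.xml", cv::FileStorage::READ);
    if (!fs.isOpened())
        return 1;
    cv::Mat cameraMatrix, distCoeffs, camStdDev, distStdDev;
    fs["cameraMatrix"] >> cameraMatrix;
    fs["dist_coeffs"] >> distCoeffs;
    fs["cameraMatrix_std_dev"] >> camStdDev;
    fs["dist_coeffs_std_dev"] >> distStdDev;
    double avgError = (double)fs["avg_reprojection_error"];
    (void)avgError; // use the loaded values in your own code
    return 0;
}
@endcode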

View File

@ -30,3 +30,14 @@ how to find out from the 2D images information about the 3D world.
Real time pose estimation of a textured object using ORB features, FlannBased matcher, PnP
approach plus Ransac and Linear Kalman Filter to reject possible bad poses.
- @subpage tutorial_interactive_calibration
*Compatibility:* \> OpenCV 3.1
*Author:* Vladislav Sovrasov
Camera calibration by using either the chessboard, chAruco, asymmetrical circle or dual asymmetrical circle
pattern. The calibration process is continuous, so you can see the results after each new pattern shot.
As an output you get the average re-projection error, intrinsic camera parameters, distortion coefficients and
confidence intervals for all evaluated variables.