resolved conflicts, updated the retina class interface, and optimized a heavy retinacolor process

This commit is contained in:
alexandre benoit
2013-04-29 19:06:35 +02:00
2166 changed files with 342437 additions and 211189 deletions

View File

@@ -1,3 +1,5 @@
add_definitions(-D__OPENCV_BUILD=1)
if(NOT OPENCV_MODULES_PATH)
set(OPENCV_MODULES_PATH "${CMAKE_CURRENT_SOURCE_DIR}")
endif()

View File

@@ -6,7 +6,7 @@ set(the_description "Auxiliary module for Android native camera support")
set(OPENCV_MODULE_TYPE STATIC)
ocv_define_module(androidcamera INTERNAL opencv_core log dl)
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper")
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/camera_wrapper" "${OpenCV_SOURCE_DIR}/android/service/engine/jni/include")
# Android source tree for native camera
SET (ANDROID_SOURCE_TREE "ANDROID_SOURCE_TREE-NOTFOUND" CACHE PATH

View File

@@ -1,4 +1,4 @@
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1)
#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1) && !defined(ANDROID_r4_0_0) && !defined(ANDROID_r4_0_3) && !defined(ANDROID_r4_1_1) && !defined(ANDROID_r4_2_0)
# error Building camera wrapper for your version of Android is not supported by OpenCV. You need to modify OpenCV sources in order to compile camera wrapper for your version of Android.
#endif
@@ -18,7 +18,7 @@
# define MAGIC_OPENCV_TEXTURE_ID (0x10)
#else // defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
//TODO: This is either 2.2 or 2.3. Include the headers for ISurface.h access
#if defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
#include <gui/ISurface.h>
#include <gui/BufferQueue.h>
#else
@@ -60,7 +60,7 @@ using namespace android;
void debugShowFPS();
#if defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
class ConsumerListenerStub: public BufferQueue::ConsumerListener
{
public:
@@ -280,7 +280,7 @@ public:
}
virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1)
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
,camera_frame_metadata_t*
#endif
)
@@ -526,7 +526,7 @@ CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback,
pdstatus = camera->setPreviewTexture(surfaceTexture);
if (pdstatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call; camera migth not work correctly");
#elif defined(ANDROID_r4_1_1)
#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0)
sp<BufferQueue> bufferQueue = new BufferQueue();
sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
bufferQueue->consumerConnect(queueListener);

View File

@@ -2,7 +2,6 @@
#define _CAMERAACTIVITY_H_
#include <camera_properties.h>
//#include <opencv2/core/core.hpp>
class CameraActivity
{

View File

@@ -3,12 +3,15 @@
#include <sys/stat.h>
#include <dirent.h>
#include <android/log.h>
#include <string>
#include <cctype>
#include <vector>
#include <algorithm>
#include <opencv2/core/version.hpp>
#include "camera_activity.hpp"
#include "camera_wrapper.h"
#include "EngineCommon.h"
#include "opencv2/core.hpp"
#undef LOG_TAG
#undef LOGE
@@ -26,8 +29,10 @@
#include <sys/types.h>
#include <dirent.h>
using namespace std;
struct str_greater
{
bool operator() (const cv::String& a, const cv::String& b) { return a > b; }
};
class CameraWrapperConnector
{
@@ -38,17 +43,17 @@ public:
static CameraActivity::ErrorCode getProperty(void* camera, int propIdx, double* value);
static CameraActivity::ErrorCode applyProperties(void** ppcamera);
static void setPathLibFolder(const std::string& path);
static void setPathLibFolder(const cv::String& path);
private:
static std::string pathLibFolder;
static cv::String pathLibFolder;
static bool isConnectedToLib;
static std::string getPathLibFolder();
static std::string getDefaultPathLibFolder();
static cv::String getPathLibFolder();
static cv::String getDefaultPathLibFolder();
static CameraActivity::ErrorCode connectToLib();
static CameraActivity::ErrorCode getSymbolFromLib(void * libHandle, const char* symbolName, void** ppSymbol);
static void fillListWrapperLibs(const string& folderPath, vector<string>& listLibs);
static void fillListWrapperLibs(const cv::String& folderPath, std::vector<cv::String>& listLibs);
static InitCameraConnectC pInitCameraC;
static CloseCameraConnectC pCloseCameraC;
@@ -59,7 +64,7 @@ private:
friend bool nextFrame(void* buffer, size_t bufferSize, void* userData);
};
std::string CameraWrapperConnector::pathLibFolder;
cv::String CameraWrapperConnector::pathLibFolder;
bool CameraWrapperConnector::isConnectedToLib = false;
InitCameraConnectC CameraWrapperConnector::pInitCameraC = 0;
@@ -166,7 +171,7 @@ CameraActivity::ErrorCode CameraWrapperConnector::connectToLib()
}
dlerror();
string folderPath = getPathLibFolder();
cv::String folderPath = getPathLibFolder();
if (folderPath.empty())
{
LOGD("Trying to find native camera in default OpenCV packages");
@@ -175,12 +180,12 @@ CameraActivity::ErrorCode CameraWrapperConnector::connectToLib()
LOGD("CameraWrapperConnector::connectToLib: folderPath=%s", folderPath.c_str());
vector<string> listLibs;
std::vector<cv::String> listLibs;
fillListWrapperLibs(folderPath, listLibs);
std::sort(listLibs.begin(), listLibs.end(), std::greater<string>());
std::sort(listLibs.begin(), listLibs.end(), str_greater());
void * libHandle=0;
string cur_path;
cv::String cur_path;
for(size_t i = 0; i < listLibs.size(); i++) {
cur_path=folderPath + listLibs[i];
LOGD("try to load library '%s'", listLibs[i].c_str());
@@ -246,7 +251,7 @@ CameraActivity::ErrorCode CameraWrapperConnector::getSymbolFromLib(void* libHand
return CameraActivity::NO_ERROR;
}
void CameraWrapperConnector::fillListWrapperLibs(const string& folderPath, vector<string>& listLibs)
void CameraWrapperConnector::fillListWrapperLibs(const cv::String& folderPath, std::vector<cv::String>& listLibs)
{
DIR *dp;
struct dirent *ep;
@@ -265,14 +270,15 @@ void CameraWrapperConnector::fillListWrapperLibs(const string& folderPath, vecto
}
}
std::string CameraWrapperConnector::getDefaultPathLibFolder()
cv::String CameraWrapperConnector::getDefaultPathLibFolder()
{
const string packageList[] = {"tegra3", "armv7a_neon", "armv7a", "armv5", "x86"};
for (size_t i = 0; i < 5; i++)
#define BIN_PACKAGE_NAME(x) "org.opencv.lib_v" CVAUX_STR(CV_VERSION_EPOCH) CVAUX_STR(CV_VERSION_MAJOR) "_" x
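// e.g. with CV_VERSION_EPOCH == 2 and CV_VERSION_MAJOR == 4 (illustrative values),
// BIN_PACKAGE_NAME("armv7a") would expand to "org.opencv.lib_v24_armv7a"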
const char* const packageList[] = {BIN_PACKAGE_NAME("armv7a"), OPENCV_ENGINE_PACKAGE};
for (size_t i = 0; i < sizeof(packageList)/sizeof(packageList[0]); i++)
{
char path[128];
sprintf(path, "/data/data/org.opencv.lib_v%d%d_%s/lib/", CV_MAJOR_VERSION, CV_MINOR_VERSION, packageList[i].c_str());
LOGD("Trying package \"%s\" (\"%s\")", packageList[i].c_str(), path);
sprintf(path, "/data/data/%s/lib/", packageList[i]);
LOGD("Trying package \"%s\" (\"%s\")", packageList[i], path);
DIR* dir = opendir(path);
if (!dir)
@@ -287,10 +293,10 @@ std::string CameraWrapperConnector::getDefaultPathLibFolder()
}
}
return string();
return cv::String();
}
std::string CameraWrapperConnector::getPathLibFolder()
cv::String CameraWrapperConnector::getPathLibFolder()
{
if (!pathLibFolder.empty())
return pathLibFolder;
@@ -301,8 +307,8 @@ std::string CameraWrapperConnector::getPathLibFolder()
LOGD("Library name: %s", dl_info.dli_fname);
LOGD("Library base address: %p", dl_info.dli_fbase);
const char* libName=dl_info.dli_fname;
while( ((*libName)=='/') || ((*libName)=='.') )
const char* libName=dl_info.dli_fname;
while( ((*libName)=='/') || ((*libName)=='.') )
libName++;
char lineBuf[2048];
@@ -310,9 +316,9 @@ std::string CameraWrapperConnector::getPathLibFolder()
if(file)
{
while (fgets(lineBuf, sizeof lineBuf, file) != NULL)
{
//verify that line ends with library name
while (fgets(lineBuf, sizeof lineBuf, file) != NULL)
{
//verify that line ends with library name
int lineLength = strlen(lineBuf);
int libNameLength = strlen(libName);
@@ -325,7 +331,7 @@ std::string CameraWrapperConnector::getPathLibFolder()
if (0 != strncmp(lineBuf + lineLength - libNameLength, libName, libNameLength))
{
//the line does not contain the library name
//the line does not contain the library name
continue;
}
@@ -344,24 +350,24 @@ std::string CameraWrapperConnector::getPathLibFolder()
fclose(file);
return pathBegin;
}
fclose(file);
LOGE("Could not find library path");
}
fclose(file);
LOGE("Could not find library path");
}
else
{
LOGE("Could not read /proc/self/smaps");
LOGE("Could not read /proc/self/smaps");
}
}
else
{
LOGE("Could not get library name and base address");
LOGE("Could not get library name and base address");
}
return string();
return cv::String();
}
void CameraWrapperConnector::setPathLibFolder(const string& path)
void CameraWrapperConnector::setPathLibFolder(const cv::String& path)
{
pathLibFolder=path;
}
@@ -427,7 +433,6 @@ void CameraActivity::applyProperties()
int CameraActivity::getFrameWidth()
{
LOGD("CameraActivity::getFrameWidth()");
if (frameWidth <= 0)
frameWidth = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH);
return frameWidth;
@@ -435,7 +440,6 @@ int CameraActivity::getFrameWidth()
int CameraActivity::getFrameHeight()
{
LOGD("CameraActivity::getFrameHeight()");
if (frameHeight <= 0)
frameHeight = getProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT);
return frameHeight;

View File

@@ -68,7 +68,7 @@ is extended as:
.. math::
\begin{array}{l} \vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\ x' = x/z \\ y' = y/z \\ x'' = x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) \\ y'' = y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' \\ \text{where} \quad r^2 = x'^2 + y'^2 \\ u = f_x*x'' + c_x \\ v = f_y*y'' + c_y \end{array}
\begin{array}{l} \vecthree{x}{y}{z} = R \vecthree{X}{Y}{Z} + t \\ x' = x/z \\ y' = y/z \\ x'' = x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\ y'' = y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_1 r^2 + s_2 r^4 \\ \text{where} \quad r^2 = x'^2 + y'^2 \\ u = f_x*x'' + c_x \\ v = f_y*y'' + c_y \end{array}
:math:`k_1`,
:math:`k_2`,
@@ -78,11 +78,15 @@ is extended as:
:math:`k_6` are radial distortion coefficients.
:math:`p_1` and
:math:`p_2` are tangential distortion coefficients.
:math:`s_1`,
:math:`s_2`,
:math:`s_3`, and
:math:`s_4` are the thin prism distortion coefficients.
Higher-order coefficients are not considered in OpenCV. In the functions below the coefficients are passed or returned as
.. math::
(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])
(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])
vector. That is, if the vector contains four elements, it means that
:math:`k_3=0` .
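As a quick illustration (not part of this patch), such a vector can be packed into a 1x12 ``cv::Mat``; the numeric values below are hypothetical: ::

    cv::Mat distCoeffs = (cv::Mat_<double>(1, 12) <<
        -0.28, 0.09, 0.001, -0.0005, 0.0,   // k1, k2, p1, p2, k3
         0.0,  0.0,  0.0,                   // k4, k5, k6
         0.0,  0.0,  0.0,  0.0);            // s1, s2, s3, s4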
@@ -111,14 +115,12 @@ calibrateCamera
---------------
Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0, TermCriteria criteria=TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )
.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize[, cameraMatrix[, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
.. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* object_points, const CvMat* image_points, const CvMat* point_counts, CvSize image_size, CvMat* camera_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors=NULL, CvMat* translation_vectors=NULL, int flags=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) )
.. ocv:pyoldfunction:: cv.CalibrateCamera2(objectPoints, imagePoints, pointCounts, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags=0)-> None
:param objectPoints: In the new interface it is a vector of vectors of calibration pattern points in the calibration pattern coordinate space. The outer vector contains as many elements as the number of pattern views. If the same calibration pattern is shown in each view and it is fully visible, all the vectors will be the same. That said, it is possible to use partially occluded patterns, or even different patterns in different views; then the vectors will differ. The points are 3D, but since they are given in a pattern coordinate system, if the rig is planar it may make sense to put the model on the XY coordinate plane so that the Z-coordinate of each input object point is 0.
In the old interface all the vectors of object points from different views are concatenated together.
@@ -133,7 +135,7 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
:param cameraMatrix: Output 3x3 floating-point camera matrix :math:`A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` . If ``CV_CALIB_USE_INTRINSIC_GUESS`` and/or ``CV_CALIB_FIX_ASPECT_RATIO`` are specified, some or all of ``fx, fy, cx, cy`` must be initialized before calling the function.
:param distCoeffs: Output vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements.
:param distCoeffs: Output vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements.
:param rvecs: Output vector of rotation vectors (see :ocv:func:`Rodrigues` ) estimated for each pattern view. That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
@@ -153,6 +155,11 @@ Finds the camera intrinsic and extrinsic parameters from several views of a cali
* **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the backward compatibility, this extra flag should be explicitly specified to make the calibration function use the rational model and return 8 coefficients. If the flag is not set, the function computes and returns only 5 distortion coefficients.
* **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the backward compatibility, this extra flag should be explicitly specified to make the calibration function use the thin prism model and return 12 coefficients. If the flag is not set, the function computes and returns only 5 distortion coefficients.
* **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during the optimization. If ``CV_CALIB_USE_INTRINSIC_GUESS`` is set, the coefficient from the supplied ``distCoeffs`` matrix is used. Otherwise, it is set to 0.
:param criteria: Termination criteria for the iterative optimization algorithm.
:param term_crit: same as ``criteria``.
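A minimal sketch of enabling the new thin prism terms (the point containers are assumed to be filled with detected pattern points beforehand; this is an illustration, not part of the patch): ::

    std::vector<std::vector<cv::Point3f> > objectPoints;  // pattern points per view, filled elsewhere
    std::vector<std::vector<cv::Point2f> > imagePoints;   // detected corners per view, filled elsewhere
    cv::Size imageSize(640, 480);
    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs,
                                     cv::CALIB_THIN_PRISM_MODEL);
    // distCoeffs then has 12 elements: (k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4)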
@@ -270,8 +277,6 @@ For points in an image of a stereo pair, computes the corresponding epilines in
.. ocv:cfunction:: void cvComputeCorrespondEpilines( const CvMat* points, int which_image, const CvMat* fundamental_matrix, CvMat* correspondent_lines )
.. ocv:pyoldfunction:: cv.ComputeCorrespondEpilines(points, whichImage, F, lines) -> None
:param points: Input points. :math:`N \times 1` or :math:`1 \times N` matrix of type ``CV_32FC2`` or ``vector<Point2f>`` .
:param whichImage: Index of the image (1 or 2) that contains the ``points`` .
@@ -345,7 +350,6 @@ Converts points to/from homogeneous coordinates.
.. ocv:function:: void convertPointsHomogeneous( InputArray src, OutputArray dst )
.. ocv:cfunction:: void cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst )
.. ocv:pyoldfunction:: cv.ConvertPointsHomogeneous(src, dst) -> None
:param src: Input array or vector of 2D, 3D, or 4D points.
@@ -391,8 +395,6 @@ Decomposes a projection matrix into a rotation matrix and a camera matrix.
.. ocv:cfunction:: void cvDecomposeProjectionMatrix( const CvMat * projMatr, CvMat * calibMatr, CvMat * rotMatr, CvMat * posVect, CvMat * rotMatrX=NULL, CvMat * rotMatrY=NULL, CvMat * rotMatrZ=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.DecomposeProjectionMatrix(projMatrix, cameraMatrix, rotMatrix, transVect, rotMatrX=None, rotMatrY=None, rotMatrZ=None) -> eulerAngles
:param projMatrix: 3x4 input projection matrix P.
:param cameraMatrix: Output 3x3 camera matrix K.
@@ -424,10 +426,9 @@ Renders the detected chessboard corners.
.. ocv:function:: void drawChessboardCorners( InputOutputArray image, Size patternSize, InputArray corners, bool patternWasFound )
.. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> None
.. ocv:pyfunction:: cv2.drawChessboardCorners(image, patternSize, corners, patternWasFound) -> image
.. ocv:cfunction:: void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found )
.. ocv:pyoldfunction:: cv.DrawChessboardCorners(image, patternSize, corners, patternWasFound)-> None
:param image: Destination image. It must be an 8-bit color image.
@@ -445,12 +446,11 @@ findChessboardCorners
-------------------------
Finds the positions of internal corners of the chessboard.
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE )
.. ocv:function:: bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, int flags=CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyfunction:: cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners
.. ocv:cfunction:: int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE )
.. ocv:pyoldfunction:: cv.FindChessboardCorners(image, patternSize, flags=CV_CALIB_CB_ADAPTIVE_THRESH) -> corners
:param image: Source chessboard view. It must be an 8-bit grayscale or color image.
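For instance, a short sketch combining detection and visualization (``img`` is assumed to hold an 8-bit color view of a 9x6 chessboard; illustrative only): ::

    cv::Mat img;                               // 8-bit color chessboard view, loaded elsewhere
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(img, cv::Size(9, 6), corners,
                     cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
    if (found)
        cv::drawChessboardCorners(img, cv::Size(9, 6), corners, found);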
@@ -506,7 +506,7 @@ Finds centers in the grid of circles.
.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )
.. ocv:pyfunction:: cv2.findCirclesGridDefault(image, patternSize[, centers[, flags]]) -> retval, centers
.. ocv:pyfunction:: cv2.findCirclesGrid(image, patternSize[, centers[, flags[, blobDetector]]]) -> retval, centers
:param image: grid view of input circles; it must be an 8-bit grayscale or color image.
@@ -555,15 +555,13 @@ Finds an object pose from 3D-2D point correspondences.
.. ocv:cfunction:: void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector, int use_extrinsic_guess=0 )
.. ocv:pyoldfunction:: cv.FindExtrinsicCameraParams2(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess=0 ) -> None
:param objectPoints: Array of object points in the object coordinate space, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. ``vector<Point3f>`` can be also passed here.
:param imagePoints: Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, where N is the number of points. ``vector<Point2f>`` can be also passed here.
:param cameraMatrix: Input camera matrix :math:`A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param rvec: Output rotation vector (see :ocv:func:`Rodrigues` ) that, together with ``tvec`` , brings points from the model coordinate system to the camera coordinate system.
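A brief sketch of the corresponding C++ call (the correspondences and calibration data are assumed to be available; illustrative only): ::

    std::vector<cv::Point3f> objectPts;   // model points, filled elsewhere
    std::vector<cv::Point2f> imagePts;    // matching image points, filled elsewhere
    cv::Mat cameraMatrix, distCoeffs;     // from calibrateCamera, filled elsewhere
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPts, imagePts, cameraMatrix, distCoeffs, rvec, tvec);
    // rvec/tvec bring model-space points into the camera coordinate system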
@@ -595,7 +593,7 @@ Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
:param cameraMatrix: Input camera matrix :math:`A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param rvec: Output rotation vector (see :ocv:func:`Rodrigues` ) that, together with ``tvec`` , brings points from the model coordinate system to the camera coordinate system.
@@ -627,7 +625,6 @@ Calculates a fundamental matrix from the corresponding points in two images.
.. ocv:pyfunction:: cv2.findFundamentalMat(points1, points2[, method[, param1[, param2[, mask]]]]) -> retval, mask
.. ocv:cfunction:: int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, CvMat* fundamental_matrix, int method=CV_FM_RANSAC, double param1=3., double param2=0.99, CvMat* status=NULL )
.. ocv:pyoldfunction:: cv.FindFundamentalMat(points1, points2, fundamentalMatrix, method=CV_FM_RANSAC, param1=1., param2=0.99, status=None) -> retval
:param points1: Array of ``N`` points from the first image. The point coordinates should be floating-point (single or double precision).
@@ -681,6 +678,124 @@ corresponding to the specified points. It can also be passed to
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
findEssentialMat
------------------
Calculates an essential matrix from the corresponding points in two images.
.. ocv:function:: Mat findEssentialMat( InputArray points1, InputArray points2, double focal=1.0, Point2d pp=Point2d(0, 0), int method=RANSAC, double prob=0.999, double threshold=1.0, OutputArray mask=noArray() )
:param points1: Array of ``N`` ``(N >= 5)`` 2D points from the first image. The point coordinates should be floating-point (single or double precision).
:param points2: Array of the second image points of the same size and format as ``points1`` .
:param focal: focal length of the camera. Note that this function assumes that ``points1`` and ``points2`` are feature points from cameras with the same focal length and principal point.
:param pp: principal point of the camera.
:param method: Method for computing the essential matrix.
* **CV_RANSAC** for the RANSAC algorithm.
* **CV_LMEDS** for the LMedS algorithm.
:param threshold: Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the final essential matrix. It can be set to something like 1-3, depending on the accuracy of the point localization, image resolution, and the image noise.
:param prob: Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence (probability) that the estimated matrix is correct.
:param mask: Output array of N elements, every element of which is set to 0 for outliers and to 1 for the other points. The array is computed only in the RANSAC and LMedS methods.
This function estimates the essential matrix based on the five-point algorithm solver in [Nister03]_. [SteweniusCFS]_ is also a related solution.
The epipolar geometry is described by the following equation:
.. math::
[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0 \\
K =
\begin{bmatrix}
f & 0 & x_{pp} \\
0 & f & y_{pp} \\
0 & 0 & 1
\end{bmatrix}
where
:math:`E` is an essential matrix,
:math:`p_1` and
:math:`p_2` are corresponding points in the first and the second images, respectively.
The result of this function may be passed further to ``decomposeEssentialMat()`` or ``recoverPose()`` to recover the relative pose between cameras.
decomposeEssentialMat
-------------------------
Decomposes an essential matrix into possible rotations and translation.
.. ocv:function:: void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t )
:param E: The input essential matrix.
:param R1: One possible rotation matrix.
:param R2: Another possible rotation matrix.
:param t: One possible translation.
This function decomposes an essential matrix ``E`` using SVD decomposition [HartleyZ00]_. In general, four possible poses exist for a given ``E``.
They are
:math:`[R_1, t]`,
:math:`[R_1, -t]`,
:math:`[R_2, t]`,
:math:`[R_2, -t]`.
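A small sketch enumerating the hypotheses (``E`` is assumed to come from ``findEssentialMat()``; illustrative only): ::

    cv::Mat E;                        // essential matrix, e.g. from findEssentialMat(), filled elsewhere
    cv::Mat R1, R2, t;
    cv::decomposeEssentialMat(E, R1, R2, t);
    // candidate poses: (R1, t), (R1, -t), (R2, t), (R2, -t)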
recoverPose
---------------
Recovers relative camera rotation and translation from an estimated essential matrix and the corresponding points in two images, using the cheirality check.
Returns the number of inliers which pass the check.
.. ocv:function:: int recoverPose( InputArray E, InputArray points1, InputArray points2, OutputArray R, OutputArray t, double focal = 1.0, Point2d pp = Point2d(0, 0), InputOutputArray mask = noArray())
:param E: The input essential matrix.
:param points1: Array of ``N`` 2D points from the first image. The point coordinates should be floating-point (single or double precision).
:param points2: Array of the second image points of the same size and format as ``points1`` .
:param R: Recovered relative rotation.
:param t: Recovered relative translation.
:param focal: Focal length of the camera. Note that this function assumes that ``points1`` and ``points2`` are feature points from cameras with the same focal length and principal point.
:param pp: Principal point of the camera.
:param mask: Input/output mask for inliers in ``points1`` and ``points2``.
If it is not empty, then it marks inliers in ``points1`` and ``points2`` for the given essential matrix ``E``.
Only these inliers will be used to recover the pose.
In the output mask, only the inliers that pass the cheirality check are marked.
This function decomposes an essential matrix using ``decomposeEssentialMat()`` and then verifies possible pose hypotheses by doing the cheirality check.
The cheirality check basically means that the triangulated 3D points should have positive depth. Some details can be found in [Nister03]_.
This function can be used to process output ``E`` and ``mask`` from ``findEssentialMat()``.
In this scenario, ``points1`` and ``points2`` are the same input for ``findEssentialMat()``. ::
// Example. Estimation of the essential matrix and recovery of the relative pose using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
double focal = 1.0;
cv::Point2d pp(0.0, 0.0);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, focal, pp, CV_RANSAC, 0.999, 1.0, mask);
recoverPose(E, points1, points2, R, t, focal, pp, mask);
findHomography
@@ -693,8 +808,6 @@ Finds a perspective transformation between two planes.
.. ocv:cfunction:: int cvFindHomography( const CvMat* src_points, const CvMat* dst_points, CvMat* homography, int method=0, double ransacReprojThreshold=3, CvMat* mask=0 )
.. ocv:pyoldfunction:: cv.FindHomography(srcPoints, dstPoints, H, method=0, ransacReprojThreshold=3.0, status=None) -> None
:param srcPoints: Coordinates of the points in the original plane, a matrix of the type ``CV_32FC2`` or ``vector<Point2f>`` .
:param dstPoints: Coordinates of the points in the target plane, a matrix of the type ``CV_32FC2`` or a ``vector<Point2f>`` .
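For reference, a minimal robust call (the matched point sets are assumed to be filled; illustrative only): ::

    std::vector<cv::Point2f> srcPts, dstPts;   // matched points, filled elsewhere
    cv::Mat inlierMask;
    cv::Mat H = cv::findHomography(srcPts, dstPts, cv::RANSAC, 3, inlierMask);
    // inlierMask marks the correspondences consistent with H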
@@ -796,7 +909,7 @@ Filters off small noise blobs (speckles) in the disparity map
.. ocv:function:: void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff, InputOutputArray buf=noArray() )
.. ocv:pyfunction:: cv2.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff[, buf]) -> None
.. ocv:pyfunction:: cv2.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff[, buf]) -> img, buf
:param img: The input 16-bit signed disparity image
@@ -819,11 +932,9 @@ Returns the new camera matrix based on the free scaling parameter.
.. ocv:cfunction:: void cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, const CvMat* dist_coeffs, CvSize image_size, double alpha, CvMat* new_camera_matrix, CvSize new_imag_size=cvSize(0,0), CvRect* valid_pixel_ROI=0, int center_principal_point=0 )
.. ocv:pyoldfunction:: cv.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newCameraMatrix, newImageSize=(0, 0), validPixROI=0, centerPrincipalPoint=0) -> None
:param cameraMatrix: Input camera matrix.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param imageSize: Original image size.
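A short sketch (``cameraMatrix`` and ``distCoeffs`` are assumed to come from ``calibrateCamera``; illustrative only): ::

    cv::Mat cameraMatrix, distCoeffs;   // from calibrateCamera, filled elsewhere
    cv::Size imageSize(640, 480);
    cv::Rect validRoi;
    cv::Mat newK = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize,
                                                 1.0 /*alpha*/, imageSize, &validRoi);
    // with alpha=1 all source pixels are retained; validRoi bounds the all-valid region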
@@ -848,14 +959,12 @@ initCameraMatrix2D
----------------------
Finds an initial camera matrix from 3D-2D point correspondences.
.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.)
.. ocv:function:: Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, double aspectRatio=1.0 )
.. ocv:pyfunction:: cv2.initCameraMatrix2D(objectPoints, imagePoints, imageSize[, aspectRatio]) -> retval
.. ocv:cfunction:: void cvInitIntrinsicParams2D( const CvMat* object_points, const CvMat* image_points, const CvMat* npoints, CvSize image_size, CvMat* camera_matrix, double aspect_ratio=1. )
.. ocv:pyoldfunction:: cv.InitIntrinsicParams2D(objectPoints, imagePoints, npoints, imageSize, cameraMatrix, aspectRatio=1.) -> None
:param objectPoints: Vector of vectors of the calibration pattern points in the calibration pattern coordinate space. In the old interface all the per-view vectors are concatenated. See :ocv:func:`calibrateCamera` for details.
:param imagePoints: Vector of vectors of the projections of the calibration pattern points. In the old interface all the per-view vectors are concatenated.
@@ -903,8 +1012,6 @@ Projects 3D points to an image plane.
.. ocv:cfunction:: void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* camera_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot=NULL, CvMat* dpdt=NULL, CvMat* dpdf=NULL, CvMat* dpdc=NULL, CvMat* dpddist=NULL, double aspect_ratio=0 )
.. ocv:pyoldfunction:: cv.ProjectPoints2(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, dpdrot=None, dpdt=None, dpdf=None, dpdc=None, dpddist=None)-> None
:param objectPoints: Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or ``vector<Point3f>`` ), where N is the number of points in the view.
:param rvec: Rotation vector. See :ocv:func:`Rodrigues` for details.
@@ -913,7 +1020,7 @@ Projects 3D points to an image plane.
:param cameraMatrix: Camera matrix :math:`A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}` .
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param distCoeffs: Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
:param imagePoints: Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or ``vector<Point2f>`` .
@@ -948,15 +1055,13 @@ Reprojects a disparity image to 3D space.
.. ocv:cfunction:: void cvReprojectImageTo3D( const CvArr* disparityImage, CvArr* _3dImage, const CvMat* Q, int handleMissingValues=0 )
.. ocv:pyoldfunction:: cv.ReprojectImageTo3D(disparity, _3dImage, Q, handleMissingValues=0) -> None
:param disparity: Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit floating-point disparity image.
:param _3dImage: Output 3-channel floating-point image of the same size as ``disparity`` . Each element of ``_3dImage(x,y)`` contains 3D coordinates of the point ``(x,y)`` computed from the disparity map.
:param Q: :math:`4 \times 4` perspective transformation matrix that can be obtained with :ocv:func:`stereoRectify`.
:param handleMissingValues: Indicates, whether the function should handle missing values (i.e. points where the disparity was not computed). If ``handleMissingValues=true``, then pixels with the minimal disparity that corresponds to the outliers (see :ocv:funcx:`StereoBM::operator()` ) are transformed to 3D points with a very large Z value (currently set to 10000).
:param handleMissingValues: Indicates, whether the function should handle missing values (i.e. points where the disparity was not computed). If ``handleMissingValues=true``, then pixels with the minimal disparity that corresponds to the outliers (see :ocv:funcx:`StereoMatcher::compute` ) are transformed to 3D points with a very large Z value (currently set to 10000).
:param ddepth: The optional output array depth. If it is ``-1``, the output image will have ``CV_32F`` depth. ``ddepth`` can also be set to ``CV_16S``, ``CV_32S`` or ``CV_32F``.
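A brief sketch (``disp`` and ``Q`` are assumed to come from ``StereoMatcher::compute`` and ``stereoRectify``; illustrative only): ::

    cv::Mat disp, Q;   // disparity map and 4x4 reprojection matrix, computed elsewhere
    cv::Mat xyz;
    cv::reprojectImageTo3D(disp, xyz, Q, true /*handleMissingValues*/);
    // xyz(x,y) holds the 3D point reconstructed for pixel (x,y)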
@@ -982,7 +1087,6 @@ Computes an RQ decomposition of 3x3 matrices.
.. ocv:pyfunction:: cv2.RQDecomp3x3(src[, mtxR[, mtxQ[, Qx[, Qy[, Qz]]]]]) -> retval, mtxR, mtxQ, Qx, Qy, Qz
.. ocv:cfunction:: void cvRQDecomp3x3( const CvMat * matrixM, CvMat * matrixR, CvMat * matrixQ, CvMat * matrixQx=NULL, CvMat * matrixQy=NULL, CvMat * matrixQz=NULL, CvPoint3D64f * eulerAngles=NULL )
.. ocv:pyoldfunction:: cv.RQDecomp3x3(M, R, Q, Qx=None, Qy=None, Qz=None) -> eulerAngles
:param src: 3x3 input matrix.
@@ -1013,8 +1117,6 @@ Converts a rotation matrix to a rotation vector or vice versa.
.. ocv:cfunction:: int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian=0 )
.. ocv:pyoldfunction:: cv.Rodrigues2(src, dst, jacobian=0)-> None
:param src: Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
:param dst: Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
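For example (the values are arbitrary; illustrative only): ::

    cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1, -0.2, 0.3);
    cv::Mat R;
    cv::Rodrigues(rvec, R);    // 3x1 rotation vector -> 3x3 rotation matrix
    cv::Rodrigues(R, rvec);    // and back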
@@ -1039,155 +1141,78 @@ used in the global 3D geometry optimization procedures like
:ocv:func:`solvePnP` .
StereoMatcher
-------------
.. ocv:class:: StereoMatcher : public Algorithm
StereoBM
--------
.. ocv:class:: StereoBM
The base class for stereo correspondence algorithms.
Class for computing stereo correspondence using the block matching algorithm. ::
// Block matching stereo correspondence algorithm
class StereoBM
{
enum { NORMALIZED_RESPONSE = CV_STEREO_BM_NORMALIZED_RESPONSE,
BASIC_PRESET=CV_STEREO_BM_BASIC,
FISH_EYE_PRESET=CV_STEREO_BM_FISH_EYE,
NARROW_PRESET=CV_STEREO_BM_NARROW };
StereoBM();
// the preset is one of ..._PRESET above.
// ndisparities is the size of disparity range,
// in which the optimal disparity at each pixel is searched for.
// SADWindowSize is the size of averaging window used to match pixel blocks
// (larger values mean better robustness to noise, but yield blurry disparity maps)
StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
// separate initialization function
void init(int preset, int ndisparities=0, int SADWindowSize=21);
// computes the disparity for the two rectified 8-bit single-channel images.
// the disparity will be 16-bit signed (fixed-point) or 32-bit floating-point image of the same size as left.
void operator()( InputArray left, InputArray right, OutputArray disparity, int disptype=CV_16S );
Ptr<CvStereoBMState> state;
};
The class is a C++ wrapper for the associated functions. In particular, :ocv:funcx:`StereoBM::operator()` is the wrapper for
:ocv:cfunc:`cvFindStereoCorrespondenceBM`.
StereoBM::StereoBM
------------------
The constructors.
.. ocv:function:: StereoBM::StereoBM()
.. ocv:function:: StereoBM::StereoBM(int preset, int ndisparities=0, int SADWindowSize=21)
.. ocv:pyfunction:: cv2.StereoBM([preset[, ndisparities[, SADWindowSize]]]) -> <StereoBM object>
.. ocv:cfunction:: CvStereoBMState* cvCreateStereoBMState( int preset=CV_STEREO_BM_BASIC, int numberOfDisparities=0 )
.. ocv:pyoldfunction:: cv.CreateStereoBMState(preset=CV_STEREO_BM_BASIC, numberOfDisparities=0)-> CvStereoBMState
:param preset: specifies the whole set of algorithm parameters, one of:
* BASIC_PRESET - parameters suitable for general cameras
* FISH_EYE_PRESET - parameters suitable for wide-angle cameras
* NARROW_PRESET - parameters suitable for narrow-angle cameras
After constructing the class, you can override any parameters set by the preset.
:param ndisparities: the disparity search range. For each pixel, the algorithm will find the best disparity from 0 (default minimum disparity) to ``ndisparities``. The search range can then be shifted by changing the minimum disparity.
:param SADWindowSize: the linear size of the blocks compared by the algorithm. The size should be odd (as the block is centered at the current pixel). A larger block size implies a smoother, though less accurate, disparity map. A smaller block size gives a more detailed disparity map, but there is a higher chance for the algorithm to find a wrong correspondence.
The constructors initialize ``StereoBM`` state. You can then call ``StereoBM::operator()`` to compute disparity for a specific stereo pair.
.. note:: In the C API you need to deallocate ``CvStereoBM`` state when it is not needed anymore using ``cvReleaseStereoBMState(&stereobm)``.
StereoBM::operator()
StereoMatcher::compute
-----------------------
Computes disparity using the BM algorithm for a rectified stereo pair.
Computes disparity map for the specified stereo pair
.. ocv:function:: void StereoBM::operator()( InputArray left, InputArray right, OutputArray disparity, int disptype=CV_16S )
.. ocv:function:: void StereoMatcher::compute( InputArray left, InputArray right, OutputArray disparity )
.. ocv:pyfunction:: cv2.StereoBM.compute(left, right[, disparity[, disptype]]) -> disparity
.. ocv:cfunction:: void cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right, CvArr* disparity, CvStereoBMState* state )
.. ocv:pyoldfunction:: cv.FindStereoCorrespondenceBM(left, right, disparity, state)-> None
.. ocv:pyfunction:: cv2.StereoBM.compute(left, right[, disparity]) -> disparity
:param left: Left 8-bit single-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disparity: Output disparity map. It has the same size as the input images. When ``disptype==CV_16S``, the map is a 16-bit signed single-channel image, containing disparity values scaled by 16. To get the true disparity values from such fixed-point representation, you will need to divide each ``disp`` element by 16. If ``disptype==CV_32F``, the disparity map will already contain the real disparity values on output.
:param disparity: Output disparity map. It has the same size as the input images. Some algorithms, like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
:param disptype: Type of the output disparity map, ``CV_16S`` (default) or ``CV_32F``.
:param state: The pre-initialized ``CvStereoBMState`` structure in the case of the old API.
StereoBM
--------
.. ocv:class:: StereoBM : public StereoMatcher
The method executes the BM algorithm on a rectified stereo pair. See the ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method. Note that the method is not constant, thus you should not use the same ``StereoBM`` instance from within different threads simultaneously. The function is parallelized with the TBB library.
Class for computing stereo correspondence using the block matching algorithm, introduced and contributed to OpenCV by K. Konolige.
createStereoBM
------------------
Creates StereoBM object
.. ocv:function:: Ptr<StereoBM> createStereoBM(int numDisparities=0, int blockSize=21)
.. ocv:pyfunction:: cv2.createStereoBM([numDisparities[, blockSize]]) -> retval
:param numDisparities: the disparity search range. For each pixel, the algorithm will find the best disparity from 0 (default minimum disparity) to ``numDisparities``. The search range can then be shifted by changing the minimum disparity.
:param blockSize: the linear size of the blocks compared by the algorithm. The size should be odd (as the block is centered at the current pixel). A larger block size implies a smoother, though less accurate, disparity map. A smaller block size gives a more detailed disparity map, but there is a higher chance for the algorithm to find a wrong correspondence.
The function creates a ``StereoBM`` object. You can then call ``StereoBM::compute()`` to compute the disparity for a specific stereo pair.
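A minimal sketch of the factory-style usage (the rectified 8-bit images are assumed to be prepared elsewhere; illustrative only): ::

    cv::Mat leftRect, rightRect;   // rectified 8-bit single-channel images, prepared elsewhere
    cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64 /*numDisparities*/, 21 /*blockSize*/);
    cv::Mat disp;
    bm->compute(leftRect, rightRect, disp);   // 16-bit fixed-point disparity (4 fractional bits)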
StereoSGBM
----------
.. ocv:class:: StereoSGBM
Class for computing stereo correspondence using the semi-global block matching algorithm. ::
class StereoSGBM
{
StereoSGBM();
StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
int preFilterCap=0, int uniquenessRatio=0,
int speckleWindowSize=0, int speckleRange=0,
bool fullDP=false);
virtual ~StereoSGBM();
virtual void operator()(InputArray left, InputArray right, OutputArray disp);
int minDisparity;
int numberOfDisparities;
int SADWindowSize;
int preFilterCap;
int uniquenessRatio;
int P1, P2;
int speckleWindowSize;
int speckleRange;
int disp12MaxDiff;
bool fullDP;
...
};
.. ocv:class:: StereoSGBM : public StereoMatcher
The class implements the modified H. Hirschmuller algorithm [HH08]_ that differs from the original one as follows:
* By default, the algorithm is single-pass, which means that you consider only 5 directions instead of 8. Set ``fullDP=true`` to run the full variant of the algorithm but beware that it may consume a lot of memory.
* By default, the algorithm is single-pass, which means that you consider only 5 directions instead of 8. Set ``mode=StereoSGBM::MODE_HH`` in ``createStereoSGBM`` to run the full variant of the algorithm but beware that it may consume a lot of memory.
* The algorithm matches blocks, not individual pixels. Though, setting ``SADWindowSize=1`` reduces the blocks to single pixels.
* The algorithm matches blocks, not individual pixels. Though, setting ``blockSize=1`` reduces the blocks to single pixels.
* Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi sub-pixel metric from [BT98]_ is used. Though, the color images are supported as well.
* Some pre- and post- processing steps from K. Konolige algorithm :ocv:funcx:`StereoBM::operator()` are included, for example: pre-filtering (``CV_STEREO_BM_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering).
* Some pre- and post- processing steps from K. Konolige algorithm ``StereoBM`` are included, for example: pre-filtering (``StereoBM::PREFILTER_XSOBEL`` type) and post-filtering (uniqueness check, quadratic interpolation and speckle filtering).
StereoSGBM::StereoSGBM
createStereoSGBM
--------------------------
.. ocv:function:: StereoSGBM::StereoSGBM()
Creates StereoSGBM object
.. ocv:function:: StereoSGBM::StereoSGBM( int minDisparity, int numDisparities, int SADWindowSize, int P1=0, int P2=0, int disp12MaxDiff=0, int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, bool fullDP=false)
.. ocv:function:: Ptr<StereoSGBM> createStereoSGBM( int minDisparity, int numDisparities, int blockSize, int P1=0, int P2=0, int disp12MaxDiff=0, int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, int mode=StereoSGBM::MODE_SGBM)
.. ocv:pyfunction:: cv2.StereoSGBM([minDisparity, numDisparities, SADWindowSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, fullDP]]]]]]]]]) -> <StereoSGBM object>
Initializes ``StereoSGBM`` and sets parameters to custom values.
.. ocv:pyfunction:: cv2.createStereoSGBM(minDisparity, numDisparities, blockSize[, P1[, P2[, disp12MaxDiff[, preFilterCap[, uniquenessRatio[, speckleWindowSize[, speckleRange[, mode]]]]]]]]) -> retval
:param minDisparity: Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
:param numDisparities: Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
:param SADWindowSize: Matched block size. It must be an odd number ``>=1`` . Normally, it should be somewhere in the ``3..11`` range.
:param blockSize: Matched block size. It must be an odd number ``>=1`` . Normally, it should be somewhere in the ``3..11`` range.
:param P1: The first parameter controlling the disparity smoothness. See below.
@@ -1201,46 +1226,24 @@ StereoSGBM::StereoSGBM
:param speckleWindowSize: Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
:param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, multiple of 16. Normally, 16 or 32 is good enough.
:param speckleRange: Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value; it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough.
:param fullDP: Set it to ``true`` to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to ``false`` .
:param mode: Set it to ``StereoSGBM::MODE_HH`` to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to ``StereoSGBM::MODE_SGBM`` .
The first constructor initializes ``StereoSGBM`` with all the default parameters. So, you only have to set ``StereoSGBM::numberOfDisparities`` at minimum. The second constructor enables you to set each parameter to a custom value.
The first constructor initializes ``StereoSGBM`` with all the default parameters. So, you only have to set ``StereoSGBM::numDisparities`` at minimum. The second constructor enables you to set each parameter to a custom value.
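A comparable sketch for the SGBM factory (the parameter values are illustrative, not recommendations): ::

    cv::Mat leftRect, rightRect;   // rectified images, prepared elsewhere
    int blockSize = 5;
    cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(0 /*minDisparity*/, 64 /*numDisparities*/, blockSize,
            8*3*blockSize*blockSize,    // P1
            32*3*blockSize*blockSize,   // P2
            1, 63, 10, 100, 2,          // disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange
            cv::StereoSGBM::MODE_SGBM);
    cv::Mat disp;
    sgbm->compute(leftRect, rightRect, disp);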
StereoSGBM::operator ()
-----------------------
.. ocv:function:: void StereoSGBM::operator()(InputArray left, InputArray right, OutputArray disp)
.. ocv:pyfunction:: cv2.StereoSGBM.compute(left, right[, disp]) -> disp
Computes disparity using the SGBM algorithm for a rectified stereo pair.
:param left: Left 8-bit single-channel or 3-channel image.
:param right: Right image of the same size and the same type as the left one.
:param disp: Output disparity map. It is a 16-bit signed single-channel image of the same size as the input image. It contains disparity values scaled by 16. So, to get the floating-point disparity map, you need to divide each ``disp`` element by 16.
The method executes the SGBM algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method.
.. note:: The method is not constant, so you should not use the same ``StereoSGBM`` instance from different threads simultaneously.
stereoCalibrate
-------------------
Calibrates the stereo camera.
.. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize[, R[, T[, E[, F[, criteria[, flags]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, const CvMat* image_points2, const CvMat* npoints, CvMat* camera_matrix1, CvMat* dist_coeffs1, CvMat* camera_matrix2, CvMat* dist_coeffs2, CvSize image_size, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, term_crit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
:param objectPoints: Vector of vectors of the calibration pattern points.
:param imagePoints1: Vector of vectors of the projections of the calibration pattern points, observed by the first camera.
@@ -1249,7 +1252,7 @@ Calibrates the stereo camera.
:param cameraMatrix1: Input/output first camera matrix: :math:`\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}` , :math:`j = 0,\, 1` . If any of ``CV_CALIB_USE_INTRINSIC_GUESS`` , ``CV_CALIB_FIX_ASPECT_RATIO`` , ``CV_CALIB_FIX_INTRINSIC`` , or ``CV_CALIB_FIX_FOCAL_LENGTH`` are specified, some or all of the matrix components must be initialized. See the flags description for details.
:param distCoeffs1: Input/output vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` of 4, 5, or 8 elements. The output vector length depends on the flags.
:param distCoeffs1: Input/output vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])` of 4, 5, 8 or 12 elements. The output vector length depends on the flags.
:param cameraMatrix2: Input/output second camera matrix. The parameter is similar to ``cameraMatrix1`` .
@@ -1287,6 +1290,10 @@ Calibrates the stereo camera.
* **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward compatibility, this extra flag should be explicitly specified to make the calibration function use the rational model and return 8 coefficients. If the flag is not set, the function computes and returns only 5 distortion coefficients.
* **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the backward compatibility, this extra flag should be explicitly specified to make the calibration function use the thin prism model and return 12 coefficients. If the flag is not set, the function computes and returns only 5 distortion coefficients.
* **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during the optimization. If ``CV_CALIB_USE_INTRINSIC_GUESS`` is set, the coefficient from the supplied ``distCoeffs`` matrix is used. Otherwise, it is set to 0.
The function estimates transformation between two cameras making a stereo pair. If you have a stereo camera where the relative position and orientation of two cameras is fixed, and if you computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (this can be done with
:ocv:func:`solvePnP` ), then those poses definitely relate to each other. This means that, given (
:math:`R_1`,:math:`T_1` ), it should be possible to compute (
@@ -1328,8 +1335,6 @@ Computes rectification transforms for each head of a calibrated stereo camera.
.. ocv:cfunction:: void cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, CvSize image_size, const CvMat* R, const CvMat* T, CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, CvMat* Q=0, int flags=CV_CALIB_ZERO_DISPARITY, double alpha=-1, CvSize new_image_size=cvSize(0,0), CvRect* valid_pix_ROI1=0, CvRect* valid_pix_ROI2=0 )
.. ocv:pyoldfunction:: cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0)) -> (roi1, roi2)
:param cameraMatrix1: First camera matrix.
:param cameraMatrix2: Second camera matrix.
@@ -1417,8 +1422,6 @@ Computes a rectification transform for an uncalibrated stereo camera.
.. ocv:cfunction:: int cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, const CvMat* F, CvSize img_size, CvMat* H1, CvMat* H2, double threshold=5 )
.. ocv:pyoldfunction:: cv.StereoRectifyUncalibrated(points1, points2, F, imageSize, H1, H2, threshold=5)-> None
:param points1: Array of feature points in the first image.
:param points2: The corresponding points in the second image. The same formats as in :ocv:func:`findFundamentalMat` are supported.
@@ -1475,8 +1478,14 @@ The function reconstructs 3-dimensional points (in homogeneous coordinates) by u
.. [Hartley99] Hartley, R.I., Theory and Practice of Projective Rectification. IJCV 35 2, pp 115-127 (1999)
.. [HartleyZ00] Hartley, R. and Zisserman, A. Multiple View Geometry in Computer Vision, Cambridge University Press, 2000.
.. [HH08] Hirschmuller, H. Stereo Processing by Semiglobal Matching and Mutual Information, PAMI(30), No. 2, February 2008, pp. 328-341.
.. [Nister03] Nistér, D. An efficient solution to the five-point relative pose problem, CVPR 2003.
.. [SteweniusCFS] Stewénius, H., Calibrated Fivepoint solver. http://www.vis.uky.edu/~stewe/FIVEPOINT/
.. [Slabaugh] Slabaugh, G.G. Computing Euler angles from a rotation matrix. http://gregslabaugh.name/publications/euler.pdf
.. [Zhang2000] Z. Zhang. A Flexible New Technique for Camera Calibration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.

View File

@@ -0,0 +1,416 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CALIB3D_HPP__
#define __OPENCV_CALIB3D_HPP__
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
namespace cv
{
//! type of the robust estimation algorithm
enum { LMEDS = 4, //!< least-median algorithm
RANSAC = 8 //!< RANSAC algorithm
};
enum { ITERATIVE = 0,
EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
enum { CALIB_CB_ADAPTIVE_THRESH = 1,
CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4,
CALIB_CB_FAST_CHECK = 8
};
enum { CALIB_CB_SYMMETRIC_GRID = 1,
CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4
};
enum { CALIB_USE_INTRINSIC_GUESS = 0x00001,
CALIB_FIX_ASPECT_RATIO = 0x00002,
CALIB_FIX_PRINCIPAL_POINT = 0x00004,
CALIB_ZERO_TANGENT_DIST = 0x00008,
CALIB_FIX_FOCAL_LENGTH = 0x00010,
CALIB_FIX_K1 = 0x00020,
CALIB_FIX_K2 = 0x00040,
CALIB_FIX_K3 = 0x00080,
CALIB_FIX_K4 = 0x00800,
CALIB_FIX_K5 = 0x01000,
CALIB_FIX_K6 = 0x02000,
CALIB_RATIONAL_MODEL = 0x04000,
CALIB_THIN_PRISM_MODEL = 0x08000,
CALIB_FIX_S1_S2_S3_S4 = 0x10000,
// only for stereo
CALIB_FIX_INTRINSIC = 0x00100,
CALIB_SAME_FOCAL_LENGTH = 0x00200,
// for stereo rectification
CALIB_ZERO_DISPARITY = 0x00400
};
//! the algorithm for finding fundamental matrix
enum { FM_7POINT = 1, //!< 7-point algorithm
FM_8POINT = 2, //!< 8-point algorithm
FM_LMEDS = 4, //!< least-median algorithm
FM_RANSAC = 8 //!< RANSAC algorithm
};
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
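// A minimal usage sketch (illustrative only; 'rvec' and 'R' are placeholder names):
//
//     cv::Mat rvec = (cv::Mat_<double>(3,1) << 0.1, 0.2, 0.3);
//     cv::Mat R;
//     cv::Rodrigues(rvec, R);   // 3x1 rotation vector -> 3x3 rotation matrix
//     cv::Rodrigues(R, rvec);   // 3x3 rotation matrix -> 3x1 rotation vector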
//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method = 0, double ransacReprojThreshold = 3,
OutputArray mask=noArray());
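// Usage sketch (hedged; the matched point vectors below are hypothetical):
//
//     std::vector<cv::Point2f> srcPts, dstPts;   // filled with putative matches
//     cv::Mat inlierMask;
//     cv::Mat H = cv::findHomography(srcPts, dstPts, cv::RANSAC, 3.0, inlierMask);
//     // inlierMask marks the correspondences kept by RANSAC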
//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx = noArray(),
OutputArray Qy = noArray(),
OutputArray Qz = noArray());
//! Decomposes the projection matrix into the camera matrix, the rotation matrix and the translation vector
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX = noArray(),
OutputArray rotMatrixY = noArray(),
OutputArray rotMatrixZ = noArray(),
OutputArray eulerAngles =noArray() );
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray imagePoints,
OutputArray jacobian = noArray(),
double aspectRatio = 0 );
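// Usage sketch (illustrative; rvec, tvec, K and distCoeffs are placeholders assumed
// to come from an earlier pose estimation / calibration step):
//
//     std::vector<cv::Point3f> objectPts;   // 3D points in the model frame
//     std::vector<cv::Point2f> imagePts;
//     cv::projectPoints(objectPts, rvec, tvec, K, distCoeffs, imagePts);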
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess = false, int flags = ITERATIVE );
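// Usage sketch (illustrative; K, distCoeffs and the point containers are placeholders
// assumed to come from prior calibration and detection steps):
//
//     std::vector<cv::Point3f> objectPts;   // 3D model points
//     std::vector<cv::Point2f> imagePts;    // their observed 2D projections
//     cv::Mat rvec, tvec;
//     bool ok = cv::solvePnP(objectPts, imagePts, K, distCoeffs, rvec, tvec);
//     // rvec/tvec give the model-to-camera pose; EPNP or P3P can be passed via 'flags'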
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100,
OutputArray inliers = noArray(), int flags = ITERATIVE );
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio = 1.0 );
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
//! draws the checkerboard pattern (found or partly found) in the image
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
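// Usage sketch (illustrative; 'gray' and 'view' are placeholder images, e.g. the
// grayscale frame used for detection and a color copy used for drawing):
//
//     cv::Size patternSize(9, 6);                 // inner corners per row and column
//     std::vector<cv::Point2f> corners;
//     bool found = cv::findChessboardCorners(gray, patternSize, corners);
//     cv::drawChessboardCorners(view, patternSize, corners, found);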
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
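// Usage sketch (hedged; objectPoints/imagePoints are per-view correspondences gathered
// from a detector such as findChessboardCorners, and imageSize is the frame size):
//
//     std::vector<std::vector<cv::Point3f> > objectPoints;
//     std::vector<std::vector<cv::Point2f> > imagePoints;
//     cv::Mat K, distCoeffs;
//     std::vector<cv::Mat> rvecs, tvecs;
//     double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
//                                      K, distCoeffs, rvecs, tvecs);
//     // rms is the RMS reprojection error in pixels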
//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
double apertureWidth, double apertureHeight,
CV_OUT double& fovx, CV_OUT double& fovy,
CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
//! finds intrinsic and extrinsic parameters of a stereo camera
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags = CALIB_FIX_INTRINSIC );
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
double alpha = -1, Size newImageSize = Size(),
CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
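// Usage sketch (illustrative; K1/D1/K2/D2, R, T and imageSize are placeholders taken
// from a previous stereoCalibrate call):
//
//     cv::Mat R1, R2, P1, P2, Q;
//     cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q);
//     // R1/R2 and P1/P2 feed the per-camera rectification maps; Q reprojects disparity to 3D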
//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
double threshold = 5 );
//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
InputArray cameraMatrix3, InputArray distCoeffs3,
InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
Size imageSize, InputArray R12, InputArray T12,
InputArray R13, InputArray T13,
OutputArray R1, OutputArray R2, OutputArray R3,
OutputArray P1, OutputArray P2, OutputArray P3,
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize = Size(),
CV_OUT Rect* validPixROI = 0,
bool centerPrincipalPoint = false);
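// Usage sketch (illustrative; K, distCoeffs and imageSize are placeholders from calibration):
//
//     cv::Mat newK = cv::getOptimalNewCameraMatrix(K, distCoeffs, imageSize, 0.0);
//     // alpha = 0 keeps only valid (fully undistorted) pixels; alpha = 1 keeps the whole frame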
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
//! for backward compatibility
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
//! finds fundamental matrix from a set of corresponding 2D points
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99,
OutputArray mask = noArray() );
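// Usage sketch (hedged; pts1/pts2 are matched 2D point sets from the two images):
//
//     cv::Mat inlierMask;
//     cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 3.0, 0.99, inlierMask);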
//! variant of findFundamentalMat for backward compatibility
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, int method = FM_RANSAC,
double param1 = 3., double param2 = 0.99 );
//! finds essential matrix from a set of corresponding 2D points using five-point algorithm
CV_EXPORTS Mat findEssentialMat( InputArray points1, InputArray points2,
double focal = 1.0, Point2d pp = Point2d(0, 0),
int method = RANSAC, double prob = 0.999,
double threshold = 1.0, OutputArray mask = noArray() );
//! decomposes the essential matrix into the two possible rotation matrices and a translation vector
CV_EXPORTS void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
//! recover relative camera pose from a set of corresponding 2D points
CV_EXPORTS int recoverPose( InputArray E, InputArray points1, InputArray points2,
OutputArray R, OutputArray t,
double focal = 1.0, Point2d pp = Point2d(0, 0),
InputOutputArray mask = noArray() );
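// Usage sketch combining the two calls above (illustrative; the focal length, principal
// point and point sets are hypothetical values):
//
//     double focal = 700.0;
//     cv::Point2d pp(320.0, 240.0);
//     cv::Mat mask, R, t;
//     cv::Mat E = cv::findEssentialMat(pts1, pts2, focal, pp, cv::RANSAC, 0.999, 1.0, mask);
//     int nInliers = cv::recoverPose(E, pts1, pts2, R, t, focal, pp, mask);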
//! finds coordinates of the epipolar lines corresponding to the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points, int whichImage,
InputArray F, OutputArray lines );
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,
OutputArray points4D );
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );
//! filters off speckles (small regions of incorrectly computed disparity)
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
int maxSpeckleSize, double maxDiff,
InputOutputArray buf = noArray() );
//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity, int numberOfDisparities,
int SADWindowSize );
//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDisp = 1 );
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues = false,
int ddepth = -1 );
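// Usage sketch (illustrative; 'disparity' comes from a stereo matcher and Q from stereoRectify):
//
//     cv::Mat xyz;
//     cv::reprojectImageTo3D(disparity, xyz, Q, true);  // xyz holds one 3D point per pixel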
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold = 3, double confidence = 0.99);
class CV_EXPORTS_W StereoMatcher : public Algorithm
{
public:
enum { DISP_SHIFT = 4,
DISP_SCALE = (1 << DISP_SHIFT)
};
CV_WRAP virtual void compute( InputArray left, InputArray right,
OutputArray disparity ) = 0;
CV_WRAP virtual int getMinDisparity() const = 0;
CV_WRAP virtual void setMinDisparity(int minDisparity) = 0;
CV_WRAP virtual int getNumDisparities() const = 0;
CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
CV_WRAP virtual int getBlockSize() const = 0;
CV_WRAP virtual void setBlockSize(int blockSize) = 0;
CV_WRAP virtual int getSpeckleWindowSize() const = 0;
CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;
CV_WRAP virtual int getSpeckleRange() const = 0;
CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0;
CV_WRAP virtual int getDisp12MaxDiff() const = 0;
CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;
};
class CV_EXPORTS_W StereoBM : public StereoMatcher
{
public:
enum { PREFILTER_NORMALIZED_RESPONSE = 0,
PREFILTER_XSOBEL = 1
};
CV_WRAP virtual int getPreFilterType() const = 0;
CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
CV_WRAP virtual int getPreFilterSize() const = 0;
CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0;
CV_WRAP virtual int getPreFilterCap() const = 0;
CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
CV_WRAP virtual int getTextureThreshold() const = 0;
CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0;
CV_WRAP virtual int getUniquenessRatio() const = 0;
CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
CV_WRAP virtual int getSmallerBlockSize() const = 0;
CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0;
CV_WRAP virtual Rect getROI1() const = 0;
CV_WRAP virtual void setROI1(Rect roi1) = 0;
CV_WRAP virtual Rect getROI2() const = 0;
CV_WRAP virtual void setROI2(Rect roi2) = 0;
};
CV_EXPORTS_W Ptr<StereoBM> createStereoBM(int numDisparities = 0, int blockSize = 21);
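// Usage sketch (illustrative; 'leftRect' and 'rightRect' are assumed to be rectified,
// 8-bit single-channel images):
//
//     cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64, 21);  // 64 disparities, 21x21 blocks
//     cv::Mat disp;
//     bm->compute(leftRect, rightRect, disp);  // disp is CV_16S, scaled by DISP_SCALE (16)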
class CV_EXPORTS_W StereoSGBM : public StereoMatcher
{
public:
enum { MODE_SGBM = 0,
MODE_HH = 1
};
CV_WRAP virtual int getPreFilterCap() const = 0;
CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
CV_WRAP virtual int getUniquenessRatio() const = 0;
CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
CV_WRAP virtual int getP1() const = 0;
CV_WRAP virtual void setP1(int P1) = 0;
CV_WRAP virtual int getP2() const = 0;
CV_WRAP virtual void setP2(int P2) = 0;
CV_WRAP virtual int getMode() const = 0;
CV_WRAP virtual void setMode(int mode) = 0;
};
CV_EXPORTS_W Ptr<StereoSGBM> createStereoSGBM(int minDisparity, int numDisparities, int blockSize,
int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
int preFilterCap = 0, int uniquenessRatio = 0,
int speckleWindowSize = 0, int speckleRange = 0,
int mode = StereoSGBM::MODE_SGBM);
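// Usage sketch (hedged; the P1/P2 values follow a commonly used heuristic,
// 8*channels*blockSize^2 and 32*channels*blockSize^2, not a requirement of the API):
//
//     int blockSize = 5;
//     cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(0, 64, blockSize,
//                                                         8*blockSize*blockSize,
//                                                         32*blockSize*blockSize);
//     cv::Mat disp;
//     sgbm->compute(leftRect, rightRect, disp);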
} // cv
#endif

View File

@@ -7,11 +7,12 @@
// copy or use the software.
//
//
// License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -40,712 +41,8 @@
//
//M*/
#ifndef __OPENCV_CALIB3D_HPP__
#define __OPENCV_CALIB3D_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#ifdef __cplusplus
extern "C" {
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of the images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));
/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );
/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));
/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));
/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );
/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check whether a chessboard is in the input image. This is a workaround for
// the problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
/* Computes the transformation from one camera coordinate system to another one
from a few corresponding views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
#define CV_CALIB_ZERO_DISPARITY 1024
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
#ifdef __cplusplus
}
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};
namespace cv
{
//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());
//! type of the robust estimation algorithm
enum
{
LMEDS=CV_LMEDS, //!< least-median algorithm
RANSAC=CV_RANSAC //!< RANSAC algorithm
};
//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method=0, double ransacReprojThreshold=3,
OutputArray mask=noArray());
//! variant of findHomography for backward compatibility
CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
OutputArray mask, int method=0, double ransacReprojThreshold=3);
//! Computes RQ decomposition of 3x3 matrix
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
OutputArray Qx=noArray(),
OutputArray Qy=noArray(),
OutputArray Qz=noArray());
//! Decomposes the projection matrix into the camera matrix, the rotation matrix and the translation vector
CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
OutputArray rotMatrix, OutputArray transVect,
OutputArray rotMatrixX=noArray(),
OutputArray rotMatrixY=noArray(),
OutputArray rotMatrixZ=noArray(),
OutputArray eulerAngles=noArray() );
//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
OutputArray dABdA,
OutputArray dABdB );
//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
InputArray rvec2, InputArray tvec2,
OutputArray rvec3, OutputArray tvec3,
OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
CV_EXPORTS_W void projectPoints( InputArray objectPoints,
InputArray rvec, InputArray tvec,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray imagePoints,
OutputArray jacobian=noArray(),
double aspectRatio=0 );
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
enum
{
ITERATIVE=CV_ITERATIVE,
EPNP=CV_EPNP,
P3P=CV_P3P
};
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec,
bool useExtrinsicGuess=false, int flags=ITERATIVE);
//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
InputArray imagePoints,
InputArray cameraMatrix,
InputArray distCoeffs,
OutputArray rvec,
OutputArray tvec,
bool useExtrinsicGuess = false,
int iterationsCount = 100,
float reprojectionError = 8.0,
int minInliersCount = 100,
OutputArray inliers = noArray(),
int flags = ITERATIVE);
//! initializes camera matrix from a few 3D points and the corresponding projections.
CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize, double aspectRatio=1. );
enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
//! finds checkerboard pattern of the specified size in the image
CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
OutputArray corners,
int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
//! finds subpixel-accurate positions of the chessboard corners
CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
//! draws the checkerboard pattern (found or partly found) in the image
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
CALIB_CB_CLUSTERING = 4 };
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
//! deprecated function; use findCirclesGrid() instead
CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
enum
{
CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
CALIB_FIX_K1 = CV_CALIB_FIX_K1,
CALIB_FIX_K2 = CV_CALIB_FIX_K2,
CALIB_FIX_K3 = CV_CALIB_FIX_K3,
CALIB_FIX_K4 = CV_CALIB_FIX_K4,
CALIB_FIX_K5 = CV_CALIB_FIX_K5,
CALIB_FIX_K6 = CV_CALIB_FIX_K6,
CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
// only for stereo
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
// for stereo rectification
CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
};
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints,
Size imageSize,
CV_OUT InputOutputArray cameraMatrix,
CV_OUT InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags=0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
Size imageSize,
double apertureWidth,
double apertureHeight,
CV_OUT double& fovx,
CV_OUT double& fovy,
CV_OUT double& focalLength,
CV_OUT Point2d& principalPoint,
CV_OUT double& aspectRatio );
//! finds intrinsic and extrinsic parameters of a stereo camera
CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
CV_OUT InputOutputArray cameraMatrix1,
CV_OUT InputOutputArray distCoeffs1,
CV_OUT InputOutputArray cameraMatrix2,
CV_OUT InputOutputArray distCoeffs2,
Size imageSize, OutputArray R,
OutputArray T, OutputArray E, OutputArray F,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
int flags=CALIB_FIX_INTRINSIC );
//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
Size imageSize, InputArray R, InputArray T,
OutputArray R1, OutputArray R2,
OutputArray P1, OutputArray P2,
OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
double alpha=-1, Size newImageSize=Size(),
CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
InputArray F, Size imgSize,
OutputArray H1, OutputArray H2,
double threshold=5 );
//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
InputArray cameraMatrix2, InputArray distCoeffs2,
InputArray cameraMatrix3, InputArray distCoeffs3,
InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
Size imageSize, InputArray R12, InputArray T12,
InputArray R13, InputArray T13,
OutputArray R1, OutputArray R2, OutputArray R3,
OutputArray P1, OutputArray P2, OutputArray P3,
OutputArray Q, double alpha, Size newImgSize,
CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
//! returns the optimal new camera matrix
CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
Size imageSize, double alpha, Size newImgSize=Size(),
CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
//! for backward compatibility
CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
//! the algorithm for finding fundamental matrix
enum
{
FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
};
//! finds fundamental matrix from a set of corresponding 2D points
CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
int method=FM_RANSAC,
double param1=3., double param2=0.99,
OutputArray mask=noArray());
//! variant of findFundamentalMat for backward compatibility
CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
OutputArray mask, int method=FM_RANSAC,
double param1=3., double param2=0.99);
//! finds coordinates of the epipolar lines corresponding to the specified points
CV_EXPORTS void computeCorrespondEpilines( InputArray points,
int whichImage, InputArray F,
OutputArray lines );
CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
InputArray projPoints1, InputArray projPoints2,
OutputArray points4D );
CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
OutputArray newPoints1, OutputArray newPoints2 );
template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
/*!
Block Matching Stereo Correspondence Algorithm
The class implements the block matching (BM) stereo correspondence algorithm by K. Konolige.
*/
class CV_EXPORTS_W StereoBM
{
public:
enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1,
BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 };
//! the default constructor
CV_WRAP StereoBM();
//! the full constructor taking the camera-specific preset, number of disparities and the SAD window size
CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
//! the method that reinitializes the state. The previous content is destroyed
void init(int preset, int ndisparities=0, int SADWindowSize=21);
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right,
OutputArray disparity, int disptype=CV_16S );
//! pointer to the underlying CvStereoBMState
Ptr<CvStereoBMState> state;
};
/*!
Semi-Global Block Matching Stereo Correspondence Algorithm
The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some of its modifications.
*/
class CV_EXPORTS_W StereoSGBM
{
public:
enum { DISP_SHIFT=4, DISP_SCALE = (1<<DISP_SHIFT) };
//! the default constructor
CV_WRAP StereoSGBM();
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1=0, int P2=0, int disp12MaxDiff=0,
int preFilterCap=0, int uniquenessRatio=0,
int speckleWindowSize=0, int speckleRange=0,
bool fullDP=false);
//! the destructor
virtual ~StereoSGBM();
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(InputArray left, InputArray right,
OutputArray disp);
CV_PROP_RW int minDisparity;
CV_PROP_RW int numberOfDisparities;
CV_PROP_RW int SADWindowSize;
CV_PROP_RW int preFilterCap;
CV_PROP_RW int uniquenessRatio;
CV_PROP_RW int P1;
CV_PROP_RW int P2;
CV_PROP_RW int speckleWindowSize;
CV_PROP_RW int speckleRange;
CV_PROP_RW int disp12MaxDiff;
CV_PROP_RW bool fullDP;
protected:
Mat buffer;
};
//! filters off speckles (small regions of incorrectly computed disparity)
CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff,
InputOutputArray buf=noArray() );
//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity, int numberOfDisparities,
int SADWindowSize );
//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDisp=1 );
//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
OutputArray _3dImage, InputArray Q,
bool handleMissingValues=false,
int ddepth=-1 );
CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold=3, double confidence=0.99);
}
#endif
#endif
#include "opencv2/calib3d.hpp"

View File

@@ -0,0 +1,413 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CALIB3D_C_H__
#define __OPENCV_CALIB3D_C_H__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************************\
* Camera Calibration, Pose Estimation and Stereo *
\****************************************************************************************/
typedef struct CvPOSITObject CvPOSITObject;
/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
an object given its model and projection in a weak-perspective case */
CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
double focal_length, CvTermCriteria criteria,
float* rotation_matrix, float* translation_vector);
/* Releases CvPOSITObject structure */
CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
/* updates the number of RANSAC iterations */
CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
int model_points, int max_iters );
CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
/* Calculates fundamental matrix given a set of corresponding points */
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fundamental_matrix,
int method CV_DEFAULT(CV_FM_RANSAC),
double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
CvMat* status CV_DEFAULT(NULL) );
/* For each input point on one of the images
computes parameters of the corresponding
epipolar line on the other image */
CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
int which_image,
const CvMat* fundamental_matrix,
CvMat* correspondent_lines );
/* Triangulation functions */
CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
CvMat* projPoints1, CvMat* projPoints2,
CvMat* points4D);
CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
CvMat* new_points1, CvMat* new_points2);
/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
alpha=0 - only valid pixels will be retained in the undistorted image
alpha=1 - all the source image pixels will be retained in the undistorted image
*/
CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
const CvMat* dist_coeffs,
CvSize image_size, double alpha,
CvMat* new_camera_matrix,
CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pixel_ROI CV_DEFAULT(0),
int center_principal_point CV_DEFAULT(0));
/* Converts rotation vector to rotation matrix or vice versa */
CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
CvMat* jacobian CV_DEFAULT(0) );
/* Finds perspective transformation between the object plane and image (view) plane */
CVAPI(int) cvFindHomography( const CvMat* src_points,
const CvMat* dst_points,
CvMat* homography,
int method CV_DEFAULT(0),
double ransacReprojThreshold CV_DEFAULT(3),
CvMat* mask CV_DEFAULT(0));
/* Computes RQ decomposition for 3x3 matrices */
CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
CvMat *matrixQx CV_DEFAULT(NULL),
CvMat *matrixQy CV_DEFAULT(NULL),
CvMat *matrixQz CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes projection matrix decomposition */
CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
CvMat *rotMatr, CvMat *posVect,
CvMat *rotMatrX CV_DEFAULT(NULL),
CvMat *rotMatrY CV_DEFAULT(NULL),
CvMat *rotMatrZ CV_DEFAULT(NULL),
CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
/* Computes d(AB)/dA and d(AB)/dB */
CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
const CvMat* _rvec2, const CvMat* _tvec2,
CvMat* _rvec3, CvMat* _tvec3,
CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
/* Projects object points to the view plane using
the specified extrinsic and intrinsic camera parameters */
CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
const CvMat* translation_vector, const CvMat* camera_matrix,
const CvMat* distortion_coeffs, CvMat* image_points,
CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
CvMat* dpddist CV_DEFAULT(NULL),
double aspect_ratio CV_DEFAULT(0));
/* Finds extrinsic camera parameters from
a few known corresponding point pairs and intrinsic parameters */
CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* camera_matrix,
const CvMat* distortion_coeffs,
CvMat* rotation_vector,
CvMat* translation_vector,
int use_extrinsic_guess CV_DEFAULT(0) );
/* Computes initial estimate of the intrinsic camera parameters
in case of planar calibration target (e.g. chessboard) */
CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
const CvMat* image_points,
const CvMat* npoints, CvSize image_size,
CvMat* camera_matrix,
double aspect_ratio CV_DEFAULT(1.) );
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_CB_FAST_CHECK 8
// Performs a fast check whether a chessboard is in the input image. This is a workaround for
// the problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
/* Detects corners on a chessboard calibration pattern */
CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
CvPoint2D32f* corners,
int* corner_count CV_DEFAULT(NULL),
int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
/* Draws individual chessboard corners or the whole chessboard detected */
CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
CvPoint2D32f* corners,
int count, int pattern_was_found );
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_K4 2048
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
#define CV_CALIB_RATIONAL_MODEL 16384
#define CV_CALIB_THIN_PRISM_MODEL 32768
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
/* Computes the transformation from one camera coordinate system to another one
from a few corresponding views of the same calibration target. Optionally, calibrates
both cameras */
CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
const CvMat* image_points2, const CvMat* npoints,
CvMat* camera_matrix1, CvMat* dist_coeffs1,
CvMat* camera_matrix2, CvMat* dist_coeffs2,
CvSize image_size, CvMat* R, CvMat* T,
CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
#define CV_CALIB_ZERO_DISPARITY 1024
/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
views parallel (=> to make all the epipolar lines horizontal or vertical) */
CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
CvSize image_size, const CvMat* R, const CvMat* T,
CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
CvMat* Q CV_DEFAULT(0),
int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
double alpha CV_DEFAULT(-1),
CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
CvRect* valid_pix_ROI1 CV_DEFAULT(0),
CvRect* valid_pix_ROI2 CV_DEFAULT(0));
/* Computes rectification transformations for uncalibrated pair of images using a set
of point correspondences */
CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
const CvMat* F, CvSize img_size,
CvMat* H1, CvMat* H2,
double threshold CV_DEFAULT(5));
/* stereo correspondence parameters and functions */
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_XSOBEL 1
/* Block matching algorithm structure */
typedef struct CvStereoBMState
{
// pre-filtering (normalization of input images)
int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
int preFilterSize; // averaging window size: ~5x5..21x21
int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
// correspondence using Sum of Absolute Difference (SAD)
int SADWindowSize; // ~5x5..21x21
int minDisparity; // minimum disparity (can be negative)
int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
// post-filtering
int textureThreshold; // the disparity is only computed for pixels
// with textured enough neighborhood
int uniquenessRatio; // accept the computed disparity d* only if
// SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
// for any d != d*+/-1 within the search range.
int speckleWindowSize; // disparity variation window
int speckleRange; // acceptable range of variation in window
int trySmallerWindows; // if 1, the results may be more accurate,
// at the expense of slower processing
CvRect roi1, roi2;
int disp12MaxDiff;
// temporary buffers
CvMat* preFilteredImg0;
CvMat* preFilteredImg1;
CvMat* slidingSumBuf;
CvMat* cost;
CvMat* disp;
} CvStereoBMState;
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
int numberOfDisparities CV_DEFAULT(0));
CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
CvArr* disparity, CvStereoBMState* state );
CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize );
CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
int minDisparity, int numberOfDisparities,
int disp12MaxDiff CV_DEFAULT(1) );
/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
CvArr* _3dImage, const CvMat* Q,
int handleMissingValues CV_DEFAULT(0) );
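For orientation, a minimal usage sketch of how the block-matching state and the C functions declared above fit together. It is not part of the header: the image files, window size, number of disparities and the Q matrix (assumed to come from cvStereoRectify) are placeholder assumptions, and the inputs are assumed to be already rectified 8-bit images.

/* Hedged sketch: disparity from a rectified pair via the legacy BM API,
   then reprojection to 3D. File names and parameters are placeholders. */
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/calib3d/calib3d_c.h>

static void stereo_bm_sketch( const CvMat* Q /* 4x4 reprojection matrix from cvStereoRectify */ )
{
    IplImage* left  = cvLoadImage( "left.png",  CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* right = cvLoadImage( "right.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !left || !right )
        return;

    CvStereoBMState* bm = cvCreateStereoBMState( CV_STEREO_BM_BASIC, 64 );
    bm->SADWindowSize = 15;                    /* ~5x5..21x21, see the struct above */
    bm->uniquenessRatio = 10;

    CvMat* disp16 = cvCreateMat( left->height, left->width, CV_16S );
    cvFindStereoCorrespondenceBM( left, right, disp16, bm );

    CvMat* disp = cvCreateMat( left->height, left->width, CV_32F );
    cvConvertScale( disp16, disp, 1./16 );     /* BM output is fixed-point, scaled by 16 */

    CvMat* xyz = cvCreateMat( left->height, left->width, CV_32FC3 );
    cvReprojectImageTo3D( disp, xyz, Q, 1 /* handleMissingValues */ );

    cvReleaseMat( &xyz ); cvReleaseMat( &disp ); cvReleaseMat( &disp16 );
    cvReleaseStereoBMState( &bm );
    cvReleaseImage( &left ); cvReleaseImage( &right );
}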
#ifdef __cplusplus
} // extern "C"
//////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS CvLevMarq
{
public:
CvLevMarq();
CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
~CvLevMarq();
void init( int nparams, int nerrs, CvTermCriteria criteria=
cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
bool completeSymmFlag=false );
bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
void clear();
void step();
enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
cv::Ptr<CvMat> mask;
cv::Ptr<CvMat> prevParam;
cv::Ptr<CvMat> param;
cv::Ptr<CvMat> J;
cv::Ptr<CvMat> err;
cv::Ptr<CvMat> JtJ;
cv::Ptr<CvMat> JtJN;
cv::Ptr<CvMat> JtErr;
cv::Ptr<CvMat> JtJV;
cv::Ptr<CvMat> JtJW;
double prevErrNorm, errNorm;
int lambdaLg10;
CvTermCriteria criteria;
int state;
int iters;
bool completeSymmFlag;
};
#endif
#endif /* __OPENCV_CALIB3D_C_H__ */
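The CvLevMarq class declared above is driven through a small state machine (STARTED, CALC_J, CHECK_ERR, DONE). The sketch below mirrors the driver loops in calibration.cpp further down in this patch; computeJacobian and computeResiduals are hypothetical callbacks standing in for a problem-specific cost function, and nparams, nerrs and initialGuess are assumed inputs.

// Hedged sketch of a CvLevMarq driving loop; the callbacks are hypothetical.
#include <float.h>
#include <opencv2/core/core_c.h>
#include <opencv2/calib3d/calib3d_c.h>

void computeJacobian ( const CvMat* param, CvMat* J   );  // hypothetical
void computeResiduals( const CvMat* param, CvMat* err );  // hypothetical

static void levmarq_sketch( int nparams, int nerrs, const CvMat* initialGuess )
{
    CvLevMarq solver( nparams, nerrs,
                      cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, DBL_EPSILON) );
    cvCopy( initialGuess, solver.param );        // seed the parameter vector

    for(;;)
    {
        const CvMat* param = 0;
        CvMat *J = 0, *err = 0;
        if( !solver.update( param, J, err ) || !err )
            break;                               // solver reached DONE (or converged)
        if( J )
            computeJacobian( param, J );         // Jacobian only when requested (CALC_J)
        computeResiduals( param, err );          // residuals are refreshed every step
    }
    // solver.param now holds the refined parameter vector
}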

View File

@@ -22,7 +22,7 @@ PERF_TEST_P(String_Size, asymm_circles_grid, testing::Values(
)
)
{
String filename = getDataPath(get<0>(GetParam()));
string filename = getDataPath(get<0>(GetParam()));
Size gridSize = get<1>(GetParam());
Mat frame = imread(filename);

View File

@@ -1,5 +1,8 @@
#include "perf_precomp.hpp"
#include "opencv2/core/internal.hpp"
#ifdef HAVE_TBB
#include "tbb/task_scheduler_init.h"
#endif
using namespace std;
using namespace cv;
@@ -7,7 +10,7 @@ using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
CV_ENUM(pnpAlgo, CV_ITERATIVE, CV_EPNP /*, CV_P3P*/)
CV_ENUM(pnpAlgo, ITERATIVE, EPNP /*, P3P*/)
typedef std::tr1::tuple<int, pnpAlgo> PointsNum_Algo_t;
typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
@@ -16,8 +19,8 @@ typedef perf::TestBaseWithParam<int> PointsNum;
PERF_TEST_P(PointsNum_Algo, solvePnP,
testing::Combine(
testing::Values(4, 3*9, 7*13),
testing::Values((int)CV_ITERATIVE, (int)CV_EPNP)
testing::Values(/*4,*/ 3*9, 7*13), //TODO: find why results on 4 points are too unstable
testing::Values((int)ITERATIVE, (int)EPNP)
)
)
{
@@ -55,7 +58,7 @@ PERF_TEST_P(PointsNum_Algo, solvePnP,
}
SANITY_CHECK(rvec, 1e-6);
SANITY_CHECK(tvec, 1e-6);
SANITY_CHECK(tvec, 1e-3);
}
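For reference, a hedged call-site sketch using the unprefixed flag names this test now exercises (cv::ITERATIVE, cv::EPNP; cv::P3P needs exactly four points). The intrinsics, pose and cube-corner object used to synthesize the observations are made-up values, not taken from the test data.

// Hedged sketch, not part of the test: a minimal solvePnP call with the renamed flags.
#include <opencv2/calib3d.hpp>
#include <vector>

static void solvePnP_sketch()
{
    std::vector<cv::Point3f> objectPoints;            // unit-cube corners
    for (int i = 0; i < 8; i++)
        objectPoints.push_back(cv::Point3f((float)(i & 1), (float)((i >> 1) & 1), (float)(i >> 2)));

    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat dist = cv::Mat::zeros(5, 1, CV_64F);
    cv::Mat rvec0 = (cv::Mat_<double>(3, 1) << 0.1, -0.2, 0.05);
    cv::Mat tvec0 = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 5.0);

    std::vector<cv::Point2f> imagePoints;             // synthetic observations
    cv::projectPoints(objectPoints, rvec0, tvec0, K, dist, imagePoints);

    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints, imagePoints, K, dist, rvec, tvec,
                 false /* useExtrinsicGuess */, cv::ITERATIVE);
    // cv::EPNP selects the EPnP solver; cv::P3P requires exactly 4 point pairs.
}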
PERF_TEST(PointsNum_Algo, solveP3P)
@@ -86,17 +89,18 @@ PERF_TEST(PointsNum_Algo, solveP3P)
add(points2d, noise, points2d);
declare.in(points3d, points2d);
declare.time(100);
TEST_CYCLE_N(1000)
{
solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, CV_P3P);
solvePnP(points3d, points2d, intrinsics, distortion, rvec, tvec, false, P3P);
}
SANITY_CHECK(rvec, 1e-6);
SANITY_CHECK(tvec, 1e-6);
}
PERF_TEST_P(PointsNum, SolvePnPRansac, testing::Values(4, 3*9, 7*13))
PERF_TEST_P(PointsNum, DISABLED_SolvePnPRansac, testing::Values(4, 3*9, 7*13))
{
int count = GetParam();

View File

@@ -1,15 +1,18 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# pragma GCC diagnostic ignored "-Wmissing-prototypes" //OSX
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts/ts.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ts.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined

View File

@@ -40,49 +40,27 @@
//
//M*/
#ifndef __OPENCV_SIMPLEFLOW_H__
#define __OPENCV_SIMPLEFLOW_H__
#include "precomp.hpp"
#include <vector>
using namespace cv;
using namespace std;
//////////////////////////////////////////////////////////////////////////////////////////////////////////
#define MASK_TRUE_VALUE 255
#define UNKNOWN_FLOW_THRESH 1e9
namespace cv {
//////////////////////////////////////////////////////////////////////////////////////////////////////////
inline static float dist(const Vec3b& p1, const Vec3b& p2) {
return (float)((p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]) +
(p1[2] - p2[2]) * (p1[2] - p2[2]));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
bool cv::initModule_calib3d(void)
{
bool all = true;
all &= !RANSACPointSetRegistrator_info_auto.name().empty();
all &= !LMeDSPointSetRegistrator_info_auto.name().empty();
all &= !LMSolverImpl_info_auto.name().empty();
return all;
}
inline static float dist(const Vec2f& p1, const Vec2f& p2) {
return (p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]);
}
inline static float dist(const Point2f& p1, const Point2f& p2) {
return (p1.x - p2.x) * (p1.x - p2.x) +
(p1.y - p2.y) * (p1.y - p2.y);
}
inline static float dist(float x1, float y1, float x2, float y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
inline static int dist(int x1, int y1, int x2, int y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
template<class T>
inline static T min(T t1, T t2, T t3) {
return (t1 <= t2 && t1 <= t3) ? t1 : min(t2, t3);
}
}
#endif

View File

@@ -60,6 +60,8 @@
\************************************************************************************/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include "circlesgrid.hpp"
#include <stdarg.h>
@@ -69,7 +71,7 @@
#ifdef DEBUG_CHESSBOARD
# include "opencv2/opencv_modules.hpp"
# ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
# else
# undef DEBUG_CHESSBOARD
# endif
@@ -1095,7 +1097,7 @@ icvOrderQuad(CvCBQuad *quad, CvCBCorner *corner, int common)
static int
icvCleanFoundConnectedQuads( int quad_count, CvCBQuad **quad_group, CvSize pattern_size )
{
CvPoint2D32f center = {0,0};
CvPoint2D32f center;
int i, j, k;
// number of quads this pattern should contain
int count = ((pattern_size.width + 1)*(pattern_size.height + 1) + 1)/2;
@@ -1111,7 +1113,7 @@ icvCleanFoundConnectedQuads( int quad_count, CvCBQuad **quad_group, CvSize patte
for( i = 0; i < quad_count; i++ )
{
CvPoint2D32f ci = {0,0};
CvPoint2D32f ci;
CvCBQuad* q = quad_group[i];
for( j = 0; j < 4; j++ )
@@ -1833,7 +1835,7 @@ cvDrawChessboardCorners( CvArr* _image, CvSize pattern_size,
if( !found )
{
CvScalar color = {{0,0,255}};
CvScalar color(0,0,255,0);
if( cn == 1 )
color = cvScalarAll(200);
color.val[0] *= scale;
@@ -1856,17 +1858,17 @@ cvDrawChessboardCorners( CvArr* _image, CvSize pattern_size,
else
{
int x, y;
CvPoint prev_pt = {0, 0};
CvPoint prev_pt;
const int line_max = 7;
static const CvScalar line_colors[line_max] =
{
{{0,0,255}},
{{0,128,255}},
{{0,200,200}},
{{0,255,0}},
{{200,200,0}},
{{255,0,0}},
{{255,0,255}}
CvScalar(0,0,255),
CvScalar(0,128,255),
CvScalar(0,200,200),
CvScalar(0,255,0),
CvScalar(200,200,0),
CvScalar(255,0,0),
CvScalar(255,0,255)
};
for( y = 0, i = 0; y < pattern_size.height; y++ )
@@ -1903,7 +1905,7 @@ bool cv::findChessboardCorners( InputArray _image, Size patternSize,
OutputArray corners, int flags )
{
int count = patternSize.area()*2;
vector<Point2f> tmpcorners(count+1);
std::vector<Point2f> tmpcorners(count+1);
Mat image = _image.getMat(); CvMat c_image = image;
bool ok = cvFindChessboardCorners(&c_image, patternSize,
(CvPoint2D32f*)&tmpcorners[0], &count, flags ) > 0;
@@ -1949,11 +1951,11 @@ bool cv::findCirclesGrid( InputArray _image, Size patternSize,
CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);
Mat image = _image.getMat();
vector<Point2f> centers;
std::vector<Point2f> centers;
vector<KeyPoint> keypoints;
std::vector<KeyPoint> keypoints;
blobDetector->detect(image, keypoints);
vector<Point2f> points;
std::vector<Point2f> points;
for (size_t i = 0; i < keypoints.size(); i++)
{
points.push_back (keypoints[i].pt);

View File

@@ -41,6 +41,8 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <stdio.h>
#include <iterator>
@@ -55,247 +57,6 @@
using namespace cv;
CvLevMarq::CvLevMarq()
{
mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
lambdaLg10 = 0; state = DONE;
criteria = cvTermCriteria(0,0,0);
iters = 0;
completeSymmFlag = false;
}
CvLevMarq::CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
{
mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
init(nparams, nerrs, criteria0, _completeSymmFlag);
}
void CvLevMarq::clear()
{
mask.release();
prevParam.release();
param.release();
J.release();
err.release();
JtJ.release();
JtJN.release();
JtErr.release();
JtJV.release();
JtJW.release();
}
CvLevMarq::~CvLevMarq()
{
clear();
}
void CvLevMarq::init( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
{
if( !param || param->rows != nparams || nerrs != (err ? err->rows : 0) )
clear();
mask = cvCreateMat( nparams, 1, CV_8U );
cvSet(mask, cvScalarAll(1));
prevParam = cvCreateMat( nparams, 1, CV_64F );
param = cvCreateMat( nparams, 1, CV_64F );
JtJ = cvCreateMat( nparams, nparams, CV_64F );
JtJN = cvCreateMat( nparams, nparams, CV_64F );
JtJV = cvCreateMat( nparams, nparams, CV_64F );
JtJW = cvCreateMat( nparams, 1, CV_64F );
JtErr = cvCreateMat( nparams, 1, CV_64F );
if( nerrs > 0 )
{
J = cvCreateMat( nerrs, nparams, CV_64F );
err = cvCreateMat( nerrs, 1, CV_64F );
}
prevErrNorm = DBL_MAX;
lambdaLg10 = -3;
criteria = criteria0;
if( criteria.type & CV_TERMCRIT_ITER )
criteria.max_iter = MIN(MAX(criteria.max_iter,1),1000);
else
criteria.max_iter = 30;
if( criteria.type & CV_TERMCRIT_EPS )
criteria.epsilon = MAX(criteria.epsilon, 0);
else
criteria.epsilon = DBL_EPSILON;
state = STARTED;
iters = 0;
completeSymmFlag = _completeSymmFlag;
}
bool CvLevMarq::update( const CvMat*& _param, CvMat*& matJ, CvMat*& _err )
{
double change;
matJ = _err = 0;
assert( !err.empty() );
if( state == DONE )
{
_param = param;
return false;
}
if( state == STARTED )
{
_param = param;
cvZero( J );
cvZero( err );
matJ = J;
_err = err;
state = CALC_J;
return true;
}
if( state == CALC_J )
{
cvMulTransposed( J, JtJ, 1 );
cvGEMM( J, err, 1, 0, 0, JtErr, CV_GEMM_A_T );
cvCopy( param, prevParam );
step();
if( iters == 0 )
prevErrNorm = cvNorm(err, 0, CV_L2);
_param = param;
cvZero( err );
_err = err;
state = CHECK_ERR;
return true;
}
assert( state == CHECK_ERR );
errNorm = cvNorm( err, 0, CV_L2 );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
{
step();
_param = param;
cvZero( err );
_err = err;
state = CHECK_ERR;
return true;
}
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( ++iters >= criteria.max_iter ||
(change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
{
_param = param;
state = DONE;
return true;
}
prevErrNorm = errNorm;
_param = param;
cvZero(J);
matJ = J;
_err = err;
state = CALC_J;
return true;
}
bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm )
{
double change;
CV_Assert( err.empty() );
if( state == DONE )
{
_param = param;
return false;
}
if( state == STARTED )
{
_param = param;
cvZero( JtJ );
cvZero( JtErr );
errNorm = 0;
_JtJ = JtJ;
_JtErr = JtErr;
_errNorm = &errNorm;
state = CALC_J;
return true;
}
if( state == CALC_J )
{
cvCopy( param, prevParam );
step();
_param = param;
prevErrNorm = errNorm;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
assert( state == CHECK_ERR );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
{
step();
_param = param;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( ++iters >= criteria.max_iter ||
(change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
{
_param = param;
state = DONE;
return false;
}
prevErrNorm = errNorm;
cvZero( JtJ );
cvZero( JtErr );
_param = param;
_JtJ = JtJ;
_JtErr = JtErr;
state = CALC_J;
return true;
}
void CvLevMarq::step()
{
const double LOG10 = log(10.);
double lambda = exp(lambdaLg10*LOG10);
int i, j, nparams = param->rows;
for( i = 0; i < nparams; i++ )
if( mask->data.ptr[i] == 0 )
{
double *row = JtJ->data.db + i*nparams, *col = JtJ->data.db + i;
for( j = 0; j < nparams; j++ )
row[j] = col[j*nparams] = 0;
JtErr->data.db[i] = 0;
}
if( !err )
cvCompleteSymm( JtJ, completeSymmFlag );
#if 1
cvCopy( JtJ, JtJN );
for( i = 0; i < nparams; i++ )
JtJN->data.db[(nparams+1)*i] *= 1. + lambda;
#else
cvSetIdentity(JtJN, cvRealScalar(lambda));
cvAdd( JtJ, JtJN, JtJN );
#endif
cvSVD( JtJN, JtJW, 0, JtJV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
cvSVBkSb( JtJW, JtJV, JtJV, JtErr, param, CV_SVD_U_T + CV_SVD_V_T );
for( i = 0; i < nparams; i++ )
param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? param->data.db[i] : 0);
}
// reimplementation of dAB.m
CV_IMPL void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )
{
@@ -412,7 +173,7 @@ CV_IMPL void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
cvRodrigues2( &r1, &R1, &dR1dr1 );
cvRodrigues2( &r2, &R2, &dR2dr2 );
if( _rvec3 || dr3dr1 || dr3dr1 )
if( _rvec3 || dr3dr1 || dr3dr2 )
{
double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];
double _W1[9*3], _W2[3*3];
@@ -546,7 +307,7 @@ CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
ry = src->data.db[step];
rz = src->data.db[step*2];
}
theta = sqrt(rx*rx + ry*ry + rz*rz);
theta = std::sqrt(rx*rx + ry*ry + rz*rz);
if( theta < DBL_EPSILON )
{
@@ -632,7 +393,7 @@ CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
ry = R[2] - R[6];
rz = R[3] - R[1];
s = sqrt((rx*rx + ry*ry + rz*rz)*0.25);
s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
c = (R[0] + R[4] + R[8] - 1)*0.5;
c = c > 1. ? 1. : c < -1. ? -1. : c;
theta = acos(c);
@@ -646,14 +407,14 @@ CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
else
{
t = (R[0] + 1)*0.5;
rx = sqrt(MAX(t,0.));
rx = std::sqrt(MAX(t,0.));
t = (R[4] + 1)*0.5;
ry = sqrt(MAX(t,0.))*(R[1] < 0 ? -1. : 1.);
ry = std::sqrt(MAX(t,0.))*(R[1] < 0 ? -1. : 1.);
t = (R[8] + 1)*0.5;
rz = sqrt(MAX(t,0.))*(R[2] < 0 ? -1. : 1.);
rz = std::sqrt(MAX(t,0.))*(R[2] < 0 ? -1. : 1.);
if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R[5] > 0) != (ry*rz > 0) )
rz = -rz;
theta /= sqrt(rx*rx + ry*ry + rz*rz);
theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
rx *= theta;
ry *= theta;
rz *= theta;
@@ -762,7 +523,7 @@ CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
}
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector";
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12 or 12x1 floating-point vector";
CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
const CvMat* r_vec,
@@ -781,7 +542,7 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
int calc_derivatives;
const CvPoint3D64f* M;
CvPoint2D64f* m;
double r[3], R[9], dRdr[27], t[3], a[9], k[8] = {0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
double r[3], R[9], dRdr[27], t[3], a[9], k[12] = {0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
@@ -884,7 +645,8 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
(distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
(distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8) )
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8 &&
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 12) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
_k = cvMat( distCoeffs->rows, distCoeffs->cols,
@@ -966,8 +728,8 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
{
if( !CV_IS_MAT(dpdk) ||
(CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
dpdk->rows != count*2 || (dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
CV_Error( CV_StsBadArg, "dp/df must be 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
dpdk->rows != count*2 || (dpdk->cols != 12 && dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
CV_Error( CV_StsBadArg, "dp/df must be 2Nx12, 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
if( !distCoeffs )
CV_Error( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
@@ -1004,8 +766,8 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
a3 = r2 + 2*y*y;
cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;
xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4;
yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4;
m[i].x = xd*fx + cx;
m[i].y = yd*fy + cy;
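Written out, the model these two lines now implement (using k1..k6 for the radial terms, p1, p2 for the tangential terms, and s1..s4 for the thin-prism terms stored in k[8]..k[11]) is

\[
x_d = x\,\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x y + p_2 (r^2 + 2x^2) + s_1 r^2 + s_2 r^4
\]
\[
y_d = y\,\frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2y^2) + 2 p_2 x y + s_3 r^2 + s_4 r^4
\]

with \(r^2 = x^2 + y^2\); the pixel coordinates are then \(u = f_x x_d + c_x\), \(v = f_y y_d + c_y\), as in the assignments to m[i] just above.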
@@ -1062,6 +824,17 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
dpdk_p[dpdk_step+6] = fy*y*cdist*(-icdist2)*icdist2*r4;
dpdk_p[7] = fx*x*icdist2*cdist*(-icdist2)*icdist2*r6;
dpdk_p[dpdk_step+7] = fy*y*cdist*(-icdist2)*icdist2*r6;
if( _dpdk->cols > 8 )
{
dpdk_p[8] = fx*r2; //s1
dpdk_p[9] = fx*r4; //s2
dpdk_p[10] = 0;//s3
dpdk_p[11] = 0;//s4
dpdk_p[dpdk_step+8] = 0; //s1
dpdk_p[dpdk_step+9] = 0; //s2
dpdk_p[dpdk_step+10] = fy*r2; //s3
dpdk_p[dpdk_step+11] = fy*r4; //s4
}
}
}
}
@@ -1078,9 +851,9 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt);
double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
double dmxdt = fx*(dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt +
k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]));
k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt);
double dmydt = fy*(dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt +
k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt);
k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt);
dpdt_p[j] = dmxdt;
dpdt_p[dpdt_step+j] = dmydt;
}
@@ -1116,9 +889,9 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
double dicdist2_dr = -icdist2*icdist2*(k[5]*dr2dr + 2*k[6]*r2*dr2dr + 3*k[7]*r4*dr2dr);
double da1dr = 2*(x*dydr + y*dxdr);
double dmxdr = fx*(dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr +
k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr));
k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr) + k[8]*dr2dr + 2*r2*k[9]*dr2dr);
double dmydr = fy*(dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr +
k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr);
k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr + k[10]*dr2dr + 2*r2*k[11]*dr2dr);
dpdr_p[j] = dmxdr;
dpdr_p[dpdr_step+j] = dmydr;
}
@@ -1249,8 +1022,8 @@ CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
cvGetCol( &matH, &_h1, 0 );
_h2 = _h1; _h2.data.db++;
_h3 = _h2; _h3.data.db++;
h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);
h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);
cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) );
cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) );
@@ -1424,7 +1197,7 @@ CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
}
for( j = 0; j < 4; j++ )
n[j] = 1./sqrt(n[j]);
n[j] = 1./std::sqrt(n[j]);
for( j = 0; j < 3; j++ )
{
@@ -1438,8 +1211,8 @@ CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
}
cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
a[0] = sqrt(fabs(1./f[0]));
a[4] = sqrt(fabs(1./f[1]));
a[0] = std::sqrt(fabs(1./f[0]));
a[4] = std::sqrt(fabs(1./f[1]));
if( aspectRatio != 0 )
{
double tf = (a[0] + a[4])/(aspectRatio + 1.);
@@ -1458,12 +1231,12 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
CvMat* rvecs, CvMat* tvecs, int flags, CvTermCriteria termCrit )
{
const int NINTRINSIC = 12;
const int NINTRINSIC = 16;
Ptr<CvMat> matM, _m, _Ji, _Je, _err;
CvLevMarq solver;
double reprojErr = 0;
double A[9], k[8] = {0,0,0,0,0,0,0,0};
double A[9], k[12] = {0,0,0,0,0,0,0,0,0,0,0,0};
CvMat matA = cvMat(3, 3, CV_64F, A), _k;
int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
double aspectRatio = 0.;
@@ -1480,6 +1253,9 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
(npoints->rows != 1 && npoints->cols != 1) )
CV_Error( CV_StsUnsupportedFormat,
"the array of point counters must be 1-dimensional integer vector" );
//when the thin prism model is used the distortion coefficients matrix must have 12 parameters
if((flags & CV_CALIB_THIN_PRISM_MODEL) && (distCoeffs->cols*distCoeffs->rows != 12))
CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
nimages = npoints->rows*npoints->cols;
npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
@@ -1517,7 +1293,8 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
(distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
(distCoeffs->cols*distCoeffs->rows != 4 &&
distCoeffs->cols*distCoeffs->rows != 5 &&
distCoeffs->cols*distCoeffs->rows != 8) )
distCoeffs->cols*distCoeffs->rows != 8 &&
distCoeffs->cols*distCoeffs->rows != 12) )
CV_Error( CV_StsBadArg, cvDistCoeffErr );
for( i = 0; i < nimages; i++ )
@@ -1613,6 +1390,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];
param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];
param[8] = k[4]; param[9] = k[5]; param[10] = k[6]; param[11] = k[7];
param[12] = k[8]; param[13] = k[9]; param[14] = k[10]; param[15] = k[11];
if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
mask[0] = mask[1] = 0;
@@ -1637,6 +1415,16 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
mask[10] = 0;
if( flags & CV_CALIB_FIX_K6 )
mask[11] = 0;
if(!(flags & CV_CALIB_THIN_PRISM_MODEL))
flags |= CALIB_FIX_S1_S2_S3_S4;
if(flags & CALIB_FIX_S1_S2_S3_S4)
{
mask[12] = 0;
mask[13] = 0;
mask[14] = 0;
mask[15] = 0;
}
}
// 2. initialize extrinsic parameters
@@ -1672,6 +1460,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
A[0] = param[0]; A[4] = param[1]; A[2] = param[2]; A[5] = param[3];
k[0] = param[4]; k[1] = param[5]; k[2] = param[6]; k[3] = param[7];
k[4] = param[8]; k[5] = param[9]; k[6] = param[10]; k[7] = param[11];
k[8] = param[12];k[9] = param[13];k[10] = param[14];k[11] = param[15];
if( !proceed )
break;
@@ -1699,7 +1488,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
if( _JtJ || _JtErr )
{
cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
(flags & CV_CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
(flags & CV_CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
(flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
@@ -2721,7 +2510,7 @@ CV_IMPL int cvStereoRectifyUncalibrated(
cvMatMul( &T, &E2, &E2 );
int mirror = e2[0] < 0;
double d = MAX(sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
double d = MAX(std::sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
double alpha = e2[0]/d;
double beta = e2[1]/d;
double r[] =
@@ -2778,17 +2567,13 @@ CV_IMPL int cvStereoRectifyUncalibrated(
cvPerspectiveTransform( _m1, _m1, &H0 );
cvPerspectiveTransform( _m2, _m2, &H2 );
CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;
double a[9], atb[3], x[3];
CvMat AtA = cvMat( 3, 3, CV_64F, a );
CvMat AtB = cvMat( 3, 1, CV_64F, atb );
double x[3];
CvMat X = cvMat( 3, 1, CV_64F, x );
cvConvertPointsHomogeneous( _m1, &A );
cvReshape( &A, &A, 1, npoints );
cvReshape( _m2, &BxBy, 1, npoints );
cvGetCol( &BxBy, &B, 0 );
cvGEMM( &A, &A, 1, 0, 0, &AtA, CV_GEMM_A_T );
cvGEMM( &A, &B, 1, 0, 0, &AtB, CV_GEMM_A_T );
cvSolve( &AtA, &AtB, &X, CV_SVD_SYM );
cvSolve( &A, &B, &X, CV_SVD );
double ha[] =
{
@@ -2845,7 +2630,7 @@ void cv::reprojectImageTo3D( InputArray _disparity,
int x, cols = disparity.cols;
CV_Assert( cols >= 0 );
vector<float> _sbuf(cols+1), _dbuf(cols*3+1);
std::vector<float> _sbuf(cols+1), _dbuf(cols*3+1);
float* sbuf = &_sbuf[0], *dbuf = &_dbuf[0];
double minDisparity = FLT_MAX;
@@ -2905,7 +2690,7 @@ void cv::reprojectImageTo3D( InputArray _disparity,
for( x = 0; x < cols*3; x++ )
{
int ival = cvRound(dptr[x]);
dptr0[x] = CV_CAST_16S(ival);
dptr0[x] = cv::saturate_cast<short>(ival);
}
}
else if( dtype == CV_32SC3 )
@@ -2962,7 +2747,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
*/
s = matM[2][1];
c = matM[2][2];
z = 1./sqrt(c * c + s * s + DBL_EPSILON);
z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
c *= z;
s *= z;
@@ -2981,7 +2766,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
*/
s = -matR[2][0];
c = matR[2][2];
z = 1./sqrt(c * c + s * s + DBL_EPSILON);
z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
c *= z;
s *= z;
@@ -3001,7 +2786,7 @@ cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
s = matM[1][0];
c = matM[1][1];
z = 1./sqrt(c * c + s * s + DBL_EPSILON);
z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
c *= z;
s *= z;
@@ -3220,13 +3005,14 @@ static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
{
Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 8) : Size(8, 1), rtype);
Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 12) : Size(12, 1), rtype);
if( distCoeffs0.size() == Size(1, 4) ||
distCoeffs0.size() == Size(1, 5) ||
distCoeffs0.size() == Size(1, 8) ||
distCoeffs0.size() == Size(4, 1) ||
distCoeffs0.size() == Size(5, 1) ||
distCoeffs0.size() == Size(8, 1) )
distCoeffs0.size() == Size(8, 1) ||
distCoeffs0.size() == Size(12, 1) )
{
Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
distCoeffs0.convertTo(dstCoeffs, rtype);
@@ -3364,7 +3150,11 @@ void cv::projectPoints( InputArray _opoints,
CvMat c_cameraMatrix = cameraMatrix;
CvMat c_rvec = rvec, c_tvec = tvec;
double dc0buf[5]={0};
Mat dc0(5,1,CV_64F,dc0buf);
Mat distCoeffs = _distCoeffs.getMat();
if( distCoeffs.empty() )
distCoeffs = dc0;
CvMat c_distCoeffs = distCoeffs;
int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
@@ -3379,8 +3169,7 @@ void cv::projectPoints( InputArray _opoints,
pdpddist = &(dpddist = jacobian.colRange(10, 10+ndistCoeffs));
}
cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix,
(distCoeffs.empty())? 0: &c_distCoeffs,
cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
&c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
}
@@ -3408,7 +3197,7 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
Mat distCoeffs = _distCoeffs.getMat();
distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
if( !(flags & CALIB_RATIONAL_MODEL) )
if( !(flags & CALIB_RATIONAL_MODEL) &&(!(flags & CALIB_THIN_PRISM_MODEL)))
distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
int i;
@@ -3484,7 +3273,7 @@ double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);
if( !(flags & CALIB_RATIONAL_MODEL) )
if( !(flags & CALIB_RATIONAL_MODEL) &&(!(flags & CALIB_THIN_PRISM_MODEL)))
{
distCoeffs1 = distCoeffs1.rows == 1 ? distCoeffs1.colRange(0, 5) : distCoeffs1.rowRange(0, 5);
distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
@@ -3684,7 +3473,7 @@ static void adjust3rdMatrix(InputArrayOfArrays _imgpt1_0,
const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
{
size_t n1 = _imgpt1_0.total(), n3 = _imgpt3_0.total();
vector<Point2f> imgpt1, imgpt3;
std::vector<Point2f> imgpt1, imgpt3;
for( int i = 0; i < (int)std::min(n1, n3); i++ )
{
@@ -3739,13 +3528,13 @@ float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3,
OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3,
OutputArray _Qmat,
double alpha, Size /*newImgSize*/,
double alpha, Size newImgSize,
Rect* roi1, Rect* roi2, int flags )
{
// first, rectify the 1-2 stereo pair
stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2,
imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat,
flags, alpha, imageSize, roi1, roi2 );
flags, alpha, newImgSize, roi1, roi2 );
Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat();

View File

@@ -40,6 +40,8 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <vector>
#include <algorithm>
@@ -49,7 +51,7 @@
#if defined(DEBUG_WINDOWS)
# include "opencv2/opencv_modules.hpp"
# ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
# else
# undef DEBUG_WINDOWS
# endif

View File

@@ -46,17 +46,16 @@
#ifdef DEBUG_CIRCLES
# include "opencv2/opencv_modules.hpp"
# ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
# else
# undef DEBUG_CIRCLES
# endif
#endif
using namespace cv;
using namespace std;
#ifdef DEBUG_CIRCLES
void drawPoints(const vector<Point2f> &points, Mat &outImage, int radius = 2, Scalar color = Scalar::all(255), int thickness = -1)
void drawPoints(const std::vector<Point2f> &points, Mat &outImage, int radius = 2, Scalar color = Scalar::all(255), int thickness = -1)
{
for(size_t i=0; i<points.size(); i++)
{
@@ -65,7 +64,7 @@ void drawPoints(const vector<Point2f> &points, Mat &outImage, int radius = 2, S
}
#endif
void CirclesGridClusterFinder::hierarchicalClustering(const vector<Point2f> points, const Size &patternSz, vector<Point2f> &patternPoints)
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> points, const Size &patternSz, std::vector<Point2f> &patternPoints)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if(tegra::hierarchicalClustering(points, patternSz, patternPoints))
@@ -96,7 +95,7 @@ void CirclesGridClusterFinder::hierarchicalClustering(const vector<Point2f> poin
}
}
vector<std::list<size_t> > clusters(points.size());
std::vector<std::list<size_t> > clusters(points.size());
for(size_t i=0; i<points.size(); i++)
{
clusters[i].push_back(i);
@@ -134,7 +133,7 @@ void CirclesGridClusterFinder::hierarchicalClustering(const vector<Point2f> poin
}
}
void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, cv::Size _patternSize, vector<Point2f>& centers)
void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, cv::Size _patternSize, std::vector<Point2f>& centers)
{
patternSize = _patternSize;
centers.clear();
@@ -143,7 +142,7 @@ void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, c
return;
}
vector<Point2f> patternPoints;
std::vector<Point2f> patternPoints;
hierarchicalClustering(points, patternSize, patternPoints);
if(patternPoints.empty())
{
@@ -156,18 +155,18 @@ void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, c
imshow("pattern points", patternPointsImage);
#endif
vector<Point2f> hull2f;
std::vector<Point2f> hull2f;
convexHull(Mat(patternPoints), hull2f, false);
const size_t cornersCount = isAsymmetricGrid ? 6 : 4;
if(hull2f.size() < cornersCount)
return;
vector<Point2f> corners;
std::vector<Point2f> corners;
findCorners(hull2f, corners);
if(corners.size() != cornersCount)
return;
vector<Point2f> outsideCorners, sortedCorners;
std::vector<Point2f> outsideCorners, sortedCorners;
if(isAsymmetricGrid)
{
findOutsideCorners(corners, outsideCorners);
@@ -179,7 +178,7 @@ void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, c
if(sortedCorners.size() != cornersCount)
return;
vector<Point2f> rectifiedPatternPoints;
std::vector<Point2f> rectifiedPatternPoints;
rectifyPatternPoints(patternPoints, sortedCorners, rectifiedPatternPoints);
if(patternPoints.size() != rectifiedPatternPoints.size())
return;
@@ -190,7 +189,7 @@ void CirclesGridClusterFinder::findGrid(const std::vector<cv::Point2f> points, c
void CirclesGridClusterFinder::findCorners(const std::vector<cv::Point2f> &hull2f, std::vector<cv::Point2f> &corners)
{
//find angles (cosines) of vertices in convex hull
vector<float> angles;
std::vector<float> angles;
for(size_t i=0; i<hull2f.size(); i++)
{
Point2f vec1 = hull2f[(i+1) % hull2f.size()] - hull2f[i % hull2f.size()];
@@ -203,12 +202,12 @@ void CirclesGridClusterFinder::findCorners(const std::vector<cv::Point2f> &hull2
//corners are the most sharp angles (6)
Mat anglesMat = Mat(angles);
Mat sortedIndices;
sortIdx(anglesMat, sortedIndices, CV_SORT_EVERY_COLUMN + CV_SORT_DESCENDING);
sortIdx(anglesMat, sortedIndices, SORT_EVERY_COLUMN + SORT_DESCENDING);
CV_Assert(sortedIndices.type() == CV_32SC1);
CV_Assert(sortedIndices.cols == 1);
const int cornersCount = isAsymmetricGrid ? 6 : 4;
Mat cornersIndices;
cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, CV_SORT_EVERY_COLUMN + CV_SORT_ASCENDING);
cv::sort(sortedIndices.rowRange(0, cornersCount), cornersIndices, SORT_EVERY_COLUMN + SORT_ASCENDING);
corners.clear();
for(int i=0; i<cornersCount; i++)
{
@@ -228,7 +227,7 @@ void CirclesGridClusterFinder::findOutsideCorners(const std::vector<cv::Point2f>
imshow("corners", cornersImage);
#endif
vector<Point2f> tangentVectors(corners.size());
std::vector<Point2f> tangentVectors(corners.size());
for(size_t k=0; k<corners.size(); k++)
{
Point2f diff = corners[(k + 1) % corners.size()] - corners[k];
@@ -299,7 +298,7 @@ void CirclesGridClusterFinder::getSortedCorners(const std::vector<cv::Point2f> &
Point2f center = std::accumulate(corners.begin(), corners.end(), Point2f(0.0f, 0.0f));
center *= 1.0 / corners.size();
vector<Point2f> centerToCorners;
std::vector<Point2f> centerToCorners;
for(size_t i=0; i<outsideCorners.size(); i++)
{
centerToCorners.push_back(outsideCorners[i] - center);
@@ -318,17 +317,17 @@ void CirclesGridClusterFinder::getSortedCorners(const std::vector<cv::Point2f> &
std::vector<Point2f>::const_iterator firstCornerIterator = std::find(hull2f.begin(), hull2f.end(), firstCorner);
sortedCorners.clear();
for(vector<Point2f>::const_iterator it = firstCornerIterator; it != hull2f.end(); it++)
for(std::vector<Point2f>::const_iterator it = firstCornerIterator; it != hull2f.end(); it++)
{
vector<Point2f>::const_iterator itCorners = std::find(corners.begin(), corners.end(), *it);
std::vector<Point2f>::const_iterator itCorners = std::find(corners.begin(), corners.end(), *it);
if(itCorners != corners.end())
{
sortedCorners.push_back(*it);
}
}
for(vector<Point2f>::const_iterator it = hull2f.begin(); it != firstCornerIterator; it++)
for(std::vector<Point2f>::const_iterator it = hull2f.begin(); it != firstCornerIterator; it++)
{
vector<Point2f>::const_iterator itCorners = std::find(corners.begin(), corners.end(), *it);
std::vector<Point2f>::const_iterator itCorners = std::find(corners.begin(), corners.end(), *it);
if(itCorners != corners.end())
{
sortedCorners.push_back(*it);
@@ -354,7 +353,7 @@ void CirclesGridClusterFinder::getSortedCorners(const std::vector<cv::Point2f> &
void CirclesGridClusterFinder::rectifyPatternPoints(const std::vector<cv::Point2f> &patternPoints, const std::vector<cv::Point2f> &sortedCorners, std::vector<cv::Point2f> &rectifiedPatternPoints)
{
//indices of corner points in pattern
vector<Point> trueIndices;
std::vector<Point> trueIndices;
trueIndices.push_back(Point(0, 0));
trueIndices.push_back(Point(patternSize.width - 1, 0));
if(isAsymmetricGrid)
@@ -365,7 +364,7 @@ void CirclesGridClusterFinder::rectifyPatternPoints(const std::vector<cv::Point2
trueIndices.push_back(Point(patternSize.width - 1, patternSize.height - 1));
trueIndices.push_back(Point(0, patternSize.height - 1));
vector<Point2f> idealPoints;
std::vector<Point2f> idealPoints;
for(size_t idx=0; idx<trueIndices.size(); idx++)
{
int i = trueIndices[idx].y;
@@ -403,14 +402,16 @@ void CirclesGridClusterFinder::parsePatternPoints(const std::vector<cv::Point2f>
else
idealPt = Point2f(j*squareSize, i*squareSize);
vector<float> query = Mat(idealPt);
int knn = 1;
vector<int> indices(knn);
vector<float> dists(knn);
Mat query(1, 2, CV_32F, &idealPt);
const int knn = 1;
int indicesbuf[knn] = {0};
float distsbuf[knn] = {0.f};
Mat indices(1, knn, CV_32S, &indicesbuf);
Mat dists(1, knn, CV_32F, &distsbuf);
flannIndex.knnSearch(query, indices, dists, knn, flann::SearchParams());
centers.push_back(patternPoints.at(indices[0]));
centers.push_back(patternPoints.at(indicesbuf[0]));
if(dists[0] > maxRectifiedDistance)
if(distsbuf[0] > maxRectifiedDistance)
{
#ifdef DEBUG_CIRCLES
cout << "Pattern not detected: too large rectified distance" << endl;
@@ -437,15 +438,15 @@ bool Graph::doesVertexExist(size_t id) const
void Graph::addVertex(size_t id)
{
assert( !doesVertexExist( id ) );
CV_Assert( !doesVertexExist( id ) );
vertices.insert(pair<size_t, Vertex> (id, Vertex()));
vertices.insert(std::pair<size_t, Vertex> (id, Vertex()));
}
void Graph::addEdge(size_t id1, size_t id2)
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
vertices[id1].neighbors.insert(id2);
vertices[id2].neighbors.insert(id1);
@@ -453,8 +454,8 @@ void Graph::addEdge(size_t id1, size_t id2)
void Graph::removeEdge(size_t id1, size_t id2)
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
vertices[id1].neighbors.erase(id2);
vertices[id2].neighbors.erase(id1);
@@ -462,8 +463,8 @@ void Graph::removeEdge(size_t id1, size_t id2)
bool Graph::areVerticesAdjacent(size_t id1, size_t id2) const
{
assert( doesVertexExist( id1 ) );
assert( doesVertexExist( id2 ) );
CV_Assert( doesVertexExist( id1 ) );
CV_Assert( doesVertexExist( id2 ) );
Vertices::const_iterator it = vertices.find(id1);
return it->second.neighbors.find(id2) != it->second.neighbors.end();
@@ -476,7 +477,7 @@ size_t Graph::getVerticesCount() const
size_t Graph::getDegree(size_t id) const
{
assert( doesVertexExist(id) );
CV_Assert( doesVertexExist(id) );
Vertices::const_iterator it = vertices.find(id);
return it->second.neighbors.size();
@@ -494,7 +495,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const
distanceMatrix.at<int> ((int)it1->first, (int)it1->first) = 0;
for (Neighbors::const_iterator it2 = it1->second.neighbors.begin(); it2 != it1->second.neighbors.end(); it2++)
{
assert( it1->first != *it2 );
CV_Assert( it1->first != *it2 );
distanceMatrix.at<int> ((int)it1->first, (int)*it2) = edgeWeight;
}
}
@@ -523,7 +524,7 @@ void Graph::floydWarshall(cv::Mat &distanceMatrix, int infinity) const
const Graph::Neighbors& Graph::getNeighbors(size_t id) const
{
assert( doesVertexExist(id) );
CV_Assert( doesVertexExist(id) );
Vertices::const_iterator it = vertices.find(id);
return it->second.neighbors;
@@ -534,7 +535,7 @@ CirclesGridFinder::Segment::Segment(cv::Point2f _s, cv::Point2f _e) :
{
}
void computeShortestPath(Mat &predecessorMatrix, int v1, int v2, vector<int> &path);
void computeShortestPath(Mat &predecessorMatrix, int v1, int v2, std::vector<int> &path);
void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessorMatrix);
CirclesGridFinderParameters::CirclesGridFinderParameters()
@@ -557,7 +558,7 @@ CirclesGridFinderParameters::CirclesGridFinderParameters()
gridType = SYMMETRIC_GRID;
}
CirclesGridFinder::CirclesGridFinder(Size _patternSize, const vector<Point2f> &testKeypoints,
CirclesGridFinder::CirclesGridFinder(Size _patternSize, const std::vector<Point2f> &testKeypoints,
const CirclesGridFinderParameters &_parameters) :
patternSize(static_cast<size_t> (_patternSize.width), static_cast<size_t> (_patternSize.height))
{
@@ -575,11 +576,11 @@ bool CirclesGridFinder::findHoles()
{
case CirclesGridFinderParameters::SYMMETRIC_GRID:
{
vector<Point2f> vectors, filteredVectors, basis;
std::vector<Point2f> vectors, filteredVectors, basis;
Graph rng(0);
computeRNG(rng, vectors);
filterOutliersByDensity(vectors, filteredVectors);
vector<Graph> basisGraphs;
std::vector<Graph> basisGraphs;
findBasis(filteredVectors, basis, basisGraphs);
findMCS(basis, basisGraphs);
break;
@@ -587,12 +588,12 @@ bool CirclesGridFinder::findHoles()
case CirclesGridFinderParameters::ASYMMETRIC_GRID:
{
vector<Point2f> vectors, tmpVectors, filteredVectors, basis;
std::vector<Point2f> vectors, tmpVectors, filteredVectors, basis;
Graph rng(0);
computeRNG(rng, tmpVectors);
rng2gridGraph(rng, vectors);
filterOutliersByDensity(vectors, filteredVectors);
vector<Graph> basisGraphs;
std::vector<Graph> basisGraphs;
findBasis(filteredVectors, basis, basisGraphs);
findMCS(basis, basisGraphs);
eraseUsedGraph(basisGraphs);
@@ -603,7 +604,7 @@ bool CirclesGridFinder::findHoles()
}
default:
CV_Error(CV_StsBadArg, "Unkown pattern type");
CV_Error(Error::StsBadArg, "Unkown pattern type");
}
return (isDetectionCorrect());
//CV_Error( 0, "Detection is not correct" );
@@ -635,7 +636,7 @@ void CirclesGridFinder::rng2gridGraph(Graph &rng, std::vector<cv::Point2f> &vect
}
}
void CirclesGridFinder::eraseUsedGraph(vector<Graph> &basisGraphs) const
void CirclesGridFinder::eraseUsedGraph(std::vector<Graph> &basisGraphs) const
{
for (size_t i = 0; i < holes.size(); i++)
{
@@ -666,7 +667,7 @@ bool CirclesGridFinder::isDetectionCorrect()
if (holes.size() != patternSize.height)
return false;
set<size_t> vertices;
std::set<size_t> vertices;
for (size_t i = 0; i < holes.size(); i++)
{
if (holes[i].size() != patternSize.width)
@@ -714,7 +715,7 @@ bool CirclesGridFinder::isDetectionCorrect()
return false;
}
set<size_t> vertices;
std::set<size_t> vertices;
for (size_t i = 0; i < largeHoles->size(); i++)
{
if (largeHoles->at(i).size() != lw)
@@ -750,12 +751,12 @@ bool CirclesGridFinder::isDetectionCorrect()
return false;
}
void CirclesGridFinder::findMCS(const vector<Point2f> &basis, vector<Graph> &basisGraphs)
void CirclesGridFinder::findMCS(const std::vector<Point2f> &basis, std::vector<Graph> &basisGraphs)
{
holes.clear();
Path longestPath;
size_t bestGraphIdx = findLongestPath(basisGraphs, longestPath);
vector<size_t> holesRow = longestPath.vertices;
std::vector<size_t> holesRow = longestPath.vertices;
while (holesRow.size() > std::max(patternSize.width, patternSize.height))
{
@@ -809,14 +810,14 @@ void CirclesGridFinder::findMCS(const vector<Point2f> &basis, vector<Graph> &bas
}
}
Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const vector<Point2f>& centers,
const vector<Point2f> &keypoints, vector<Point2f> &warpedKeypoints)
Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const std::vector<Point2f>& centers,
const std::vector<Point2f> &keypoints, std::vector<Point2f> &warpedKeypoints)
{
assert( !centers.empty() );
CV_Assert( !centers.empty() );
const float edgeLength = 30;
const Point2f offset(150, 150);
vector<Point2f> dstPoints;
std::vector<Point2f> dstPoints;
bool isClockwiseBefore =
getDirection(centers[0], centers[detectedGridSize.width - 1], centers[centers.size() - 1]) < 0;
@@ -831,10 +832,10 @@ Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const vector<Point2f>&
}
}
Mat H = findHomography(Mat(centers), Mat(dstPoints), CV_RANSAC);
Mat H = findHomography(Mat(centers), Mat(dstPoints), RANSAC);
//Mat H = findHomography( Mat( corners ), Mat( dstPoints ) );
vector<Point2f> srcKeypoints;
std::vector<Point2f> srcKeypoints;
for (size_t i = 0; i < keypoints.size(); i++)
{
srcKeypoints.push_back(keypoints[i]);
@@ -842,7 +843,7 @@ Mat CirclesGridFinder::rectifyGrid(Size detectedGridSize, const vector<Point2f>&
Mat dstKeypointsMat;
transform(Mat(srcKeypoints), dstKeypointsMat, H);
vector<Point2f> dstKeypoints;
std::vector<Point2f> dstKeypoints;
convertPointsFromHomogeneous(dstKeypointsMat, dstKeypoints);
warpedKeypoints.clear();
@@ -871,7 +872,7 @@ size_t CirclesGridFinder::findNearestKeypoint(Point2f pt) const
return bestIdx;
}
void CirclesGridFinder::addPoint(Point2f pt, vector<size_t> &points)
void CirclesGridFinder::addPoint(Point2f pt, std::vector<size_t> &points)
{
size_t ptIdx = findNearestKeypoint(pt);
if (norm(keypoints[ptIdx] - pt) > parameters.minDistanceToAddKeypoint)
@@ -886,8 +887,8 @@ void CirclesGridFinder::addPoint(Point2f pt, vector<size_t> &points)
}
}
void CirclesGridFinder::findCandidateLine(vector<size_t> &line, size_t seedLineIdx, bool addRow, Point2f basisVec,
vector<size_t> &seeds)
void CirclesGridFinder::findCandidateLine(std::vector<size_t> &line, size_t seedLineIdx, bool addRow, Point2f basisVec,
std::vector<size_t> &seeds)
{
line.clear();
seeds.clear();
@@ -911,11 +912,11 @@ void CirclesGridFinder::findCandidateLine(vector<size_t> &line, size_t seedLineI
}
}
assert( line.size() == seeds.size() );
CV_Assert( line.size() == seeds.size() );
}
void CirclesGridFinder::findCandidateHoles(vector<size_t> &above, vector<size_t> &below, bool addRow, Point2f basisVec,
vector<size_t> &aboveSeeds, vector<size_t> &belowSeeds)
void CirclesGridFinder::findCandidateHoles(std::vector<size_t> &above, std::vector<size_t> &below, bool addRow, Point2f basisVec,
std::vector<size_t> &aboveSeeds, std::vector<size_t> &belowSeeds)
{
above.clear();
below.clear();
@@ -926,12 +927,12 @@ void CirclesGridFinder::findCandidateHoles(vector<size_t> &above, vector<size_t>
size_t lastIdx = addRow ? holes.size() - 1 : holes[0].size() - 1;
findCandidateLine(below, lastIdx, addRow, basisVec, belowSeeds);
assert( below.size() == above.size() );
assert( belowSeeds.size() == aboveSeeds.size() );
assert( below.size() == belowSeeds.size() );
CV_Assert( below.size() == above.size() );
CV_Assert( belowSeeds.size() == aboveSeeds.size() );
CV_Assert( below.size() == belowSeeds.size() );
}
bool CirclesGridFinder::areCentersNew(const vector<size_t> &newCenters, const vector<vector<size_t> > &holes)
bool CirclesGridFinder::areCentersNew(const std::vector<size_t> &newCenters, const std::vector<std::vector<size_t> > &holes)
{
for (size_t i = 0; i < newCenters.size(); i++)
{
@@ -948,8 +949,8 @@ bool CirclesGridFinder::areCentersNew(const vector<size_t> &newCenters, const ve
}
void CirclesGridFinder::insertWinner(float aboveConfidence, float belowConfidence, float minConfidence, bool addRow,
const vector<size_t> &above, const vector<size_t> &below,
vector<vector<size_t> > &holes)
const std::vector<size_t> &above, const std::vector<size_t> &below,
std::vector<std::vector<size_t> > &holes)
{
if (aboveConfidence < minConfidence && belowConfidence < minConfidence)
return;
@@ -996,13 +997,13 @@ void CirclesGridFinder::insertWinner(float aboveConfidence, float belowConfidenc
}
}
float CirclesGridFinder::computeGraphConfidence(const vector<Graph> &basisGraphs, bool addRow,
const vector<size_t> &points, const vector<size_t> &seeds)
float CirclesGridFinder::computeGraphConfidence(const std::vector<Graph> &basisGraphs, bool addRow,
const std::vector<size_t> &points, const std::vector<size_t> &seeds)
{
assert( points.size() == seeds.size() );
CV_Assert( points.size() == seeds.size() );
float confidence = 0;
const size_t vCount = basisGraphs[0].getVerticesCount();
assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );
CV_Assert( basisGraphs[0].getVerticesCount() == basisGraphs[1].getVerticesCount() );
for (size_t i = 0; i < seeds.size(); i++)
{
@@ -1042,9 +1043,9 @@ float CirclesGridFinder::computeGraphConfidence(const vector<Graph> &basisGraphs
}
void CirclesGridFinder::addHolesByGraph(const vector<Graph> &basisGraphs, bool addRow, Point2f basisVec)
void CirclesGridFinder::addHolesByGraph(const std::vector<Graph> &basisGraphs, bool addRow, Point2f basisVec)
{
vector<size_t> above, below, aboveSeeds, belowSeeds;
std::vector<size_t> above, below, aboveSeeds, belowSeeds;
findCandidateHoles(above, below, addRow, basisVec, aboveSeeds, belowSeeds);
float aboveConfidence = computeGraphConfidence(basisGraphs, addRow, above, aboveSeeds);
float belowConfidence = computeGraphConfidence(basisGraphs, addRow, below, belowSeeds);
@@ -1052,7 +1053,7 @@ void CirclesGridFinder::addHolesByGraph(const vector<Graph> &basisGraphs, bool a
insertWinner(aboveConfidence, belowConfidence, parameters.minGraphConfidence, addRow, above, below, holes);
}
void CirclesGridFinder::filterOutliersByDensity(const vector<Point2f> &samples, vector<Point2f> &filteredSamples)
void CirclesGridFinder::filterOutliersByDensity(const std::vector<Point2f> &samples, std::vector<Point2f> &filteredSamples)
{
if (samples.empty())
CV_Error( 0, "samples is empty" );
@@ -1077,7 +1078,7 @@ void CirclesGridFinder::filterOutliersByDensity(const vector<Point2f> &samples,
CV_Error( 0, "filteredSamples is empty" );
}
void CirclesGridFinder::findBasis(const vector<Point2f> &samples, vector<Point2f> &basis, vector<Graph> &basisGraphs)
void CirclesGridFinder::findBasis(const std::vector<Point2f> &samples, std::vector<Point2f> &basis, std::vector<Graph> &basisGraphs)
{
basis.clear();
Mat bestLabels;
@@ -1086,9 +1087,9 @@ void CirclesGridFinder::findBasis(const vector<Point2f> &samples, vector<Point2f
const int clustersCount = 4;
kmeans(Mat(samples).reshape(1, 0), clustersCount, bestLabels, termCriteria, parameters.kmeansAttempts,
KMEANS_RANDOM_CENTERS, centers);
assert( centers.type() == CV_32FC1 );
CV_Assert( centers.type() == CV_32FC1 );
vector<int> basisIndices;
std::vector<int> basisIndices;
//TODO: only remove duplicate
for (int i = 0; i < clustersCount; i++)
{
@@ -1113,7 +1114,7 @@ void CirclesGridFinder::findBasis(const vector<Point2f> &samples, vector<Point2f
if (norm(basis[0] - basis[1]) < minBasisDif)
CV_Error(0, "degenerate basis" );
vector<vector<Point2f> > clusters(2), hulls(2);
std::vector<std::vector<Point2f> > clusters(2), hulls(2);
for (int k = 0; k < (int)samples.size(); k++)
{
int label = bestLabels.at<int> (k, 0);
@@ -1203,7 +1204,7 @@ void CirclesGridFinder::computeRNG(Graph &rng, std::vector<cv::Point2f> &vectors
void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessorMatrix)
{
assert( dm.type() == CV_32SC1 );
CV_Assert( dm.type() == CV_32SC1 );
predecessorMatrix.create(verticesCount, verticesCount, CV_32SC1);
predecessorMatrix = -1;
for (int i = 0; i < predecessorMatrix.rows; i++)
@@ -1223,7 +1224,7 @@ void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessor
}
}
static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, std::vector<size_t> &path)
{
if (predecessorMatrix.at<int> ((int)v1, (int)v2) < 0)
{
@@ -1235,10 +1236,10 @@ static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, ve
path.push_back(v2);
}
size_t CirclesGridFinder::findLongestPath(vector<Graph> &basisGraphs, Path &bestPath)
size_t CirclesGridFinder::findLongestPath(std::vector<Graph> &basisGraphs, Path &bestPath)
{
vector<Path> longestPaths(1);
vector<int> confidences;
std::vector<Path> longestPaths(1);
std::vector<int> confidences;
size_t bestGraphIdx = 0;
const int infinity = -1;
@@ -1252,7 +1253,6 @@ size_t CirclesGridFinder::findLongestPath(vector<Graph> &basisGraphs, Path &best
double maxVal;
Point maxLoc;
assert (infinity < 0);
minMaxLoc(distanceMatrix, 0, &maxVal, 0, &maxLoc);
if (maxVal > longestPaths[0].length)
@@ -1305,7 +1305,7 @@ size_t CirclesGridFinder::findLongestPath(vector<Graph> &basisGraphs, Path &best
return bestGraphIdx;
}
void CirclesGridFinder::drawBasis(const vector<Point2f> &basis, Point2f origin, Mat &drawImg) const
void CirclesGridFinder::drawBasis(const std::vector<Point2f> &basis, Point2f origin, Mat &drawImg) const
{
for (size_t i = 0; i < basis.size(); i++)
{
@@ -1314,7 +1314,7 @@ void CirclesGridFinder::drawBasis(const vector<Point2f> &basis, Point2f origin,
}
}
void CirclesGridFinder::drawBasisGraphs(const vector<Graph> &basisGraphs, Mat &drawImage, bool drawEdges,
void CirclesGridFinder::drawBasisGraphs(const std::vector<Graph> &basisGraphs, Mat &drawImage, bool drawEdges,
bool drawVertices) const
{
//const int vertexRadius = 1;
@@ -1363,7 +1363,7 @@ void CirclesGridFinder::drawHoles(const Mat &srcImage, Mat &drawImage) const
const Scalar holeColor = Scalar(0, 255, 0);
if (srcImage.channels() == 1)
cvtColor(srcImage, drawImage, CV_GRAY2RGB);
cvtColor(srcImage, drawImage, COLOR_GRAY2RGB);
else
srcImage.copyTo(drawImage);
@@ -1390,7 +1390,7 @@ Size CirclesGridFinder::getDetectedGridSize() const
return Size((int)holes[0].size(), (int)holes.size());
}
void CirclesGridFinder::getHoles(vector<Point2f> &outHoles) const
void CirclesGridFinder::getHoles(std::vector<Point2f> &outHoles) const
{
outHoles.clear();
@@ -1403,7 +1403,7 @@ void CirclesGridFinder::getHoles(vector<Point2f> &outHoles) const
}
}
static bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
static bool areIndicesCorrect(Point pos, std::vector<std::vector<size_t> > *points)
{
if (pos.y < 0 || pos.x < 0)
return false;
@@ -1414,8 +1414,8 @@ void CirclesGridFinder::getAsymmetricHoles(std::vector<cv::Point2f> &outHoles) c
{
outHoles.clear();
vector<Point> largeCornerIndices, smallCornerIndices;
vector<Point> firstSteps, secondSteps;
std::vector<Point> largeCornerIndices, smallCornerIndices;
std::vector<Point> firstSteps, secondSteps;
size_t cornerIdx = getFirstCorner(largeCornerIndices, smallCornerIndices, firstSteps, secondSteps);
CV_Assert(largeHoles != 0 && smallHoles != 0)
;
@@ -1472,9 +1472,9 @@ bool CirclesGridFinder::areSegmentsIntersecting(Segment seg1, Segment seg2)
*/
}
void CirclesGridFinder::getCornerSegments(const vector<vector<size_t> > &points, vector<vector<Segment> > &segments,
vector<Point> &cornerIndices, vector<Point> &firstSteps,
vector<Point> &secondSteps) const
void CirclesGridFinder::getCornerSegments(const std::vector<std::vector<size_t> > &points, std::vector<std::vector<Segment> > &segments,
std::vector<Point> &cornerIndices, std::vector<Point> &firstSteps,
std::vector<Point> &secondSteps) const
{
segments.clear();
cornerIndices.clear();
@@ -1486,7 +1486,7 @@ void CirclesGridFinder::getCornerSegments(const vector<vector<size_t> > &points,
;
//all 8 segments with one end in a corner
vector<Segment> corner;
std::vector<Segment> corner;
corner.push_back(Segment(keypoints[points[1][0]], keypoints[points[0][0]]));
corner.push_back(Segment(keypoints[points[0][0]], keypoints[points[0][1]]));
segments.push_back(corner);
@@ -1535,7 +1535,7 @@ void CirclesGridFinder::getCornerSegments(const vector<vector<size_t> > &points,
}
}
bool CirclesGridFinder::doesIntersectionExist(const vector<Segment> &corner, const vector<vector<Segment> > &segments)
bool CirclesGridFinder::doesIntersectionExist(const std::vector<Segment> &corner, const std::vector<std::vector<Segment> > &segments)
{
for (size_t i = 0; i < corner.size(); i++)
{
@@ -1552,11 +1552,11 @@ bool CirclesGridFinder::doesIntersectionExist(const vector<Segment> &corner, con
return false;
}
size_t CirclesGridFinder::getFirstCorner(vector<Point> &largeCornerIndices, vector<Point> &smallCornerIndices, vector<
Point> &firstSteps, vector<Point> &secondSteps) const
size_t CirclesGridFinder::getFirstCorner(std::vector<Point> &largeCornerIndices, std::vector<Point> &smallCornerIndices, std::vector<
Point> &firstSteps, std::vector<Point> &secondSteps) const
{
vector<vector<Segment> > largeSegments;
vector<vector<Segment> > smallSegments;
std::vector<std::vector<Segment> > largeSegments;
std::vector<std::vector<Segment> > smallSegments;
getCornerSegments(*largeHoles, largeSegments, largeCornerIndices, firstSteps, secondSteps);
getCornerSegments(*smallHoles, smallSegments, smallCornerIndices, firstSteps, secondSteps);
@@ -1593,9 +1593,3 @@ size_t CirclesGridFinder::getFirstCorner(vector<Point> &largeCornerIndices, vect
return cornerIdx;
}
bool cv::findCirclesGridDefault( InputArray image, Size patternSize,
OutputArray centers, int flags )
{
return findCirclesGrid(image, patternSize, centers, flags);
}

View File

@@ -47,6 +47,7 @@
#include <set>
#include <list>
#include <numeric>
#include <map>
#include "precomp.hpp"

View File

@@ -0,0 +1,431 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
/************************************************************************************\
Some backward compatibility stuff, to be moved to legacy or compat module
\************************************************************************************/
using cv::Ptr;
////////////////// Levenberg-Marquardt engine (the old variant) ////////////////////////
CvLevMarq::CvLevMarq()
{
mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
lambdaLg10 = 0; state = DONE;
criteria = cvTermCriteria(0,0,0);
iters = 0;
completeSymmFlag = false;
}
CvLevMarq::CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
{
mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
init(nparams, nerrs, criteria0, _completeSymmFlag);
}
void CvLevMarq::clear()
{
mask.release();
prevParam.release();
param.release();
J.release();
err.release();
JtJ.release();
JtJN.release();
JtErr.release();
JtJV.release();
JtJW.release();
}
CvLevMarq::~CvLevMarq()
{
clear();
}
void CvLevMarq::init( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
{
if( !param || param->rows != nparams || nerrs != (err ? err->rows : 0) )
clear();
mask = cvCreateMat( nparams, 1, CV_8U );
cvSet(mask, cvScalarAll(1));
prevParam = cvCreateMat( nparams, 1, CV_64F );
param = cvCreateMat( nparams, 1, CV_64F );
JtJ = cvCreateMat( nparams, nparams, CV_64F );
JtJN = cvCreateMat( nparams, nparams, CV_64F );
JtJV = cvCreateMat( nparams, nparams, CV_64F );
JtJW = cvCreateMat( nparams, 1, CV_64F );
JtErr = cvCreateMat( nparams, 1, CV_64F );
if( nerrs > 0 )
{
J = cvCreateMat( nerrs, nparams, CV_64F );
err = cvCreateMat( nerrs, 1, CV_64F );
}
prevErrNorm = DBL_MAX;
lambdaLg10 = -3;
criteria = criteria0;
if( criteria.type & CV_TERMCRIT_ITER )
criteria.max_iter = MIN(MAX(criteria.max_iter,1),1000);
else
criteria.max_iter = 30;
if( criteria.type & CV_TERMCRIT_EPS )
criteria.epsilon = MAX(criteria.epsilon, 0);
else
criteria.epsilon = DBL_EPSILON;
state = STARTED;
iters = 0;
completeSymmFlag = _completeSymmFlag;
}
bool CvLevMarq::update( const CvMat*& _param, CvMat*& matJ, CvMat*& _err )
{
double change;
matJ = _err = 0;
assert( !err.empty() );
if( state == DONE )
{
_param = param;
return false;
}
if( state == STARTED )
{
_param = param;
cvZero( J );
cvZero( err );
matJ = J;
_err = err;
state = CALC_J;
return true;
}
if( state == CALC_J )
{
cvMulTransposed( J, JtJ, 1 );
cvGEMM( J, err, 1, 0, 0, JtErr, CV_GEMM_A_T );
cvCopy( param, prevParam );
step();
if( iters == 0 )
prevErrNorm = cvNorm(err, 0, CV_L2);
_param = param;
cvZero( err );
_err = err;
state = CHECK_ERR;
return true;
}
assert( state == CHECK_ERR );
errNorm = cvNorm( err, 0, CV_L2 );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
{
step();
_param = param;
cvZero( err );
_err = err;
state = CHECK_ERR;
return true;
}
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( ++iters >= criteria.max_iter ||
(change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
{
_param = param;
state = DONE;
return true;
}
prevErrNorm = errNorm;
_param = param;
cvZero(J);
matJ = J;
_err = err;
state = CALC_J;
return true;
}
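// Note (added comment, not part of the original source): a minimal usage sketch of the
// update() state machine above. The solver hands back non-NULL J/err pointers whenever it
// needs them refilled and returns false once the termination criteria are met. The
// callbacks computeJacobian()/computeResiduals() are hypothetical placeholders.
//
//   CvLevMarq solver( nparams, nerrs,
//                     cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 30, DBL_EPSILON) );
//   const CvMat* param = 0;
//   CvMat *J = 0, *err = 0;
//   while( solver.update( param, J, err ) )
//   {
//       if( J )
//           computeJacobian( param, J );    // hypothetical: fill the nerrs x nparams Jacobian
//       if( err )
//           computeResiduals( param, err ); // hypothetical: fill the nerrs x 1 residual vector
//   }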
bool CvLevMarq::updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm )
{
double change;
CV_Assert( err.empty() );
if( state == DONE )
{
_param = param;
return false;
}
if( state == STARTED )
{
_param = param;
cvZero( JtJ );
cvZero( JtErr );
errNorm = 0;
_JtJ = JtJ;
_JtErr = JtErr;
_errNorm = &errNorm;
state = CALC_J;
return true;
}
if( state == CALC_J )
{
cvCopy( param, prevParam );
step();
_param = param;
prevErrNorm = errNorm;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
assert( state == CHECK_ERR );
if( errNorm > prevErrNorm )
{
if( ++lambdaLg10 <= 16 )
{
step();
_param = param;
errNorm = 0;
_errNorm = &errNorm;
state = CHECK_ERR;
return true;
}
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( ++iters >= criteria.max_iter ||
(change = cvNorm(param, prevParam, CV_RELATIVE_L2)) < criteria.epsilon )
{
_param = param;
state = DONE;
return false;
}
prevErrNorm = errNorm;
cvZero( JtJ );
cvZero( JtErr );
_param = param;
_JtJ = JtJ;
_JtErr = JtErr;
state = CALC_J;
return true;
}
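// Explanatory note (added comment, not part of the original source): step() performs one
// damped Gauss-Newton update. After zeroing the rows/columns of masked-out parameters,
// it solves the augmented normal equations
//     (JtJ + lambda * diag(JtJ)) * delta = JtErr,   lambda = 10^lambdaLg10,
// via SVD (cvSVD + cvSVBkSb), and then sets param = prevParam - delta for the unmasked
// parameters; masked parameters are left at their previous values.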
void CvLevMarq::step()
{
const double LOG10 = log(10.);
double lambda = exp(lambdaLg10*LOG10);
int i, j, nparams = param->rows;
for( i = 0; i < nparams; i++ )
if( mask->data.ptr[i] == 0 )
{
double *row = JtJ->data.db + i*nparams, *col = JtJ->data.db + i;
for( j = 0; j < nparams; j++ )
row[j] = col[j*nparams] = 0;
JtErr->data.db[i] = 0;
}
if( !err )
cvCompleteSymm( JtJ, completeSymmFlag );
#if 1
cvCopy( JtJ, JtJN );
for( i = 0; i < nparams; i++ )
JtJN->data.db[(nparams+1)*i] *= 1. + lambda;
#else
cvSetIdentity(JtJN, cvRealScalar(lambda));
cvAdd( JtJ, JtJN, JtJN );
#endif
cvSVD( JtJN, JtJW, 0, JtJV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
cvSVBkSb( JtJW, JtJV, JtJV, JtErr, param, CV_SVD_U_T + CV_SVD_V_T );
for( i = 0; i < nparams; i++ )
param->data.db[i] = prevParam->data.db[i] - (mask->data.ptr[i] ? param->data.db[i] : 0);
}
CV_IMPL int cvRANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
return cv::RANSACUpdateNumIters(p, ep, modelPoints, maxIters);
}
CV_IMPL int cvFindHomography( const CvMat* _src, const CvMat* _dst, CvMat* __H, int method,
double ransacReprojThreshold, CvMat* _mask )
{
cv::Mat src = cv::cvarrToMat(_src), dst = cv::cvarrToMat(_dst);
if( src.channels() == 1 && (src.rows == 2 || src.rows == 3) && src.cols > 3 )
cv::transpose(src, src);
if( dst.channels() == 1 && (dst.rows == 2 || dst.rows == 3) && dst.cols > 3 )
cv::transpose(dst, dst);
const cv::Mat H = cv::cvarrToMat(__H), mask = cv::cvarrToMat(_mask);
cv::Mat H0 = cv::findHomography(src, dst, method, ransacReprojThreshold,
_mask ? cv::_OutputArray(mask) : cv::_OutputArray());
if( H0.empty() )
{
cv::Mat Hz = cv::cvarrToMat(__H);
Hz.setTo(cv::Scalar::all(0));
return 0;
}
H0.convertTo(H, H.type());
return 1;
}
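// Note (added comment, not part of the original source): the wrappers in this file only
// adapt the legacy CvMat-based API to the C++ one. Up to the in-place transposition of
// 2xN/3xN single-channel point layouts, the call above is equivalent to
//
//   cv::Mat H = cv::findHomography(src, dst, method, ransacReprojThreshold, mask);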
CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
CvMat* fmatrix, int method,
double param1, double param2, CvMat* _mask )
{
cv::Mat m1 = cv::cvarrToMat(points1), m2 = cv::cvarrToMat(points2);
if( m1.channels() == 1 && (m1.rows == 2 || m1.rows == 3) && m1.cols > 3 )
cv::transpose(m1, m1);
if( m2.channels() == 1 && (m2.rows == 2 || m2.rows == 3) && m2.cols > 3 )
cv::transpose(m2, m2);
const cv::Mat FM = cv::cvarrToMat(fmatrix), mask = cv::cvarrToMat(_mask);
cv::Mat FM0 = cv::findFundamentalMat(m1, m2, method, param1, param2,
_mask ? cv::_OutputArray(mask) : cv::_OutputArray());
if( FM0.empty() )
{
cv::Mat FM0z = cv::cvarrToMat(fmatrix);
FM0z.setTo(cv::Scalar::all(0));
return 0;
}
CV_Assert( FM0.cols == 3 && FM0.rows % 3 == 0 && FM.cols == 3 && FM.rows % 3 == 0 && FM.channels() == 1 );
cv::Mat FM1 = FM.rowRange(0, MIN(FM0.rows, FM.rows));
FM0.rowRange(0, FM1.rows).convertTo(FM1, FM1.type());
return FM1.rows / 3;
}
CV_IMPL void cvComputeCorrespondEpilines( const CvMat* points, int pointImageID,
const CvMat* fmatrix, CvMat* _lines )
{
cv::Mat pt = cv::cvarrToMat(points), fm = cv::cvarrToMat(fmatrix);
cv::Mat lines = cv::cvarrToMat(_lines);
const cv::Mat lines0 = lines;
if( pt.channels() == 1 && (pt.rows == 2 || pt.rows == 3) && pt.cols > 3 )
cv::transpose(pt, pt);
cv::computeCorrespondEpilines(pt, pointImageID, fm, lines);
bool tflag = lines0.channels() == 1 && lines0.rows == 3 && lines0.cols > 3;
lines = lines.reshape(lines0.channels(), (tflag ? lines0.cols : lines0.rows));
if( tflag )
{
CV_Assert( lines.rows == lines0.cols && lines.cols == lines0.rows );
if( lines0.type() == lines.type() )
transpose( lines, lines0 );
else
{
transpose( lines, lines );
lines.convertTo( lines0, lines0.type() );
}
}
else
{
CV_Assert( lines.size() == lines0.size() );
if( lines.data != lines0.data )
lines.convertTo(lines0, lines0.type());
}
}
CV_IMPL void cvConvertPointsHomogeneous( const CvMat* _src, CvMat* _dst )
{
cv::Mat src = cv::cvarrToMat(_src), dst = cv::cvarrToMat(_dst);
const cv::Mat dst0 = dst;
int d0 = src.channels() > 1 ? src.channels() : MIN(src.cols, src.rows);
if( src.channels() == 1 && src.cols > d0 )
cv::transpose(src, src);
int d1 = dst.channels() > 1 ? dst.channels() : MIN(dst.cols, dst.rows);
if( d0 == d1 )
src.copyTo(dst);
else if( d0 < d1 )
cv::convertPointsToHomogeneous(src, dst);
else
cv::convertPointsFromHomogeneous(src, dst);
bool tflag = dst0.channels() == 1 && dst0.cols > d1;
dst = dst.reshape(dst0.channels(), (tflag ? dst0.cols : dst0.rows));
if( tflag )
{
CV_Assert( dst.rows == dst0.cols && dst.cols == dst0.rows );
if( dst0.type() == dst.type() )
transpose( dst, dst0 );
else
{
transpose( dst, dst );
dst.convertTo( dst0, dst0.type() );
}
}
else
{
CV_Assert( dst.size() == dst0.size() );
if( dst.data != dst0.data )
dst.convertTo(dst0, dst0.type());
}
}
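// Note (added comment, not part of the original source): the wrapper above dispatches on
// the point dimensionality of source and destination: equal dimensions copy through,
// d0 < d1 calls cv::convertPointsToHomogeneous(), d0 > d1 calls
// cv::convertPointsFromHomogeneous(), and the final reshape/transpose restores the
// original CvMat layout (channel-packed rows vs. a 2xN/3xN single-channel matrix).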

View File

@@ -0,0 +1,123 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
CvStereoBMState* cvCreateStereoBMState( int /*preset*/, int numberOfDisparities )
{
CvStereoBMState* state = (CvStereoBMState*)cvAlloc( sizeof(*state) );
if( !state )
return 0;
state->preFilterType = CV_STEREO_BM_XSOBEL; //CV_STEREO_BM_NORMALIZED_RESPONSE;
state->preFilterSize = 9;
state->preFilterCap = 31;
state->SADWindowSize = 15;
state->minDisparity = 0;
state->numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : 64;
state->textureThreshold = 10;
state->uniquenessRatio = 15;
state->speckleRange = state->speckleWindowSize = 0;
state->trySmallerWindows = 0;
state->roi1 = state->roi2 = cvRect(0,0,0,0);
state->disp12MaxDiff = -1;
state->preFilteredImg0 = state->preFilteredImg1 = state->slidingSumBuf =
state->disp = state->cost = 0;
return state;
}
void cvReleaseStereoBMState( CvStereoBMState** state )
{
if( !state )
CV_Error( CV_StsNullPtr, "" );
if( !*state )
return;
cvReleaseMat( &(*state)->preFilteredImg0 );
cvReleaseMat( &(*state)->preFilteredImg1 );
cvReleaseMat( &(*state)->slidingSumBuf );
cvReleaseMat( &(*state)->disp );
cvReleaseMat( &(*state)->cost );
cvFree( state );
}
void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
CvArr* disparr, CvStereoBMState* state )
{
cv::Mat left = cv::cvarrToMat(leftarr), right = cv::cvarrToMat(rightarr);
const cv::Mat disp = cv::cvarrToMat(disparr);
CV_Assert( state != 0 );
cv::Ptr<cv::StereoMatcher> sm = cv::createStereoBM(state->numberOfDisparities,
state->SADWindowSize);
sm->set("preFilterType", state->preFilterType);
sm->set("preFilterSize", state->preFilterSize);
sm->set("preFilterCap", state->preFilterCap);
sm->set("SADWindowSize", state->SADWindowSize);
sm->set("numDisparities", state->numberOfDisparities > 0 ? state->numberOfDisparities : 64);
sm->set("textureThreshold", state->textureThreshold);
sm->set("uniquenessRatio", state->uniquenessRatio);
sm->set("speckleRange", state->speckleRange);
sm->set("speckleWindowSize", state->speckleWindowSize);
sm->set("disp12MaxDiff", state->disp12MaxDiff);
sm->compute(left, right, disp);
}
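// Note (added comment, not part of the original source): the legacy CvStereoBMState fields
// are forwarded one-to-one to the StereoMatcher created by cv::createStereoBM(), so code
// being ported away from the C API can configure the matcher directly through the same
// string-named properties used above, e.g.
//
//   cv::Ptr<cv::StereoMatcher> bm = cv::createStereoBM(64, 15);
//   bm->set("uniquenessRatio", 15);
//   bm->compute(leftGray, rightGray, disparity);  // leftGray/rightGray/disparity: placeholder cv::Mat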
CvRect cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize )
{
return (CvRect)cv::getValidDisparityROI( roi1, roi2, minDisparity,
numberOfDisparities, SADWindowSize );
}
void cvValidateDisparity( CvArr* _disp, const CvArr* _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
cv::Mat disp = cv::cvarrToMat(_disp), cost = cv::cvarrToMat(_cost);
cv::validateDisparity( disp, cost, minDisparity, numberOfDisparities, disp12MaxDiff );
}

View File

@@ -1,5 +1,4 @@
#include <iostream>
using namespace std;
#include "precomp.hpp"
#include "epnp.h"

View File

@@ -2,6 +2,7 @@
#define epnp_h
#include "precomp.hpp"
#include "opencv2/core/core_c.h"
class epnp {
public:

View File

@@ -0,0 +1,606 @@
/* This is a 5-point algorithm contributed to OpenCV by the author, Bo Li.
It implements the 5-point algorithm solver from Nister's paper:
Nister, An efficient solution to the five-point relative pose problem, PAMI, 2004.
*/
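/* Explanatory note (added comment, not part of the original source): the solver below
   follows Nister's formulation. The five epipolar constraints q2^T E q1 = 0 form the
   linear system Q, whose 4-dimensional null space parameterizes the essential matrix as
   E = x*E1 + y*E2 + z*E3 + E4. Substituting this into det(E) = 0 and the trace constraint
       2 E E^T E - trace(E E^T) E = 0
   yields the 10x20 coefficient matrix A assembled in getCoeffMat(); eliminating the first
   ten columns and forming the 3x13 matrix B reduces the system to a degree-10 polynomial
   in z (coefficients c[0..10]). Its real roots are found with cv::solvePoly() and
   back-substituted (SVD::solveZ on Bz) to recover up to ten candidate essential matrices.
*/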
/* Copyright (c) 2013, Bo Li (prclibo@gmail.com), ETH Zurich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "precomp.hpp"
namespace cv
{
class EMEstimatorCallback : public PointSetRegistrator::Callback
{
public:
int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const
{
Mat q1 = _m1.getMat(), q2 = _m2.getMat();
Mat Q1 = q1.reshape(1, (int)q1.total());
Mat Q2 = q2.reshape(1, (int)q2.total());
int n = Q1.rows;
Mat Q(n, 9, CV_64F);
Q.col(0) = Q1.col(0).mul( Q2.col(0) );
Q.col(1) = Q1.col(1).mul( Q2.col(0) );
Q.col(2) = Q2.col(0) * 1.0;
Q.col(3) = Q1.col(0).mul( Q2.col(1) );
Q.col(4) = Q1.col(1).mul( Q2.col(1) );
Q.col(5) = Q2.col(1) * 1.0;
Q.col(6) = Q1.col(0) * 1.0;
Q.col(7) = Q1.col(1) * 1.0;
Q.col(8) = 1.0;
Mat U, W, Vt;
SVD::compute(Q, W, U, Vt, SVD::MODIFY_A | SVD::FULL_UV);
Mat EE = Mat(Vt.t()).colRange(5, 9) * 1.0;
Mat A(10, 20, CV_64F);
EE = EE.t();
getCoeffMat((double*)EE.data, (double*)A.data);
EE = EE.t();
A = A.colRange(0, 10).inv() * A.colRange(10, 20);
double b[3 * 13];
Mat B(3, 13, CV_64F, b);
for (int i = 0; i < 3; i++)
{
Mat arow1 = A.row(i * 2 + 4) * 1.0;
Mat arow2 = A.row(i * 2 + 5) * 1.0;
Mat row1(1, 13, CV_64F, Scalar(0.0));
Mat row2(1, 13, CV_64F, Scalar(0.0));
row1.colRange(1, 4) = arow1.colRange(0, 3) * 1.0;
row1.colRange(5, 8) = arow1.colRange(3, 6) * 1.0;
row1.colRange(9, 13) = arow1.colRange(6, 10) * 1.0;
row2.colRange(0, 3) = arow2.colRange(0, 3) * 1.0;
row2.colRange(4, 7) = arow2.colRange(3, 6) * 1.0;
row2.colRange(8, 12) = arow2.colRange(6, 10) * 1.0;
B.row(i) = row1 - row2;
}
double c[11];
Mat coeffs(1, 11, CV_64F, c);
c[10] = (b[0]*b[17]*b[34]+b[26]*b[4]*b[21]-b[26]*b[17]*b[8]-b[13]*b[4]*b[34]-b[0]*b[21]*b[30]+b[13]*b[30]*b[8]);
c[9] = (b[26]*b[4]*b[22]+b[14]*b[30]*b[8]+b[13]*b[31]*b[8]+b[1]*b[17]*b[34]-b[13]*b[5]*b[34]+b[26]*b[5]*b[21]-b[0]*b[21]*b[31]-b[26]*b[17]*b[9]-b[1]*b[21]*b[30]+b[27]*b[4]*b[21]+b[0]*b[17]*b[35]-b[0]*b[22]*b[30]+b[13]*b[30]*b[9]+b[0]*b[18]*b[34]-b[27]*b[17]*b[8]-b[14]*b[4]*b[34]-b[13]*b[4]*b[35]-b[26]*b[18]*b[8]);
c[8] = (b[14]*b[30]*b[9]+b[14]*b[31]*b[8]+b[13]*b[31]*b[9]-b[13]*b[4]*b[36]-b[13]*b[5]*b[35]+b[15]*b[30]*b[8]-b[13]*b[6]*b[34]+b[13]*b[30]*b[10]+b[13]*b[32]*b[8]-b[14]*b[4]*b[35]-b[14]*b[5]*b[34]+b[26]*b[4]*b[23]+b[26]*b[5]*b[22]+b[26]*b[6]*b[21]-b[26]*b[17]*b[10]-b[15]*b[4]*b[34]-b[26]*b[18]*b[9]-b[26]*b[19]*b[8]+b[27]*b[4]*b[22]+b[27]*b[5]*b[21]-b[27]*b[17]*b[9]-b[27]*b[18]*b[8]-b[1]*b[21]*b[31]-b[0]*b[23]*b[30]-b[0]*b[21]*b[32]+b[28]*b[4]*b[21]-b[28]*b[17]*b[8]+b[2]*b[17]*b[34]+b[0]*b[18]*b[35]-b[0]*b[22]*b[31]+b[0]*b[17]*b[36]+b[0]*b[19]*b[34]-b[1]*b[22]*b[30]+b[1]*b[18]*b[34]+b[1]*b[17]*b[35]-b[2]*b[21]*b[30]);
c[7] = (b[14]*b[30]*b[10]+b[14]*b[32]*b[8]-b[3]*b[21]*b[30]+b[3]*b[17]*b[34]+b[13]*b[32]*b[9]+b[13]*b[33]*b[8]-b[13]*b[4]*b[37]-b[13]*b[5]*b[36]+b[15]*b[30]*b[9]+b[15]*b[31]*b[8]-b[16]*b[4]*b[34]-b[13]*b[6]*b[35]-b[13]*b[7]*b[34]+b[13]*b[30]*b[11]+b[13]*b[31]*b[10]+b[14]*b[31]*b[9]-b[14]*b[4]*b[36]-b[14]*b[5]*b[35]-b[14]*b[6]*b[34]+b[16]*b[30]*b[8]-b[26]*b[20]*b[8]+b[26]*b[4]*b[24]+b[26]*b[5]*b[23]+b[26]*b[6]*b[22]+b[26]*b[7]*b[21]-b[26]*b[17]*b[11]-b[15]*b[4]*b[35]-b[15]*b[5]*b[34]-b[26]*b[18]*b[10]-b[26]*b[19]*b[9]+b[27]*b[4]*b[23]+b[27]*b[5]*b[22]+b[27]*b[6]*b[21]-b[27]*b[17]*b[10]-b[27]*b[18]*b[9]-b[27]*b[19]*b[8]+b[0]*b[17]*b[37]-b[0]*b[23]*b[31]-b[0]*b[24]*b[30]-b[0]*b[21]*b[33]-b[29]*b[17]*b[8]+b[28]*b[4]*b[22]+b[28]*b[5]*b[21]-b[28]*b[17]*b[9]-b[28]*b[18]*b[8]+b[29]*b[4]*b[21]+b[1]*b[19]*b[34]-b[2]*b[21]*b[31]+b[0]*b[20]*b[34]+b[0]*b[19]*b[35]+b[0]*b[18]*b[36]-b[0]*b[22]*b[32]-b[1]*b[23]*b[30]-b[1]*b[21]*b[32]+b[1]*b[18]*b[35]-b[1]*b[22]*b[31]-b[2]*b[22]*b[30]+b[2]*b[17]*b[35]+b[1]*b[17]*b[36]+b[2]*b[18]*b[34]);
c[6] = (-b[14]*b[6]*b[35]-b[14]*b[7]*b[34]-b[3]*b[22]*b[30]-b[3]*b[21]*b[31]+b[3]*b[17]*b[35]+b[3]*b[18]*b[34]+b[13]*b[32]*b[10]+b[13]*b[33]*b[9]-b[13]*b[4]*b[38]-b[13]*b[5]*b[37]-b[15]*b[6]*b[34]+b[15]*b[30]*b[10]+b[15]*b[32]*b[8]-b[16]*b[4]*b[35]-b[13]*b[6]*b[36]-b[13]*b[7]*b[35]+b[13]*b[31]*b[11]+b[13]*b[30]*b[12]+b[14]*b[32]*b[9]+b[14]*b[33]*b[8]-b[14]*b[4]*b[37]-b[14]*b[5]*b[36]+b[16]*b[30]*b[9]+b[16]*b[31]*b[8]-b[26]*b[20]*b[9]+b[26]*b[4]*b[25]+b[26]*b[5]*b[24]+b[26]*b[6]*b[23]+b[26]*b[7]*b[22]-b[26]*b[17]*b[12]+b[14]*b[30]*b[11]+b[14]*b[31]*b[10]+b[15]*b[31]*b[9]-b[15]*b[4]*b[36]-b[15]*b[5]*b[35]-b[26]*b[18]*b[11]-b[26]*b[19]*b[10]-b[27]*b[20]*b[8]+b[27]*b[4]*b[24]+b[27]*b[5]*b[23]+b[27]*b[6]*b[22]+b[27]*b[7]*b[21]-b[27]*b[17]*b[11]-b[27]*b[18]*b[10]-b[27]*b[19]*b[9]-b[16]*b[5]*b[34]-b[29]*b[17]*b[9]-b[29]*b[18]*b[8]+b[28]*b[4]*b[23]+b[28]*b[5]*b[22]+b[28]*b[6]*b[21]-b[28]*b[17]*b[10]-b[28]*b[18]*b[9]-b[28]*b[19]*b[8]+b[29]*b[4]*b[22]+b[29]*b[5]*b[21]-b[2]*b[23]*b[30]+b[2]*b[18]*b[35]-b[1]*b[22]*b[32]-b[2]*b[21]*b[32]+b[2]*b[19]*b[34]+b[0]*b[19]*b[36]-b[0]*b[22]*b[33]+b[0]*b[20]*b[35]-b[0]*b[23]*b[32]-b[0]*b[25]*b[30]+b[0]*b[17]*b[38]+b[0]*b[18]*b[37]-b[0]*b[24]*b[31]+b[1]*b[17]*b[37]-b[1]*b[23]*b[31]-b[1]*b[24]*b[30]-b[1]*b[21]*b[33]+b[1]*b[20]*b[34]+b[1]*b[19]*b[35]+b[1]*b[18]*b[36]+b[2]*b[17]*b[36]-b[2]*b[22]*b[31]);
c[5] = (-b[14]*b[6]*b[36]-b[14]*b[7]*b[35]+b[14]*b[31]*b[11]-b[3]*b[23]*b[30]-b[3]*b[21]*b[32]+b[3]*b[18]*b[35]-b[3]*b[22]*b[31]+b[3]*b[17]*b[36]+b[3]*b[19]*b[34]+b[13]*b[32]*b[11]+b[13]*b[33]*b[10]-b[13]*b[5]*b[38]-b[15]*b[6]*b[35]-b[15]*b[7]*b[34]+b[15]*b[30]*b[11]+b[15]*b[31]*b[10]+b[16]*b[31]*b[9]-b[13]*b[6]*b[37]-b[13]*b[7]*b[36]+b[13]*b[31]*b[12]+b[14]*b[32]*b[10]+b[14]*b[33]*b[9]-b[14]*b[4]*b[38]-b[14]*b[5]*b[37]-b[16]*b[6]*b[34]+b[16]*b[30]*b[10]+b[16]*b[32]*b[8]-b[26]*b[20]*b[10]+b[26]*b[5]*b[25]+b[26]*b[6]*b[24]+b[26]*b[7]*b[23]+b[14]*b[30]*b[12]+b[15]*b[32]*b[9]+b[15]*b[33]*b[8]-b[15]*b[4]*b[37]-b[15]*b[5]*b[36]+b[29]*b[5]*b[22]+b[29]*b[6]*b[21]-b[26]*b[18]*b[12]-b[26]*b[19]*b[11]-b[27]*b[20]*b[9]+b[27]*b[4]*b[25]+b[27]*b[5]*b[24]+b[27]*b[6]*b[23]+b[27]*b[7]*b[22]-b[27]*b[17]*b[12]-b[27]*b[18]*b[11]-b[27]*b[19]*b[10]-b[28]*b[20]*b[8]-b[16]*b[4]*b[36]-b[16]*b[5]*b[35]-b[29]*b[17]*b[10]-b[29]*b[18]*b[9]-b[29]*b[19]*b[8]+b[28]*b[4]*b[24]+b[28]*b[5]*b[23]+b[28]*b[6]*b[22]+b[28]*b[7]*b[21]-b[28]*b[17]*b[11]-b[28]*b[18]*b[10]-b[28]*b[19]*b[9]+b[29]*b[4]*b[23]-b[2]*b[22]*b[32]-b[2]*b[21]*b[33]-b[1]*b[24]*b[31]+b[0]*b[18]*b[38]-b[0]*b[24]*b[32]+b[0]*b[19]*b[37]+b[0]*b[20]*b[36]-b[0]*b[25]*b[31]-b[0]*b[23]*b[33]+b[1]*b[19]*b[36]-b[1]*b[22]*b[33]+b[1]*b[20]*b[35]+b[2]*b[19]*b[35]-b[2]*b[24]*b[30]-b[2]*b[23]*b[31]+b[2]*b[20]*b[34]+b[2]*b[17]*b[37]-b[1]*b[25]*b[30]+b[1]*b[18]*b[37]+b[1]*b[17]*b[38]-b[1]*b[23]*b[32]+b[2]*b[18]*b[36]);
c[4] = (-b[14]*b[6]*b[37]-b[14]*b[7]*b[36]+b[14]*b[31]*b[12]+b[3]*b[17]*b[37]-b[3]*b[23]*b[31]-b[3]*b[24]*b[30]-b[3]*b[21]*b[33]+b[3]*b[20]*b[34]+b[3]*b[19]*b[35]+b[3]*b[18]*b[36]-b[3]*b[22]*b[32]+b[13]*b[32]*b[12]+b[13]*b[33]*b[11]-b[15]*b[6]*b[36]-b[15]*b[7]*b[35]+b[15]*b[31]*b[11]+b[15]*b[30]*b[12]+b[16]*b[32]*b[9]+b[16]*b[33]*b[8]-b[13]*b[6]*b[38]-b[13]*b[7]*b[37]+b[14]*b[32]*b[11]+b[14]*b[33]*b[10]-b[14]*b[5]*b[38]-b[16]*b[6]*b[35]-b[16]*b[7]*b[34]+b[16]*b[30]*b[11]+b[16]*b[31]*b[10]-b[26]*b[19]*b[12]-b[26]*b[20]*b[11]+b[26]*b[6]*b[25]+b[26]*b[7]*b[24]+b[15]*b[32]*b[10]+b[15]*b[33]*b[9]-b[15]*b[4]*b[38]-b[15]*b[5]*b[37]+b[29]*b[5]*b[23]+b[29]*b[6]*b[22]+b[29]*b[7]*b[21]-b[27]*b[20]*b[10]+b[27]*b[5]*b[25]+b[27]*b[6]*b[24]+b[27]*b[7]*b[23]-b[27]*b[18]*b[12]-b[27]*b[19]*b[11]-b[28]*b[20]*b[9]-b[16]*b[4]*b[37]-b[16]*b[5]*b[36]+b[0]*b[19]*b[38]-b[0]*b[24]*b[33]+b[0]*b[20]*b[37]-b[29]*b[17]*b[11]-b[29]*b[18]*b[10]-b[29]*b[19]*b[9]+b[28]*b[4]*b[25]+b[28]*b[5]*b[24]+b[28]*b[6]*b[23]+b[28]*b[7]*b[22]-b[28]*b[17]*b[12]-b[28]*b[18]*b[11]-b[28]*b[19]*b[10]-b[29]*b[20]*b[8]+b[29]*b[4]*b[24]+b[2]*b[18]*b[37]-b[0]*b[25]*b[32]+b[1]*b[18]*b[38]-b[1]*b[24]*b[32]+b[1]*b[19]*b[37]+b[1]*b[20]*b[36]-b[1]*b[25]*b[31]+b[2]*b[17]*b[38]+b[2]*b[19]*b[36]-b[2]*b[24]*b[31]-b[2]*b[22]*b[33]-b[2]*b[23]*b[32]+b[2]*b[20]*b[35]-b[1]*b[23]*b[33]-b[2]*b[25]*b[30]);
c[3] = (-b[14]*b[6]*b[38]-b[14]*b[7]*b[37]+b[3]*b[19]*b[36]-b[3]*b[22]*b[33]+b[3]*b[20]*b[35]-b[3]*b[23]*b[32]-b[3]*b[25]*b[30]+b[3]*b[17]*b[38]+b[3]*b[18]*b[37]-b[3]*b[24]*b[31]-b[15]*b[6]*b[37]-b[15]*b[7]*b[36]+b[15]*b[31]*b[12]+b[16]*b[32]*b[10]+b[16]*b[33]*b[9]+b[13]*b[33]*b[12]-b[13]*b[7]*b[38]+b[14]*b[32]*b[12]+b[14]*b[33]*b[11]-b[16]*b[6]*b[36]-b[16]*b[7]*b[35]+b[16]*b[31]*b[11]+b[16]*b[30]*b[12]+b[15]*b[32]*b[11]+b[15]*b[33]*b[10]-b[15]*b[5]*b[38]+b[29]*b[5]*b[24]+b[29]*b[6]*b[23]-b[26]*b[20]*b[12]+b[26]*b[7]*b[25]-b[27]*b[19]*b[12]-b[27]*b[20]*b[11]+b[27]*b[6]*b[25]+b[27]*b[7]*b[24]-b[28]*b[20]*b[10]-b[16]*b[4]*b[38]-b[16]*b[5]*b[37]+b[29]*b[7]*b[22]-b[29]*b[17]*b[12]-b[29]*b[18]*b[11]-b[29]*b[19]*b[10]+b[28]*b[5]*b[25]+b[28]*b[6]*b[24]+b[28]*b[7]*b[23]-b[28]*b[18]*b[12]-b[28]*b[19]*b[11]-b[29]*b[20]*b[9]+b[29]*b[4]*b[25]-b[2]*b[24]*b[32]+b[0]*b[20]*b[38]-b[0]*b[25]*b[33]+b[1]*b[19]*b[38]-b[1]*b[24]*b[33]+b[1]*b[20]*b[37]-b[2]*b[25]*b[31]+b[2]*b[20]*b[36]-b[1]*b[25]*b[32]+b[2]*b[19]*b[37]+b[2]*b[18]*b[38]-b[2]*b[23]*b[33]);
c[2] = (b[3]*b[18]*b[38]-b[3]*b[24]*b[32]+b[3]*b[19]*b[37]+b[3]*b[20]*b[36]-b[3]*b[25]*b[31]-b[3]*b[23]*b[33]-b[15]*b[6]*b[38]-b[15]*b[7]*b[37]+b[16]*b[32]*b[11]+b[16]*b[33]*b[10]-b[16]*b[5]*b[38]-b[16]*b[6]*b[37]-b[16]*b[7]*b[36]+b[16]*b[31]*b[12]+b[14]*b[33]*b[12]-b[14]*b[7]*b[38]+b[15]*b[32]*b[12]+b[15]*b[33]*b[11]+b[29]*b[5]*b[25]+b[29]*b[6]*b[24]-b[27]*b[20]*b[12]+b[27]*b[7]*b[25]-b[28]*b[19]*b[12]-b[28]*b[20]*b[11]+b[29]*b[7]*b[23]-b[29]*b[18]*b[12]-b[29]*b[19]*b[11]+b[28]*b[6]*b[25]+b[28]*b[7]*b[24]-b[29]*b[20]*b[10]+b[2]*b[19]*b[38]-b[1]*b[25]*b[33]+b[2]*b[20]*b[37]-b[2]*b[24]*b[33]-b[2]*b[25]*b[32]+b[1]*b[20]*b[38]);
c[1] = (b[29]*b[7]*b[24]-b[29]*b[20]*b[11]+b[2]*b[20]*b[38]-b[2]*b[25]*b[33]-b[28]*b[20]*b[12]+b[28]*b[7]*b[25]-b[29]*b[19]*b[12]-b[3]*b[24]*b[33]+b[15]*b[33]*b[12]+b[3]*b[19]*b[38]-b[16]*b[6]*b[38]+b[3]*b[20]*b[37]+b[16]*b[32]*b[12]+b[29]*b[6]*b[25]-b[16]*b[7]*b[37]-b[3]*b[25]*b[32]-b[15]*b[7]*b[38]+b[16]*b[33]*b[11]);
c[0] = -b[29]*b[20]*b[12]+b[29]*b[7]*b[25]+b[16]*b[33]*b[12]-b[16]*b[7]*b[38]+b[3]*b[20]*b[38]-b[3]*b[25]*b[33];
std::vector<Complex<double> > roots;
solvePoly(coeffs, roots);
std::vector<double> xs, ys, zs;
int count = 0;
Mat ematrix(10*3, 3, CV_64F);
double* e = ematrix.ptr<double>();
for (size_t i = 0; i < roots.size(); i++)
{
if (fabs(roots[i].im) > 1e-10) continue;
double z1 = roots[i].re;
double z2 = z1 * z1;
double z3 = z2 * z1;
double z4 = z3 * z1;
double bz[3][3];
for (int j = 0; j < 3; j++)
{
const double * br = b + j * 13;
bz[j][0] = br[0] * z3 + br[1] * z2 + br[2] * z1 + br[3];
bz[j][1] = br[4] * z3 + br[5] * z2 + br[6] * z1 + br[7];
bz[j][2] = br[8] * z4 + br[9] * z3 + br[10] * z2 + br[11] * z1 + br[12];
}
Mat Bz(3, 3, CV_64F, bz);
cv::Mat xy1;
SVD::solveZ(Bz, xy1);
if (fabs(xy1.at<double>(2)) < 1e-10) continue;
xs.push_back(xy1.at<double>(0) / xy1.at<double>(2));
ys.push_back(xy1.at<double>(1) / xy1.at<double>(2));
zs.push_back(z1);
cv::Mat Evec = EE.col(0) * xs.back() + EE.col(1) * ys.back() + EE.col(2) * zs.back() + EE.col(3);
Evec /= norm(Evec);
memcpy(e + count * 9, Evec.data, 9 * sizeof(double));
count++;
}
ematrix.rowRange(0, count*3).copyTo(_model);
return count;
}
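// Note (added comment, not part of the original source): runKernel() stacks the candidate
// solutions vertically, so _model holds `count` 3x3 essential matrices in a
// (3*count) x 3 CV_64F matrix; the point-set registrator driving this callback is then
// expected to evaluate each 3-row block separately.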
protected:
void getCoeffMat(double *e, double *A) const
{
double ep2[36], ep3[36];
for (int i = 0; i < 36; i++)
{
ep2[i] = e[i] * e[i];
ep3[i] = ep2[i] * e[i];
}
A[0]=e[33]*e[28]*e[32]-e[33]*e[31]*e[29]+e[30]*e[34]*e[29]-e[30]*e[28]*e[35]-e[27]*e[32]*e[34]+e[27]*e[31]*e[35];
A[146]=.5000000000*e[6]*ep2[8]-.5000000000*e[6]*ep2[5]+.5000000000*ep3[6]+.5000000000*e[6]*ep2[7]-.5000000000*e[6]*ep2[4]+e[0]*e[2]*e[8]+e[3]*e[4]*e[7]+e[3]*e[5]*e[8]+e[0]*e[1]*e[7]-.5000000000*e[6]*ep2[1]-.5000000000*e[6]*ep2[2]+.5000000000*ep2[0]*e[6]+.5000000000*ep2[3]*e[6];
A[1]=e[30]*e[34]*e[2]+e[33]*e[1]*e[32]-e[3]*e[28]*e[35]+e[0]*e[31]*e[35]+e[3]*e[34]*e[29]-e[30]*e[1]*e[35]+e[27]*e[31]*e[8]-e[27]*e[32]*e[7]-e[30]*e[28]*e[8]-e[33]*e[31]*e[2]-e[0]*e[32]*e[34]+e[6]*e[28]*e[32]-e[33]*e[4]*e[29]+e[33]*e[28]*e[5]+e[30]*e[7]*e[29]+e[27]*e[4]*e[35]-e[27]*e[5]*e[34]-e[6]*e[31]*e[29];
A[147]=e[9]*e[27]*e[15]+e[9]*e[29]*e[17]+e[9]*e[11]*e[35]+e[9]*e[28]*e[16]+e[9]*e[10]*e[34]+e[27]*e[11]*e[17]+e[27]*e[10]*e[16]+e[12]*e[30]*e[15]+e[12]*e[32]*e[17]+e[12]*e[14]*e[35]+e[12]*e[31]*e[16]+e[12]*e[13]*e[34]+e[30]*e[14]*e[17]+e[30]*e[13]*e[16]+e[15]*e[35]*e[17]+e[15]*e[34]*e[16]-1.*e[15]*e[28]*e[10]-1.*e[15]*e[31]*e[13]-1.*e[15]*e[32]*e[14]-1.*e[15]*e[29]*e[11]+.5000000000*ep2[9]*e[33]+.5000000000*e[33]*ep2[16]-.5000000000*e[33]*ep2[11]+.5000000000*e[33]*ep2[12]+1.500000000*e[33]*ep2[15]+.5000000000*e[33]*ep2[17]-.5000000000*e[33]*ep2[10]-.5000000000*e[33]*ep2[14]-.5000000000*e[33]*ep2[13];
A[2]=-e[33]*e[22]*e[29]-e[33]*e[31]*e[20]-e[27]*e[32]*e[25]+e[27]*e[22]*e[35]-e[27]*e[23]*e[34]+e[27]*e[31]*e[26]+e[33]*e[28]*e[23]-e[21]*e[28]*e[35]+e[30]*e[25]*e[29]+e[24]*e[28]*e[32]-e[24]*e[31]*e[29]+e[18]*e[31]*e[35]-e[30]*e[28]*e[26]-e[30]*e[19]*e[35]+e[21]*e[34]*e[29]+e[33]*e[19]*e[32]-e[18]*e[32]*e[34]+e[30]*e[34]*e[20];
A[144]=e[18]*e[2]*e[17]+e[3]*e[21]*e[15]+e[3]*e[12]*e[24]+e[3]*e[23]*e[17]+e[3]*e[14]*e[26]+e[3]*e[22]*e[16]+e[3]*e[13]*e[25]+3.*e[6]*e[24]*e[15]+e[6]*e[26]*e[17]+e[6]*e[25]*e[16]+e[0]*e[20]*e[17]+e[0]*e[11]*e[26]+e[0]*e[19]*e[16]+e[0]*e[10]*e[25]+e[15]*e[26]*e[8]-1.*e[15]*e[20]*e[2]-1.*e[15]*e[19]*e[1]-1.*e[15]*e[22]*e[4]+e[15]*e[25]*e[7]-1.*e[15]*e[23]*e[5]+e[12]*e[21]*e[6]+e[12]*e[22]*e[7]+e[12]*e[4]*e[25]+e[12]*e[23]*e[8]+e[12]*e[5]*e[26]-1.*e[24]*e[11]*e[2]-1.*e[24]*e[10]*e[1]-1.*e[24]*e[13]*e[4]+e[24]*e[16]*e[7]-1.*e[24]*e[14]*e[5]+e[24]*e[17]*e[8]+e[21]*e[13]*e[7]+e[21]*e[4]*e[16]+e[21]*e[14]*e[8]+e[21]*e[5]*e[17]-1.*e[6]*e[23]*e[14]-1.*e[6]*e[20]*e[11]-1.*e[6]*e[19]*e[10]-1.*e[6]*e[22]*e[13]+e[9]*e[18]*e[6]+e[9]*e[0]*e[24]+e[9]*e[19]*e[7]+e[9]*e[1]*e[25]+e[9]*e[20]*e[8]+e[9]*e[2]*e[26]+e[18]*e[0]*e[15]+e[18]*e[10]*e[7]+e[18]*e[1]*e[16]+e[18]*e[11]*e[8];
A[3]=e[33]*e[10]*e[32]+e[33]*e[28]*e[14]-e[33]*e[13]*e[29]-e[33]*e[31]*e[11]+e[9]*e[31]*e[35]-e[9]*e[32]*e[34]+e[27]*e[13]*e[35]-e[27]*e[32]*e[16]+e[27]*e[31]*e[17]-e[27]*e[14]*e[34]+e[12]*e[34]*e[29]-e[12]*e[28]*e[35]+e[30]*e[34]*e[11]+e[30]*e[16]*e[29]-e[30]*e[10]*e[35]-e[30]*e[28]*e[17]+e[15]*e[28]*e[32]-e[15]*e[31]*e[29];
A[145]=e[0]*e[27]*e[6]+e[0]*e[28]*e[7]+e[0]*e[1]*e[34]+e[0]*e[29]*e[8]+e[0]*e[2]*e[35]+e[6]*e[34]*e[7]-1.*e[6]*e[32]*e[5]+e[6]*e[30]*e[3]+e[6]*e[35]*e[8]-1.*e[6]*e[29]*e[2]-1.*e[6]*e[28]*e[1]-1.*e[6]*e[31]*e[4]+e[27]*e[1]*e[7]+e[27]*e[2]*e[8]+e[3]*e[31]*e[7]+e[3]*e[4]*e[34]+e[3]*e[32]*e[8]+e[3]*e[5]*e[35]+e[30]*e[4]*e[7]+e[30]*e[5]*e[8]+.5000000000*ep2[0]*e[33]+1.500000000*e[33]*ep2[6]-.5000000000*e[33]*ep2[4]-.5000000000*e[33]*ep2[5]-.5000000000*e[33]*ep2[1]+.5000000000*e[33]*ep2[7]+.5000000000*e[33]*ep2[3]-.5000000000*e[33]*ep2[2]+.5000000000*e[33]*ep2[8];
A[4]=-e[0]*e[23]*e[16]+e[9]*e[4]*e[26]+e[9]*e[22]*e[8]-e[9]*e[5]*e[25]-e[9]*e[23]*e[7]+e[18]*e[4]*e[17]+e[18]*e[13]*e[8]-e[18]*e[5]*e[16]-e[18]*e[14]*e[7]+e[3]*e[16]*e[20]+e[3]*e[25]*e[11]-e[3]*e[10]*e[26]-e[3]*e[19]*e[17]+e[12]*e[7]*e[20]+e[12]*e[25]*e[2]-e[12]*e[1]*e[26]-e[12]*e[19]*e[8]+e[21]*e[7]*e[11]+e[21]*e[16]*e[2]-e[21]*e[1]*e[17]-e[21]*e[10]*e[8]+e[6]*e[10]*e[23]+e[6]*e[19]*e[14]-e[6]*e[13]*e[20]-e[6]*e[22]*e[11]+e[15]*e[1]*e[23]+e[15]*e[19]*e[5]-e[15]*e[4]*e[20]-e[15]*e[22]*e[2]+e[24]*e[1]*e[14]+e[24]*e[10]*e[5]-e[24]*e[4]*e[11]-e[24]*e[13]*e[2]+e[0]*e[13]*e[26]+e[0]*e[22]*e[17]-e[0]*e[14]*e[25];
A[150]=e[18]*e[19]*e[25]+.5000000000*ep3[24]-.5000000000*e[24]*ep2[23]+e[18]*e[20]*e[26]+e[21]*e[22]*e[25]+e[21]*e[23]*e[26]-.5000000000*e[24]*ep2[19]+.5000000000*ep2[21]*e[24]+.5000000000*e[24]*ep2[26]-.5000000000*e[24]*ep2[20]+.5000000000*ep2[18]*e[24]-.5000000000*e[24]*ep2[22]+.5000000000*e[24]*ep2[25];
A[5]=-e[3]*e[1]*e[35]-e[0]*e[32]*e[7]+e[27]*e[4]*e[8]+e[33]*e[1]*e[5]-e[33]*e[4]*e[2]+e[0]*e[4]*e[35]+e[3]*e[34]*e[2]-e[30]*e[1]*e[8]+e[30]*e[7]*e[2]-e[6]*e[4]*e[29]+e[3]*e[7]*e[29]+e[6]*e[1]*e[32]-e[0]*e[5]*e[34]-e[3]*e[28]*e[8]+e[0]*e[31]*e[8]+e[6]*e[28]*e[5]-e[6]*e[31]*e[2]-e[27]*e[5]*e[7];
A[151]=e[33]*e[16]*e[7]-1.*e[33]*e[14]*e[5]+e[33]*e[17]*e[8]+e[30]*e[13]*e[7]+e[30]*e[4]*e[16]+e[30]*e[14]*e[8]+e[30]*e[5]*e[17]+e[6]*e[27]*e[9]-1.*e[6]*e[28]*e[10]-1.*e[6]*e[31]*e[13]-1.*e[6]*e[32]*e[14]-1.*e[6]*e[29]*e[11]+e[9]*e[28]*e[7]+e[9]*e[1]*e[34]+e[9]*e[29]*e[8]+e[9]*e[2]*e[35]+e[27]*e[10]*e[7]+e[27]*e[1]*e[16]+e[27]*e[11]*e[8]+e[27]*e[2]*e[17]+e[3]*e[30]*e[15]+e[3]*e[12]*e[33]+e[3]*e[32]*e[17]+e[3]*e[14]*e[35]+e[3]*e[31]*e[16]+e[3]*e[13]*e[34]+3.*e[6]*e[33]*e[15]+e[6]*e[35]*e[17]+e[6]*e[34]*e[16]+e[0]*e[27]*e[15]+e[0]*e[9]*e[33]+e[0]*e[29]*e[17]+e[0]*e[11]*e[35]+e[0]*e[28]*e[16]+e[0]*e[10]*e[34]+e[15]*e[34]*e[7]-1.*e[15]*e[32]*e[5]+e[15]*e[35]*e[8]-1.*e[15]*e[29]*e[2]-1.*e[15]*e[28]*e[1]-1.*e[15]*e[31]*e[4]+e[12]*e[30]*e[6]+e[12]*e[31]*e[7]+e[12]*e[4]*e[34]+e[12]*e[32]*e[8]+e[12]*e[5]*e[35]-1.*e[33]*e[11]*e[2]-1.*e[33]*e[10]*e[1]-1.*e[33]*e[13]*e[4];
A[6]=e[6]*e[1]*e[5]-e[6]*e[4]*e[2]+e[3]*e[7]*e[2]+e[0]*e[4]*e[8]-e[0]*e[5]*e[7]-e[3]*e[1]*e[8];
A[148]=.5000000000*ep3[15]+e[9]*e[10]*e[16]-.5000000000*e[15]*ep2[11]+e[9]*e[11]*e[17]+.5000000000*ep2[12]*e[15]+.5000000000*e[15]*ep2[16]+.5000000000*e[15]*ep2[17]-.5000000000*e[15]*ep2[13]+.5000000000*ep2[9]*e[15]+e[12]*e[14]*e[17]-.5000000000*e[15]*ep2[10]-.5000000000*e[15]*ep2[14]+e[12]*e[13]*e[16];
A[7]=e[15]*e[28]*e[14]-e[15]*e[13]*e[29]-e[15]*e[31]*e[11]+e[33]*e[10]*e[14]-e[33]*e[13]*e[11]+e[9]*e[13]*e[35]-e[9]*e[32]*e[16]+e[9]*e[31]*e[17]-e[9]*e[14]*e[34]+e[27]*e[13]*e[17]-e[27]*e[14]*e[16]+e[12]*e[34]*e[11]+e[12]*e[16]*e[29]-e[12]*e[10]*e[35]-e[12]*e[28]*e[17]+e[30]*e[16]*e[11]-e[30]*e[10]*e[17]+e[15]*e[10]*e[32];
A[149]=e[18]*e[27]*e[24]+e[18]*e[28]*e[25]+e[18]*e[19]*e[34]+e[18]*e[29]*e[26]+e[18]*e[20]*e[35]+e[27]*e[19]*e[25]+e[27]*e[20]*e[26]+e[21]*e[30]*e[24]+e[21]*e[31]*e[25]+e[21]*e[22]*e[34]+e[21]*e[32]*e[26]+e[21]*e[23]*e[35]+e[30]*e[22]*e[25]+e[30]*e[23]*e[26]+e[24]*e[34]*e[25]+e[24]*e[35]*e[26]-1.*e[24]*e[29]*e[20]-1.*e[24]*e[31]*e[22]-1.*e[24]*e[32]*e[23]-1.*e[24]*e[28]*e[19]+1.500000000*e[33]*ep2[24]+.5000000000*e[33]*ep2[25]+.5000000000*e[33]*ep2[26]-.5000000000*e[33]*ep2[23]-.5000000000*e[33]*ep2[19]-.5000000000*e[33]*ep2[20]-.5000000000*e[33]*ep2[22]+.5000000000*ep2[18]*e[33]+.5000000000*ep2[21]*e[33];
A[9]=e[21]*e[25]*e[29]-e[27]*e[23]*e[25]+e[24]*e[19]*e[32]-e[21]*e[28]*e[26]-e[21]*e[19]*e[35]+e[18]*e[31]*e[26]-e[30]*e[19]*e[26]-e[24]*e[31]*e[20]+e[24]*e[28]*e[23]+e[27]*e[22]*e[26]+e[30]*e[25]*e[20]-e[33]*e[22]*e[20]+e[33]*e[19]*e[23]+e[21]*e[34]*e[20]-e[18]*e[23]*e[34]-e[24]*e[22]*e[29]-e[18]*e[32]*e[25]+e[18]*e[22]*e[35];
A[155]=e[12]*e[14]*e[8]+e[12]*e[5]*e[17]+e[15]*e[16]*e[7]+e[15]*e[17]*e[8]+e[0]*e[11]*e[17]+e[0]*e[9]*e[15]+e[0]*e[10]*e[16]+e[3]*e[14]*e[17]+e[3]*e[13]*e[16]+e[9]*e[10]*e[7]+e[9]*e[1]*e[16]+e[9]*e[11]*e[8]+e[9]*e[2]*e[17]-1.*e[15]*e[11]*e[2]-1.*e[15]*e[10]*e[1]-1.*e[15]*e[13]*e[4]-1.*e[15]*e[14]*e[5]+e[12]*e[3]*e[15]+e[12]*e[13]*e[7]+e[12]*e[4]*e[16]+.5000000000*ep2[12]*e[6]+1.500000000*ep2[15]*e[6]+.5000000000*e[6]*ep2[17]+.5000000000*e[6]*ep2[16]+.5000000000*e[6]*ep2[9]-.5000000000*e[6]*ep2[11]-.5000000000*e[6]*ep2[10]-.5000000000*e[6]*ep2[14]-.5000000000*e[6]*ep2[13];
A[8]=-e[9]*e[14]*e[16]-e[12]*e[10]*e[17]+e[9]*e[13]*e[17]-e[15]*e[13]*e[11]+e[15]*e[10]*e[14]+e[12]*e[16]*e[11];
A[154]=e[21]*e[14]*e[17]+e[21]*e[13]*e[16]+e[15]*e[26]*e[17]+e[15]*e[25]*e[16]-1.*e[15]*e[23]*e[14]-1.*e[15]*e[20]*e[11]-1.*e[15]*e[19]*e[10]-1.*e[15]*e[22]*e[13]+e[9]*e[20]*e[17]+e[9]*e[11]*e[26]+e[9]*e[19]*e[16]+e[9]*e[10]*e[25]+.5000000000*ep2[12]*e[24]+1.500000000*e[24]*ep2[15]+.5000000000*e[24]*ep2[17]+.5000000000*e[24]*ep2[16]+.5000000000*ep2[9]*e[24]-.5000000000*e[24]*ep2[11]-.5000000000*e[24]*ep2[10]-.5000000000*e[24]*ep2[14]-.5000000000*e[24]*ep2[13]+e[18]*e[11]*e[17]+e[18]*e[9]*e[15]+e[18]*e[10]*e[16]+e[12]*e[21]*e[15]+e[12]*e[23]*e[17]+e[12]*e[14]*e[26]+e[12]*e[22]*e[16]+e[12]*e[13]*e[25];
A[11]=-e[9]*e[5]*e[34]+e[9]*e[31]*e[8]-e[9]*e[32]*e[7]+e[27]*e[4]*e[17]+e[27]*e[13]*e[8]-e[27]*e[5]*e[16]-e[27]*e[14]*e[7]+e[0]*e[13]*e[35]-e[0]*e[32]*e[16]+e[0]*e[31]*e[17]-e[0]*e[14]*e[34]+e[9]*e[4]*e[35]+e[6]*e[10]*e[32]+e[6]*e[28]*e[14]-e[6]*e[13]*e[29]-e[6]*e[31]*e[11]+e[15]*e[1]*e[32]+e[3]*e[34]*e[11]+e[3]*e[16]*e[29]-e[3]*e[10]*e[35]-e[3]*e[28]*e[17]-e[12]*e[1]*e[35]+e[12]*e[7]*e[29]+e[12]*e[34]*e[2]-e[12]*e[28]*e[8]+e[15]*e[28]*e[5]-e[15]*e[4]*e[29]-e[15]*e[31]*e[2]+e[33]*e[1]*e[14]+e[33]*e[10]*e[5]-e[33]*e[4]*e[11]-e[33]*e[13]*e[2]+e[30]*e[7]*e[11]+e[30]*e[16]*e[2]-e[30]*e[1]*e[17]-e[30]*e[10]*e[8];
A[153]=e[21]*e[31]*e[7]+e[21]*e[4]*e[34]+e[21]*e[32]*e[8]+e[21]*e[5]*e[35]+e[30]*e[22]*e[7]+e[30]*e[4]*e[25]+e[30]*e[23]*e[8]+e[30]*e[5]*e[26]+3.*e[24]*e[33]*e[6]+e[24]*e[34]*e[7]+e[24]*e[35]*e[8]+e[33]*e[25]*e[7]+e[33]*e[26]*e[8]+e[0]*e[27]*e[24]+e[0]*e[18]*e[33]+e[0]*e[28]*e[25]+e[0]*e[19]*e[34]+e[0]*e[29]*e[26]+e[0]*e[20]*e[35]+e[18]*e[27]*e[6]+e[18]*e[28]*e[7]+e[18]*e[1]*e[34]+e[18]*e[29]*e[8]+e[18]*e[2]*e[35]+e[27]*e[19]*e[7]+e[27]*e[1]*e[25]+e[27]*e[20]*e[8]+e[27]*e[2]*e[26]+e[3]*e[30]*e[24]+e[3]*e[21]*e[33]+e[3]*e[31]*e[25]+e[3]*e[22]*e[34]+e[3]*e[32]*e[26]+e[3]*e[23]*e[35]+e[6]*e[30]*e[21]-1.*e[6]*e[29]*e[20]+e[6]*e[35]*e[26]-1.*e[6]*e[31]*e[22]-1.*e[6]*e[32]*e[23]-1.*e[6]*e[28]*e[19]+e[6]*e[34]*e[25]-1.*e[24]*e[32]*e[5]-1.*e[24]*e[29]*e[2]-1.*e[24]*e[28]*e[1]-1.*e[24]*e[31]*e[4]-1.*e[33]*e[20]*e[2]-1.*e[33]*e[19]*e[1]-1.*e[33]*e[22]*e[4]-1.*e[33]*e[23]*e[5];
A[10]=e[21]*e[25]*e[20]-e[21]*e[19]*e[26]+e[18]*e[22]*e[26]-e[18]*e[23]*e[25]-e[24]*e[22]*e[20]+e[24]*e[19]*e[23];
A[152]=e[3]*e[4]*e[25]+e[3]*e[23]*e[8]+e[3]*e[5]*e[26]+e[21]*e[4]*e[7]+e[21]*e[5]*e[8]+e[6]*e[25]*e[7]+e[6]*e[26]*e[8]+e[0]*e[19]*e[7]+e[0]*e[1]*e[25]+e[0]*e[20]*e[8]+e[0]*e[2]*e[26]-1.*e[6]*e[20]*e[2]-1.*e[6]*e[19]*e[1]-1.*e[6]*e[22]*e[4]-1.*e[6]*e[23]*e[5]+e[18]*e[1]*e[7]+e[18]*e[0]*e[6]+e[18]*e[2]*e[8]+e[3]*e[21]*e[6]+e[3]*e[22]*e[7]-.5000000000*e[24]*ep2[4]+.5000000000*e[24]*ep2[0]+1.500000000*e[24]*ep2[6]-.5000000000*e[24]*ep2[5]-.5000000000*e[24]*ep2[1]+.5000000000*e[24]*ep2[7]+.5000000000*e[24]*ep2[3]-.5000000000*e[24]*ep2[2]+.5000000000*e[24]*ep2[8];
A[13]=e[6]*e[28]*e[23]-e[6]*e[22]*e[29]-e[6]*e[31]*e[20]-e[3]*e[19]*e[35]+e[3]*e[34]*e[20]+e[3]*e[25]*e[29]-e[21]*e[1]*e[35]+e[21]*e[7]*e[29]+e[21]*e[34]*e[2]+e[24]*e[1]*e[32]+e[24]*e[28]*e[5]-e[24]*e[4]*e[29]-e[24]*e[31]*e[2]+e[33]*e[1]*e[23]+e[33]*e[19]*e[5]-e[33]*e[4]*e[20]-e[33]*e[22]*e[2]-e[21]*e[28]*e[8]+e[30]*e[7]*e[20]+e[30]*e[25]*e[2]-e[30]*e[1]*e[26]+e[18]*e[4]*e[35]-e[18]*e[5]*e[34]+e[18]*e[31]*e[8]-e[18]*e[32]*e[7]+e[27]*e[4]*e[26]+e[27]*e[22]*e[8]-e[27]*e[5]*e[25]-e[27]*e[23]*e[7]-e[3]*e[28]*e[26]-e[0]*e[32]*e[25]+e[0]*e[22]*e[35]-e[0]*e[23]*e[34]+e[0]*e[31]*e[26]-e[30]*e[19]*e[8]+e[6]*e[19]*e[32];
A[159]=.5000000000*ep2[18]*e[6]+.5000000000*ep2[21]*e[6]+1.500000000*ep2[24]*e[6]+.5000000000*e[6]*ep2[26]-.5000000000*e[6]*ep2[23]-.5000000000*e[6]*ep2[19]-.5000000000*e[6]*ep2[20]-.5000000000*e[6]*ep2[22]+.5000000000*e[6]*ep2[25]+e[21]*e[3]*e[24]+e[18]*e[20]*e[8]+e[21]*e[4]*e[25]+e[18]*e[19]*e[7]+e[18]*e[1]*e[25]+e[21]*e[22]*e[7]+e[21]*e[23]*e[8]+e[18]*e[0]*e[24]+e[18]*e[2]*e[26]+e[21]*e[5]*e[26]+e[24]*e[26]*e[8]-1.*e[24]*e[20]*e[2]-1.*e[24]*e[19]*e[1]-1.*e[24]*e[22]*e[4]+e[24]*e[25]*e[7]-1.*e[24]*e[23]*e[5]+e[0]*e[19]*e[25]+e[0]*e[20]*e[26]+e[3]*e[22]*e[25]+e[3]*e[23]*e[26];
A[12]=e[18]*e[4]*e[8]+e[3]*e[7]*e[20]+e[3]*e[25]*e[2]-e[3]*e[1]*e[26]-e[18]*e[5]*e[7]+e[6]*e[1]*e[23]+e[6]*e[19]*e[5]-e[6]*e[4]*e[20]-e[6]*e[22]*e[2]+e[21]*e[7]*e[2]-e[21]*e[1]*e[8]+e[24]*e[1]*e[5]-e[24]*e[4]*e[2]-e[3]*e[19]*e[8]+e[0]*e[4]*e[26]+e[0]*e[22]*e[8]-e[0]*e[5]*e[25]-e[0]*e[23]*e[7];
A[158]=e[9]*e[1]*e[7]+e[9]*e[0]*e[6]+e[9]*e[2]*e[8]+e[3]*e[12]*e[6]+e[3]*e[13]*e[7]+e[3]*e[4]*e[16]+e[3]*e[14]*e[8]+e[3]*e[5]*e[17]+e[12]*e[4]*e[7]+e[12]*e[5]*e[8]+e[6]*e[16]*e[7]+e[6]*e[17]*e[8]-1.*e[6]*e[11]*e[2]-1.*e[6]*e[10]*e[1]-1.*e[6]*e[13]*e[4]-1.*e[6]*e[14]*e[5]+e[0]*e[10]*e[7]+e[0]*e[1]*e[16]+e[0]*e[11]*e[8]+e[0]*e[2]*e[17]+.5000000000*ep2[3]*e[15]+1.500000000*e[15]*ep2[6]+.5000000000*e[15]*ep2[7]+.5000000000*e[15]*ep2[8]+.5000000000*ep2[0]*e[15]-.5000000000*e[15]*ep2[4]-.5000000000*e[15]*ep2[5]-.5000000000*e[15]*ep2[1]-.5000000000*e[15]*ep2[2];
A[15]=-e[15]*e[13]*e[2]-e[6]*e[13]*e[11]-e[15]*e[4]*e[11]+e[12]*e[16]*e[2]-e[3]*e[10]*e[17]+e[3]*e[16]*e[11]+e[0]*e[13]*e[17]-e[0]*e[14]*e[16]+e[15]*e[1]*e[14]-e[12]*e[10]*e[8]+e[9]*e[4]*e[17]+e[9]*e[13]*e[8]-e[9]*e[5]*e[16]-e[9]*e[14]*e[7]+e[15]*e[10]*e[5]+e[12]*e[7]*e[11]+e[6]*e[10]*e[14]-e[12]*e[1]*e[17];
A[157]=e[12]*e[30]*e[24]+e[12]*e[21]*e[33]+e[12]*e[31]*e[25]+e[12]*e[22]*e[34]+e[12]*e[32]*e[26]+e[12]*e[23]*e[35]+e[9]*e[27]*e[24]+e[9]*e[18]*e[33]+e[9]*e[28]*e[25]+e[9]*e[19]*e[34]+e[9]*e[29]*e[26]+e[9]*e[20]*e[35]+e[21]*e[30]*e[15]+e[21]*e[32]*e[17]+e[21]*e[14]*e[35]+e[21]*e[31]*e[16]+e[21]*e[13]*e[34]+e[30]*e[23]*e[17]+e[30]*e[14]*e[26]+e[30]*e[22]*e[16]+e[30]*e[13]*e[25]+e[15]*e[27]*e[18]+3.*e[15]*e[33]*e[24]-1.*e[15]*e[29]*e[20]+e[15]*e[35]*e[26]-1.*e[15]*e[31]*e[22]-1.*e[15]*e[32]*e[23]-1.*e[15]*e[28]*e[19]+e[15]*e[34]*e[25]+e[18]*e[29]*e[17]+e[18]*e[11]*e[35]+e[18]*e[28]*e[16]+e[18]*e[10]*e[34]+e[27]*e[20]*e[17]+e[27]*e[11]*e[26]+e[27]*e[19]*e[16]+e[27]*e[10]*e[25]-1.*e[24]*e[28]*e[10]-1.*e[24]*e[31]*e[13]-1.*e[24]*e[32]*e[14]+e[24]*e[34]*e[16]+e[24]*e[35]*e[17]-1.*e[24]*e[29]*e[11]-1.*e[33]*e[23]*e[14]+e[33]*e[25]*e[16]+e[33]*e[26]*e[17]-1.*e[33]*e[20]*e[11]-1.*e[33]*e[19]*e[10]-1.*e[33]*e[22]*e[13];
A[14]=e[18]*e[13]*e[17]+e[9]*e[13]*e[26]+e[9]*e[22]*e[17]-e[9]*e[14]*e[25]-e[18]*e[14]*e[16]-e[15]*e[13]*e[20]-e[15]*e[22]*e[11]+e[12]*e[16]*e[20]+e[12]*e[25]*e[11]-e[12]*e[10]*e[26]-e[12]*e[19]*e[17]+e[21]*e[16]*e[11]-e[21]*e[10]*e[17]-e[9]*e[23]*e[16]+e[24]*e[10]*e[14]-e[24]*e[13]*e[11]+e[15]*e[10]*e[23]+e[15]*e[19]*e[14];
A[156]=e[21]*e[12]*e[24]+e[21]*e[23]*e[17]+e[21]*e[14]*e[26]+e[21]*e[22]*e[16]+e[21]*e[13]*e[25]+e[24]*e[26]*e[17]+e[24]*e[25]*e[16]+e[9]*e[19]*e[25]+e[9]*e[18]*e[24]+e[9]*e[20]*e[26]+e[12]*e[22]*e[25]+e[12]*e[23]*e[26]+e[18]*e[20]*e[17]+e[18]*e[11]*e[26]+e[18]*e[19]*e[16]+e[18]*e[10]*e[25]-1.*e[24]*e[23]*e[14]-1.*e[24]*e[20]*e[11]-1.*e[24]*e[19]*e[10]-1.*e[24]*e[22]*e[13]+.5000000000*ep2[21]*e[15]+1.500000000*ep2[24]*e[15]+.5000000000*e[15]*ep2[25]+.5000000000*e[15]*ep2[26]+.5000000000*e[15]*ep2[18]-.5000000000*e[15]*ep2[23]-.5000000000*e[15]*ep2[19]-.5000000000*e[15]*ep2[20]-.5000000000*e[15]*ep2[22];
A[18]=e[6]*e[1]*e[14]+e[15]*e[1]*e[5]-e[0]*e[5]*e[16]-e[0]*e[14]*e[7]+e[0]*e[13]*e[8]-e[15]*e[4]*e[2]+e[12]*e[7]*e[2]+e[6]*e[10]*e[5]+e[3]*e[7]*e[11]-e[6]*e[4]*e[11]+e[3]*e[16]*e[2]-e[6]*e[13]*e[2]-e[3]*e[1]*e[17]-e[9]*e[5]*e[7]-e[3]*e[10]*e[8]-e[12]*e[1]*e[8]+e[0]*e[4]*e[17]+e[9]*e[4]*e[8];
A[128]=-.5000000000*e[14]*ep2[16]-.5000000000*e[14]*ep2[10]-.5000000000*e[14]*ep2[9]+e[11]*e[9]*e[12]+.5000000000*ep3[14]+e[17]*e[13]*e[16]+.5000000000*e[14]*ep2[12]+e[11]*e[10]*e[13]-.5000000000*e[14]*ep2[15]+.5000000000*e[14]*ep2[17]+e[17]*e[12]*e[15]+.5000000000*ep2[11]*e[14]+.5000000000*e[14]*ep2[13];
A[19]=-e[21]*e[19]*e[8]+e[18]*e[4]*e[26]-e[18]*e[5]*e[25]-e[18]*e[23]*e[7]+e[21]*e[25]*e[2]-e[21]*e[1]*e[26]+e[6]*e[19]*e[23]+e[18]*e[22]*e[8]-e[0]*e[23]*e[25]-e[6]*e[22]*e[20]+e[24]*e[1]*e[23]+e[24]*e[19]*e[5]-e[24]*e[4]*e[20]-e[24]*e[22]*e[2]+e[3]*e[25]*e[20]-e[3]*e[19]*e[26]+e[0]*e[22]*e[26]+e[21]*e[7]*e[20];
A[129]=.5000000000*ep2[20]*e[32]+1.500000000*e[32]*ep2[23]+.5000000000*e[32]*ep2[22]+.5000000000*e[32]*ep2[21]+.5000000000*e[32]*ep2[26]-.5000000000*e[32]*ep2[18]-.5000000000*e[32]*ep2[19]-.5000000000*e[32]*ep2[24]-.5000000000*e[32]*ep2[25]+e[20]*e[27]*e[21]+e[20]*e[18]*e[30]+e[20]*e[28]*e[22]+e[20]*e[19]*e[31]+e[20]*e[29]*e[23]+e[29]*e[19]*e[22]+e[29]*e[18]*e[21]+e[23]*e[30]*e[21]+e[23]*e[31]*e[22]+e[26]*e[30]*e[24]+e[26]*e[21]*e[33]+e[26]*e[31]*e[25]+e[26]*e[22]*e[34]+e[26]*e[23]*e[35]+e[35]*e[22]*e[25]+e[35]*e[21]*e[24]-1.*e[23]*e[27]*e[18]-1.*e[23]*e[33]*e[24]-1.*e[23]*e[28]*e[19]-1.*e[23]*e[34]*e[25];
A[16]=-e[9]*e[23]*e[25]-e[21]*e[10]*e[26]-e[21]*e[19]*e[17]-e[18]*e[23]*e[16]+e[18]*e[13]*e[26]+e[12]*e[25]*e[20]-e[12]*e[19]*e[26]-e[15]*e[22]*e[20]+e[21]*e[16]*e[20]+e[21]*e[25]*e[11]+e[24]*e[10]*e[23]+e[24]*e[19]*e[14]-e[24]*e[13]*e[20]-e[24]*e[22]*e[11]+e[18]*e[22]*e[17]-e[18]*e[14]*e[25]+e[9]*e[22]*e[26]+e[15]*e[19]*e[23];
A[130]=.5000000000*e[23]*ep2[21]+e[20]*e[19]*e[22]+e[20]*e[18]*e[21]+.5000000000*ep3[23]+e[26]*e[22]*e[25]+.5000000000*e[23]*ep2[26]-.5000000000*e[23]*ep2[18]+.5000000000*e[23]*ep2[22]-.5000000000*e[23]*ep2[19]+e[26]*e[21]*e[24]+.5000000000*ep2[20]*e[23]-.5000000000*e[23]*ep2[24]-.5000000000*e[23]*ep2[25];
A[17]=e[18]*e[13]*e[35]-e[18]*e[32]*e[16]+e[18]*e[31]*e[17]-e[18]*e[14]*e[34]+e[27]*e[13]*e[26]+e[27]*e[22]*e[17]-e[27]*e[14]*e[25]-e[27]*e[23]*e[16]-e[9]*e[32]*e[25]+e[9]*e[22]*e[35]-e[9]*e[23]*e[34]+e[9]*e[31]*e[26]+e[15]*e[19]*e[32]+e[15]*e[28]*e[23]-e[15]*e[22]*e[29]-e[15]*e[31]*e[20]+e[24]*e[10]*e[32]+e[24]*e[28]*e[14]-e[24]*e[13]*e[29]-e[24]*e[31]*e[11]+e[33]*e[10]*e[23]+e[33]*e[19]*e[14]-e[33]*e[13]*e[20]-e[33]*e[22]*e[11]+e[21]*e[16]*e[29]-e[21]*e[10]*e[35]-e[21]*e[28]*e[17]+e[30]*e[16]*e[20]+e[30]*e[25]*e[11]-e[30]*e[10]*e[26]-e[30]*e[19]*e[17]-e[12]*e[28]*e[26]-e[12]*e[19]*e[35]+e[12]*e[34]*e[20]+e[12]*e[25]*e[29]+e[21]*e[34]*e[11];
A[131]=-1.*e[32]*e[10]*e[1]+e[32]*e[13]*e[4]-1.*e[32]*e[16]*e[7]-1.*e[32]*e[15]*e[6]-1.*e[32]*e[9]*e[0]+e[32]*e[12]*e[3]+e[17]*e[30]*e[6]+e[17]*e[3]*e[33]+e[17]*e[31]*e[7]+e[17]*e[4]*e[34]+e[17]*e[5]*e[35]-1.*e[5]*e[27]*e[9]-1.*e[5]*e[28]*e[10]-1.*e[5]*e[33]*e[15]-1.*e[5]*e[34]*e[16]+e[5]*e[29]*e[11]+e[35]*e[12]*e[6]+e[35]*e[3]*e[15]+e[35]*e[13]*e[7]+e[35]*e[4]*e[16]+e[11]*e[27]*e[3]+e[11]*e[0]*e[30]+e[11]*e[28]*e[4]+e[11]*e[1]*e[31]+e[29]*e[9]*e[3]+e[29]*e[0]*e[12]+e[29]*e[10]*e[4]+e[29]*e[1]*e[13]+e[5]*e[30]*e[12]+3.*e[5]*e[32]*e[14]+e[5]*e[31]*e[13]+e[8]*e[30]*e[15]+e[8]*e[12]*e[33]+e[8]*e[32]*e[17]+e[8]*e[14]*e[35]+e[8]*e[31]*e[16]+e[8]*e[13]*e[34]+e[2]*e[27]*e[12]+e[2]*e[9]*e[30]+e[2]*e[29]*e[14]+e[2]*e[11]*e[32]+e[2]*e[28]*e[13]+e[2]*e[10]*e[31]-1.*e[14]*e[27]*e[0]-1.*e[14]*e[34]*e[7]-1.*e[14]*e[33]*e[6]+e[14]*e[30]*e[3]-1.*e[14]*e[28]*e[1]+e[14]*e[31]*e[4];
A[22]=.5000000000*e[18]*ep2[29]+.5000000000*e[18]*ep2[28]+.5000000000*e[18]*ep2[30]+.5000000000*e[18]*ep2[33]-.5000000000*e[18]*ep2[32]-.5000000000*e[18]*ep2[31]-.5000000000*e[18]*ep2[34]-.5000000000*e[18]*ep2[35]+1.500000000*e[18]*ep2[27]+e[27]*e[28]*e[19]+e[27]*e[29]*e[20]+e[21]*e[27]*e[30]+e[21]*e[29]*e[32]+e[21]*e[28]*e[31]+e[30]*e[28]*e[22]+e[30]*e[19]*e[31]+e[30]*e[29]*e[23]+e[30]*e[20]*e[32]+e[24]*e[27]*e[33]+e[24]*e[29]*e[35]+e[24]*e[28]*e[34]+e[33]*e[28]*e[25]+e[33]*e[19]*e[34]+e[33]*e[29]*e[26]+e[33]*e[20]*e[35]-1.*e[27]*e[35]*e[26]-1.*e[27]*e[31]*e[22]-1.*e[27]*e[32]*e[23]-1.*e[27]*e[34]*e[25];
A[132]=e[20]*e[1]*e[4]+e[20]*e[0]*e[3]+e[20]*e[2]*e[5]+e[5]*e[21]*e[3]+e[5]*e[22]*e[4]+e[8]*e[21]*e[6]+e[8]*e[3]*e[24]+e[8]*e[22]*e[7]+e[8]*e[4]*e[25]+e[8]*e[5]*e[26]+e[26]*e[4]*e[7]+e[26]*e[3]*e[6]+e[2]*e[18]*e[3]+e[2]*e[0]*e[21]+e[2]*e[19]*e[4]+e[2]*e[1]*e[22]-1.*e[5]*e[19]*e[1]-1.*e[5]*e[18]*e[0]-1.*e[5]*e[25]*e[7]-1.*e[5]*e[24]*e[6]+.5000000000*e[23]*ep2[4]-.5000000000*e[23]*ep2[0]-.5000000000*e[23]*ep2[6]+1.500000000*e[23]*ep2[5]-.5000000000*e[23]*ep2[1]-.5000000000*e[23]*ep2[7]+.5000000000*e[23]*ep2[3]+.5000000000*e[23]*ep2[2]+.5000000000*e[23]*ep2[8];
A[23]=1.500000000*e[9]*ep2[27]+.5000000000*e[9]*ep2[29]+.5000000000*e[9]*ep2[28]-.5000000000*e[9]*ep2[32]-.5000000000*e[9]*ep2[31]+.5000000000*e[9]*ep2[33]+.5000000000*e[9]*ep2[30]-.5000000000*e[9]*ep2[34]-.5000000000*e[9]*ep2[35]+e[33]*e[27]*e[15]+e[33]*e[29]*e[17]+e[33]*e[11]*e[35]+e[33]*e[28]*e[16]+e[33]*e[10]*e[34]+e[27]*e[29]*e[11]+e[27]*e[28]*e[10]+e[27]*e[30]*e[12]-1.*e[27]*e[31]*e[13]-1.*e[27]*e[32]*e[14]-1.*e[27]*e[34]*e[16]-1.*e[27]*e[35]*e[17]+e[30]*e[29]*e[14]+e[30]*e[11]*e[32]+e[30]*e[28]*e[13]+e[30]*e[10]*e[31]+e[12]*e[29]*e[32]+e[12]*e[28]*e[31]+e[15]*e[29]*e[35]+e[15]*e[28]*e[34];
A[133]=-1.*e[32]*e[24]*e[6]+e[8]*e[30]*e[24]+e[8]*e[21]*e[33]+e[8]*e[31]*e[25]+e[8]*e[22]*e[34]+e[26]*e[30]*e[6]+e[26]*e[3]*e[33]+e[26]*e[31]*e[7]+e[26]*e[4]*e[34]+e[26]*e[32]*e[8]+e[26]*e[5]*e[35]+e[35]*e[21]*e[6]+e[35]*e[3]*e[24]+e[35]*e[22]*e[7]+e[35]*e[4]*e[25]+e[35]*e[23]*e[8]+e[2]*e[27]*e[21]+e[2]*e[18]*e[30]+e[2]*e[28]*e[22]+e[2]*e[19]*e[31]+e[2]*e[29]*e[23]+e[2]*e[20]*e[32]+e[20]*e[27]*e[3]+e[20]*e[0]*e[30]+e[20]*e[28]*e[4]+e[20]*e[1]*e[31]+e[20]*e[29]*e[5]+e[29]*e[18]*e[3]+e[29]*e[0]*e[21]+e[29]*e[19]*e[4]+e[29]*e[1]*e[22]+e[5]*e[30]*e[21]+e[5]*e[31]*e[22]+3.*e[5]*e[32]*e[23]-1.*e[5]*e[27]*e[18]-1.*e[5]*e[33]*e[24]-1.*e[5]*e[28]*e[19]-1.*e[5]*e[34]*e[25]-1.*e[23]*e[27]*e[0]-1.*e[23]*e[34]*e[7]-1.*e[23]*e[33]*e[6]+e[23]*e[30]*e[3]-1.*e[23]*e[28]*e[1]+e[23]*e[31]*e[4]+e[32]*e[21]*e[3]-1.*e[32]*e[19]*e[1]+e[32]*e[22]*e[4]-1.*e[32]*e[18]*e[0]-1.*e[32]*e[25]*e[7];
A[20]=.5000000000*e[27]*ep2[33]-.5000000000*e[27]*ep2[32]-.5000000000*e[27]*ep2[31]-.5000000000*e[27]*ep2[34]-.5000000000*e[27]*ep2[35]+e[33]*e[29]*e[35]+.5000000000*e[27]*ep2[29]+e[30]*e[29]*e[32]+e[30]*e[28]*e[31]+e[33]*e[28]*e[34]+.5000000000*e[27]*ep2[28]+.5000000000*e[27]*ep2[30]+.5000000000*ep3[27];
A[134]=e[14]*e[21]*e[12]+e[14]*e[22]*e[13]+e[17]*e[21]*e[15]+e[17]*e[12]*e[24]+e[17]*e[14]*e[26]+e[17]*e[22]*e[16]+e[17]*e[13]*e[25]+e[26]*e[12]*e[15]+e[26]*e[13]*e[16]-1.*e[14]*e[24]*e[15]-1.*e[14]*e[25]*e[16]-1.*e[14]*e[18]*e[9]-1.*e[14]*e[19]*e[10]+e[11]*e[18]*e[12]+e[11]*e[9]*e[21]+e[11]*e[19]*e[13]+e[11]*e[10]*e[22]+e[20]*e[11]*e[14]+e[20]*e[9]*e[12]+e[20]*e[10]*e[13]+1.500000000*e[23]*ep2[14]+.5000000000*e[23]*ep2[12]+.5000000000*e[23]*ep2[13]+.5000000000*e[23]*ep2[17]+.5000000000*ep2[11]*e[23]-.5000000000*e[23]*ep2[16]-.5000000000*e[23]*ep2[9]-.5000000000*e[23]*ep2[15]-.5000000000*e[23]*ep2[10];
A[21]=1.500000000*e[0]*ep2[27]+.5000000000*e[0]*ep2[29]+.5000000000*e[0]*ep2[28]+.5000000000*e[0]*ep2[30]-.5000000000*e[0]*ep2[32]-.5000000000*e[0]*ep2[31]+.5000000000*e[0]*ep2[33]-.5000000000*e[0]*ep2[34]-.5000000000*e[0]*ep2[35]-1.*e[27]*e[31]*e[4]+e[3]*e[27]*e[30]+e[3]*e[29]*e[32]+e[3]*e[28]*e[31]+e[30]*e[28]*e[4]+e[30]*e[1]*e[31]+e[30]*e[29]*e[5]+e[30]*e[2]*e[32]+e[6]*e[27]*e[33]+e[6]*e[29]*e[35]+e[6]*e[28]*e[34]+e[27]*e[28]*e[1]+e[27]*e[29]*e[2]+e[33]*e[28]*e[7]+e[33]*e[1]*e[34]+e[33]*e[29]*e[8]+e[33]*e[2]*e[35]-1.*e[27]*e[34]*e[7]-1.*e[27]*e[32]*e[5]-1.*e[27]*e[35]*e[8];
A[135]=e[14]*e[12]*e[3]+e[14]*e[13]*e[4]+e[17]*e[12]*e[6]+e[17]*e[3]*e[15]+e[17]*e[13]*e[7]+e[17]*e[4]*e[16]+e[17]*e[14]*e[8]+e[8]*e[12]*e[15]+e[8]*e[13]*e[16]+e[2]*e[11]*e[14]+e[2]*e[9]*e[12]+e[2]*e[10]*e[13]+e[11]*e[9]*e[3]+e[11]*e[0]*e[12]+e[11]*e[10]*e[4]+e[11]*e[1]*e[13]-1.*e[14]*e[10]*e[1]-1.*e[14]*e[16]*e[7]-1.*e[14]*e[15]*e[6]-1.*e[14]*e[9]*e[0]-.5000000000*e[5]*ep2[16]-.5000000000*e[5]*ep2[9]+.5000000000*e[5]*ep2[11]+.5000000000*e[5]*ep2[12]-.5000000000*e[5]*ep2[15]-.5000000000*e[5]*ep2[10]+.5000000000*e[5]*ep2[13]+1.500000000*ep2[14]*e[5]+.5000000000*e[5]*ep2[17];
A[27]=1.500000000*e[27]*ep2[9]-.5000000000*e[27]*ep2[16]+.5000000000*e[27]*ep2[11]+.5000000000*e[27]*ep2[12]+.5000000000*e[27]*ep2[15]-.5000000000*e[27]*ep2[17]+.5000000000*e[27]*ep2[10]-.5000000000*e[27]*ep2[14]-.5000000000*e[27]*ep2[13]+e[12]*e[10]*e[31]+e[30]*e[11]*e[14]+e[30]*e[10]*e[13]+e[15]*e[9]*e[33]+e[15]*e[29]*e[17]+e[15]*e[11]*e[35]+e[15]*e[28]*e[16]+e[15]*e[10]*e[34]+e[33]*e[11]*e[17]+e[33]*e[10]*e[16]-1.*e[9]*e[31]*e[13]-1.*e[9]*e[32]*e[14]-1.*e[9]*e[34]*e[16]-1.*e[9]*e[35]*e[17]+e[9]*e[29]*e[11]+e[9]*e[28]*e[10]+e[12]*e[9]*e[30]+e[12]*e[29]*e[14]+e[12]*e[11]*e[32]+e[12]*e[28]*e[13];
A[137]=e[29]*e[18]*e[12]+e[29]*e[9]*e[21]+e[29]*e[19]*e[13]+e[29]*e[10]*e[22]+e[17]*e[30]*e[24]+e[17]*e[21]*e[33]+e[17]*e[31]*e[25]+e[17]*e[22]*e[34]+e[17]*e[32]*e[26]+e[17]*e[23]*e[35]-1.*e[23]*e[27]*e[9]-1.*e[23]*e[28]*e[10]-1.*e[23]*e[33]*e[15]-1.*e[23]*e[34]*e[16]-1.*e[32]*e[24]*e[15]-1.*e[32]*e[25]*e[16]-1.*e[32]*e[18]*e[9]-1.*e[32]*e[19]*e[10]+e[26]*e[30]*e[15]+e[26]*e[12]*e[33]+e[26]*e[31]*e[16]+e[26]*e[13]*e[34]+e[35]*e[21]*e[15]+e[35]*e[12]*e[24]+e[35]*e[22]*e[16]+e[35]*e[13]*e[25]+e[14]*e[30]*e[21]+e[14]*e[31]*e[22]+3.*e[14]*e[32]*e[23]+e[11]*e[27]*e[21]+e[11]*e[18]*e[30]+e[11]*e[28]*e[22]+e[11]*e[19]*e[31]+e[11]*e[29]*e[23]+e[11]*e[20]*e[32]+e[23]*e[30]*e[12]+e[23]*e[31]*e[13]+e[32]*e[21]*e[12]+e[32]*e[22]*e[13]-1.*e[14]*e[27]*e[18]-1.*e[14]*e[33]*e[24]+e[14]*e[29]*e[20]+e[14]*e[35]*e[26]-1.*e[14]*e[28]*e[19]-1.*e[14]*e[34]*e[25]+e[20]*e[27]*e[12]+e[20]*e[9]*e[30]+e[20]*e[28]*e[13]+e[20]*e[10]*e[31];
A[26]=.5000000000*e[0]*ep2[1]+.5000000000*e[0]*ep2[2]+e[6]*e[2]*e[8]+e[6]*e[1]*e[7]+.5000000000*e[0]*ep2[3]+e[3]*e[1]*e[4]+.5000000000*e[0]*ep2[6]+e[3]*e[2]*e[5]-.5000000000*e[0]*ep2[5]-.5000000000*e[0]*ep2[8]+.5000000000*ep3[0]-.5000000000*e[0]*ep2[7]-.5000000000*e[0]*ep2[4];
A[136]=1.500000000*ep2[23]*e[14]+.5000000000*e[14]*ep2[26]-.5000000000*e[14]*ep2[18]-.5000000000*e[14]*ep2[19]+.5000000000*e[14]*ep2[20]+.5000000000*e[14]*ep2[22]-.5000000000*e[14]*ep2[24]+.5000000000*e[14]*ep2[21]-.5000000000*e[14]*ep2[25]+e[23]*e[21]*e[12]+e[23]*e[22]*e[13]+e[26]*e[21]*e[15]+e[26]*e[12]*e[24]+e[26]*e[23]*e[17]+e[26]*e[22]*e[16]+e[26]*e[13]*e[25]+e[17]*e[22]*e[25]+e[17]*e[21]*e[24]+e[11]*e[19]*e[22]+e[11]*e[18]*e[21]+e[11]*e[20]*e[23]+e[20]*e[18]*e[12]+e[20]*e[9]*e[21]+e[20]*e[19]*e[13]+e[20]*e[10]*e[22]-1.*e[23]*e[24]*e[15]-1.*e[23]*e[25]*e[16]-1.*e[23]*e[18]*e[9]-1.*e[23]*e[19]*e[10];
A[25]=1.500000000*e[27]*ep2[0]-.5000000000*e[27]*ep2[4]+.5000000000*e[27]*ep2[6]-.5000000000*e[27]*ep2[5]+.5000000000*e[27]*ep2[1]-.5000000000*e[27]*ep2[7]+.5000000000*e[27]*ep2[3]+.5000000000*e[27]*ep2[2]-.5000000000*e[27]*ep2[8]+e[0]*e[33]*e[6]+e[0]*e[30]*e[3]-1.*e[0]*e[35]*e[8]-1.*e[0]*e[31]*e[4]+e[3]*e[28]*e[4]+e[3]*e[1]*e[31]+e[3]*e[29]*e[5]+e[3]*e[2]*e[32]+e[30]*e[1]*e[4]+e[30]*e[2]*e[5]+e[6]*e[28]*e[7]+e[6]*e[1]*e[34]+e[6]*e[29]*e[8]+e[6]*e[2]*e[35]+e[33]*e[1]*e[7]+e[33]*e[2]*e[8]+e[0]*e[28]*e[1]+e[0]*e[29]*e[2]-1.*e[0]*e[34]*e[7]-1.*e[0]*e[32]*e[5];
A[139]=e[8]*e[22]*e[25]+e[8]*e[21]*e[24]+e[20]*e[18]*e[3]+e[20]*e[0]*e[21]+e[20]*e[19]*e[4]+e[20]*e[1]*e[22]+e[20]*e[2]*e[23]+e[23]*e[21]*e[3]+e[23]*e[22]*e[4]+e[23]*e[26]*e[8]-1.*e[23]*e[19]*e[1]-1.*e[23]*e[18]*e[0]-1.*e[23]*e[25]*e[7]-1.*e[23]*e[24]*e[6]+e[2]*e[19]*e[22]+e[2]*e[18]*e[21]+e[26]*e[21]*e[6]+e[26]*e[3]*e[24]+e[26]*e[22]*e[7]+e[26]*e[4]*e[25]+.5000000000*ep2[20]*e[5]+1.500000000*ep2[23]*e[5]+.5000000000*e[5]*ep2[22]+.5000000000*e[5]*ep2[21]+.5000000000*e[5]*ep2[26]-.5000000000*e[5]*ep2[18]-.5000000000*e[5]*ep2[19]-.5000000000*e[5]*ep2[24]-.5000000000*e[5]*ep2[25];
A[24]=e[24]*e[11]*e[8]+e[24]*e[2]*e[17]+3.*e[9]*e[18]*e[0]+e[9]*e[19]*e[1]+e[9]*e[20]*e[2]+e[18]*e[10]*e[1]+e[18]*e[11]*e[2]+e[3]*e[18]*e[12]+e[3]*e[9]*e[21]+e[3]*e[20]*e[14]+e[3]*e[11]*e[23]+e[3]*e[19]*e[13]+e[3]*e[10]*e[22]+e[6]*e[18]*e[15]+e[6]*e[9]*e[24]+e[6]*e[20]*e[17]+e[6]*e[11]*e[26]+e[6]*e[19]*e[16]+e[6]*e[10]*e[25]+e[0]*e[20]*e[11]+e[0]*e[19]*e[10]-1.*e[9]*e[26]*e[8]-1.*e[9]*e[22]*e[4]-1.*e[9]*e[25]*e[7]-1.*e[9]*e[23]*e[5]+e[12]*e[0]*e[21]+e[12]*e[19]*e[4]+e[12]*e[1]*e[22]+e[12]*e[20]*e[5]+e[12]*e[2]*e[23]-1.*e[18]*e[13]*e[4]-1.*e[18]*e[16]*e[7]-1.*e[18]*e[14]*e[5]-1.*e[18]*e[17]*e[8]+e[21]*e[10]*e[4]+e[21]*e[1]*e[13]+e[21]*e[11]*e[5]+e[21]*e[2]*e[14]+e[15]*e[0]*e[24]+e[15]*e[19]*e[7]+e[15]*e[1]*e[25]+e[15]*e[20]*e[8]+e[15]*e[2]*e[26]-1.*e[0]*e[23]*e[14]-1.*e[0]*e[25]*e[16]-1.*e[0]*e[26]*e[17]-1.*e[0]*e[22]*e[13]+e[24]*e[10]*e[7]+e[24]*e[1]*e[16];
A[138]=e[11]*e[1]*e[4]+e[11]*e[0]*e[3]+e[11]*e[2]*e[5]+e[5]*e[12]*e[3]+e[5]*e[13]*e[4]+e[8]*e[12]*e[6]+e[8]*e[3]*e[15]+e[8]*e[13]*e[7]+e[8]*e[4]*e[16]+e[8]*e[5]*e[17]+e[17]*e[4]*e[7]+e[17]*e[3]*e[6]-1.*e[5]*e[10]*e[1]-1.*e[5]*e[16]*e[7]-1.*e[5]*e[15]*e[6]-1.*e[5]*e[9]*e[0]+e[2]*e[9]*e[3]+e[2]*e[0]*e[12]+e[2]*e[10]*e[4]+e[2]*e[1]*e[13]+.5000000000*ep2[2]*e[14]-.5000000000*e[14]*ep2[0]-.5000000000*e[14]*ep2[6]-.5000000000*e[14]*ep2[1]-.5000000000*e[14]*ep2[7]+1.500000000*e[14]*ep2[5]+.5000000000*e[14]*ep2[4]+.5000000000*e[14]*ep2[3]+.5000000000*e[14]*ep2[8];
A[31]=e[3]*e[27]*e[12]+e[3]*e[9]*e[30]+e[3]*e[29]*e[14]+e[3]*e[11]*e[32]+e[3]*e[28]*e[13]+e[3]*e[10]*e[31]+e[6]*e[27]*e[15]+e[6]*e[9]*e[33]+e[6]*e[29]*e[17]+e[6]*e[11]*e[35]+e[6]*e[28]*e[16]+e[6]*e[10]*e[34]+3.*e[0]*e[27]*e[9]+e[0]*e[29]*e[11]+e[0]*e[28]*e[10]-1.*e[9]*e[34]*e[7]-1.*e[9]*e[32]*e[5]-1.*e[9]*e[35]*e[8]+e[9]*e[29]*e[2]+e[9]*e[28]*e[1]-1.*e[9]*e[31]*e[4]+e[12]*e[0]*e[30]+e[12]*e[28]*e[4]+e[12]*e[1]*e[31]+e[12]*e[29]*e[5]+e[12]*e[2]*e[32]+e[27]*e[11]*e[2]+e[27]*e[10]*e[1]-1.*e[27]*e[13]*e[4]-1.*e[27]*e[16]*e[7]-1.*e[27]*e[14]*e[5]-1.*e[27]*e[17]*e[8]+e[30]*e[10]*e[4]+e[30]*e[1]*e[13]+e[30]*e[11]*e[5]+e[30]*e[2]*e[14]+e[15]*e[0]*e[33]+e[15]*e[28]*e[7]+e[15]*e[1]*e[34]+e[15]*e[29]*e[8]+e[15]*e[2]*e[35]-1.*e[0]*e[31]*e[13]-1.*e[0]*e[32]*e[14]-1.*e[0]*e[34]*e[16]-1.*e[0]*e[35]*e[17]+e[33]*e[10]*e[7]+e[33]*e[1]*e[16]+e[33]*e[11]*e[8]+e[33]*e[2]*e[17];
A[141]=.5000000000*ep2[30]*e[6]+.5000000000*e[6]*ep2[27]-.5000000000*e[6]*ep2[32]-.5000000000*e[6]*ep2[28]-.5000000000*e[6]*ep2[29]-.5000000000*e[6]*ep2[31]+1.500000000*e[6]*ep2[33]+.5000000000*e[6]*ep2[34]+.5000000000*e[6]*ep2[35]+e[0]*e[27]*e[33]+e[0]*e[29]*e[35]+e[0]*e[28]*e[34]+e[3]*e[30]*e[33]+e[3]*e[32]*e[35]+e[3]*e[31]*e[34]+e[30]*e[31]*e[7]+e[30]*e[4]*e[34]+e[30]*e[32]*e[8]+e[30]*e[5]*e[35]+e[27]*e[28]*e[7]+e[27]*e[1]*e[34]+e[27]*e[29]*e[8]+e[27]*e[2]*e[35]+e[33]*e[34]*e[7]+e[33]*e[35]*e[8]-1.*e[33]*e[32]*e[5]-1.*e[33]*e[29]*e[2]-1.*e[33]*e[28]*e[1]-1.*e[33]*e[31]*e[4];
A[30]=e[24]*e[20]*e[26]+e[21]*e[19]*e[22]-.5000000000*e[18]*ep2[22]-.5000000000*e[18]*ep2[25]+.5000000000*ep3[18]+.5000000000*e[18]*ep2[21]+e[21]*e[20]*e[23]+.5000000000*e[18]*ep2[20]+.5000000000*e[18]*ep2[19]+.5000000000*e[18]*ep2[24]+e[24]*e[19]*e[25]-.5000000000*e[18]*ep2[23]-.5000000000*e[18]*ep2[26];
A[140]=.5000000000*e[33]*ep2[35]+.5000000000*ep3[33]+.5000000000*ep2[27]*e[33]+.5000000000*ep2[30]*e[33]-.5000000000*e[33]*ep2[29]+.5000000000*e[33]*ep2[34]-.5000000000*e[33]*ep2[32]-.5000000000*e[33]*ep2[28]+e[30]*e[32]*e[35]-.5000000000*e[33]*ep2[31]+e[27]*e[29]*e[35]+e[27]*e[28]*e[34]+e[30]*e[31]*e[34];
A[29]=1.500000000*e[27]*ep2[18]+.5000000000*e[27]*ep2[19]+.5000000000*e[27]*ep2[20]+.5000000000*e[27]*ep2[21]+.5000000000*e[27]*ep2[24]-.5000000000*e[27]*ep2[26]-.5000000000*e[27]*ep2[23]-.5000000000*e[27]*ep2[22]-.5000000000*e[27]*ep2[25]+e[33]*e[20]*e[26]-1.*e[18]*e[35]*e[26]-1.*e[18]*e[31]*e[22]-1.*e[18]*e[32]*e[23]-1.*e[18]*e[34]*e[25]+e[18]*e[28]*e[19]+e[18]*e[29]*e[20]+e[21]*e[18]*e[30]+e[21]*e[28]*e[22]+e[21]*e[19]*e[31]+e[21]*e[29]*e[23]+e[21]*e[20]*e[32]+e[30]*e[19]*e[22]+e[30]*e[20]*e[23]+e[24]*e[18]*e[33]+e[24]*e[28]*e[25]+e[24]*e[19]*e[34]+e[24]*e[29]*e[26]+e[24]*e[20]*e[35]+e[33]*e[19]*e[25];
A[143]=e[9]*e[27]*e[33]+e[9]*e[29]*e[35]+e[9]*e[28]*e[34]+e[33]*e[35]*e[17]+e[33]*e[34]*e[16]+e[27]*e[29]*e[17]+e[27]*e[11]*e[35]+e[27]*e[28]*e[16]+e[27]*e[10]*e[34]+e[33]*e[30]*e[12]-1.*e[33]*e[28]*e[10]-1.*e[33]*e[31]*e[13]-1.*e[33]*e[32]*e[14]-1.*e[33]*e[29]*e[11]+e[30]*e[32]*e[17]+e[30]*e[14]*e[35]+e[30]*e[31]*e[16]+e[30]*e[13]*e[34]+e[12]*e[32]*e[35]+e[12]*e[31]*e[34]+.5000000000*e[15]*ep2[27]-.5000000000*e[15]*ep2[32]-.5000000000*e[15]*ep2[28]-.5000000000*e[15]*ep2[29]-.5000000000*e[15]*ep2[31]+1.500000000*e[15]*ep2[33]+.5000000000*e[15]*ep2[30]+.5000000000*e[15]*ep2[34]+.5000000000*e[15]*ep2[35];
A[28]=.5000000000*e[9]*ep2[12]-.5000000000*e[9]*ep2[16]+.5000000000*e[9]*ep2[10]-.5000000000*e[9]*ep2[17]-.5000000000*e[9]*ep2[13]+e[15]*e[10]*e[16]+e[12]*e[11]*e[14]+.5000000000*e[9]*ep2[11]+.5000000000*e[9]*ep2[15]-.5000000000*e[9]*ep2[14]+e[15]*e[11]*e[17]+.5000000000*ep3[9]+e[12]*e[10]*e[13];
A[142]=e[18]*e[27]*e[33]+e[18]*e[29]*e[35]+e[18]*e[28]*e[34]+e[27]*e[28]*e[25]+e[27]*e[19]*e[34]+e[27]*e[29]*e[26]+e[27]*e[20]*e[35]+e[21]*e[30]*e[33]+e[21]*e[32]*e[35]+e[21]*e[31]*e[34]+e[30]*e[31]*e[25]+e[30]*e[22]*e[34]+e[30]*e[32]*e[26]+e[30]*e[23]*e[35]+e[33]*e[34]*e[25]+e[33]*e[35]*e[26]-1.*e[33]*e[29]*e[20]-1.*e[33]*e[31]*e[22]-1.*e[33]*e[32]*e[23]-1.*e[33]*e[28]*e[19]+.5000000000*ep2[27]*e[24]+.5000000000*ep2[30]*e[24]+1.500000000*e[24]*ep2[33]+.5000000000*e[24]*ep2[35]+.5000000000*e[24]*ep2[34]-.5000000000*e[24]*ep2[32]-.5000000000*e[24]*ep2[28]-.5000000000*e[24]*ep2[29]-.5000000000*e[24]*ep2[31];
A[36]=.5000000000*e[9]*ep2[21]+.5000000000*e[9]*ep2[24]+.5000000000*e[9]*ep2[19]+1.500000000*e[9]*ep2[18]+.5000000000*e[9]*ep2[20]-.5000000000*e[9]*ep2[26]-.5000000000*e[9]*ep2[23]-.5000000000*e[9]*ep2[22]-.5000000000*e[9]*ep2[25]+e[21]*e[18]*e[12]+e[21]*e[20]*e[14]+e[21]*e[11]*e[23]+e[21]*e[19]*e[13]+e[21]*e[10]*e[22]+e[24]*e[18]*e[15]+e[24]*e[20]*e[17]+e[24]*e[11]*e[26]+e[24]*e[19]*e[16]+e[24]*e[10]*e[25]+e[15]*e[19]*e[25]+e[15]*e[20]*e[26]+e[12]*e[19]*e[22]+e[12]*e[20]*e[23]+e[18]*e[20]*e[11]+e[18]*e[19]*e[10]-1.*e[18]*e[23]*e[14]-1.*e[18]*e[25]*e[16]-1.*e[18]*e[26]*e[17]-1.*e[18]*e[22]*e[13];
A[182]=.5000000000*ep2[29]*e[26]+.5000000000*ep2[32]*e[26]+.5000000000*e[26]*ep2[33]+1.500000000*e[26]*ep2[35]+.5000000000*e[26]*ep2[34]-.5000000000*e[26]*ep2[27]-.5000000000*e[26]*ep2[28]-.5000000000*e[26]*ep2[31]-.5000000000*e[26]*ep2[30]+e[20]*e[27]*e[33]+e[20]*e[29]*e[35]+e[20]*e[28]*e[34]+e[29]*e[27]*e[24]+e[29]*e[18]*e[33]+e[29]*e[28]*e[25]+e[29]*e[19]*e[34]+e[23]*e[30]*e[33]+e[23]*e[32]*e[35]+e[23]*e[31]*e[34]+e[32]*e[30]*e[24]+e[32]*e[21]*e[33]+e[32]*e[31]*e[25]+e[32]*e[22]*e[34]+e[35]*e[33]*e[24]+e[35]*e[34]*e[25]-1.*e[35]*e[27]*e[18]-1.*e[35]*e[30]*e[21]-1.*e[35]*e[31]*e[22]-1.*e[35]*e[28]*e[19];
A[37]=e[12]*e[19]*e[31]+e[12]*e[29]*e[23]+e[12]*e[20]*e[32]+3.*e[9]*e[27]*e[18]+e[9]*e[28]*e[19]+e[9]*e[29]*e[20]+e[21]*e[9]*e[30]+e[21]*e[29]*e[14]+e[21]*e[11]*e[32]+e[21]*e[28]*e[13]+e[21]*e[10]*e[31]+e[30]*e[20]*e[14]+e[30]*e[11]*e[23]+e[30]*e[19]*e[13]+e[30]*e[10]*e[22]+e[9]*e[33]*e[24]-1.*e[9]*e[35]*e[26]-1.*e[9]*e[31]*e[22]-1.*e[9]*e[32]*e[23]-1.*e[9]*e[34]*e[25]+e[18]*e[29]*e[11]+e[18]*e[28]*e[10]+e[27]*e[20]*e[11]+e[27]*e[19]*e[10]+e[15]*e[27]*e[24]+e[15]*e[18]*e[33]+e[15]*e[28]*e[25]+e[15]*e[19]*e[34]+e[15]*e[29]*e[26]+e[15]*e[20]*e[35]-1.*e[18]*e[31]*e[13]-1.*e[18]*e[32]*e[14]-1.*e[18]*e[34]*e[16]-1.*e[18]*e[35]*e[17]-1.*e[27]*e[23]*e[14]-1.*e[27]*e[25]*e[16]-1.*e[27]*e[26]*e[17]-1.*e[27]*e[22]*e[13]+e[24]*e[29]*e[17]+e[24]*e[11]*e[35]+e[24]*e[28]*e[16]+e[24]*e[10]*e[34]+e[33]*e[20]*e[17]+e[33]*e[11]*e[26]+e[33]*e[19]*e[16]+e[33]*e[10]*e[25]+e[12]*e[27]*e[21]+e[12]*e[18]*e[30]+e[12]*e[28]*e[22];
A[183]=-.5000000000*e[17]*ep2[27]+.5000000000*e[17]*ep2[32]-.5000000000*e[17]*ep2[28]+.5000000000*e[17]*ep2[29]-.5000000000*e[17]*ep2[31]+.5000000000*e[17]*ep2[33]-.5000000000*e[17]*ep2[30]+.5000000000*e[17]*ep2[34]+1.500000000*e[17]*ep2[35]+e[32]*e[30]*e[15]+e[32]*e[12]*e[33]+e[32]*e[31]*e[16]+e[32]*e[13]*e[34]+e[14]*e[30]*e[33]+e[14]*e[31]*e[34]+e[11]*e[27]*e[33]+e[11]*e[29]*e[35]+e[11]*e[28]*e[34]+e[35]*e[33]*e[15]+e[35]*e[34]*e[16]+e[29]*e[27]*e[15]+e[29]*e[9]*e[33]+e[29]*e[28]*e[16]+e[29]*e[10]*e[34]-1.*e[35]*e[27]*e[9]-1.*e[35]*e[30]*e[12]-1.*e[35]*e[28]*e[10]-1.*e[35]*e[31]*e[13]+e[35]*e[32]*e[14];
A[38]=.5000000000*e[9]*ep2[1]+1.500000000*e[9]*ep2[0]+.5000000000*e[9]*ep2[2]+.5000000000*e[9]*ep2[3]+.5000000000*e[9]*ep2[6]-.5000000000*e[9]*ep2[4]-.5000000000*e[9]*ep2[5]-.5000000000*e[9]*ep2[7]-.5000000000*e[9]*ep2[8]+e[6]*e[0]*e[15]+e[6]*e[10]*e[7]+e[6]*e[1]*e[16]+e[6]*e[11]*e[8]+e[6]*e[2]*e[17]+e[15]*e[1]*e[7]+e[15]*e[2]*e[8]+e[0]*e[11]*e[2]+e[0]*e[10]*e[1]-1.*e[0]*e[13]*e[4]-1.*e[0]*e[16]*e[7]-1.*e[0]*e[14]*e[5]-1.*e[0]*e[17]*e[8]+e[3]*e[0]*e[12]+e[3]*e[10]*e[4]+e[3]*e[1]*e[13]+e[3]*e[11]*e[5]+e[3]*e[2]*e[14]+e[12]*e[1]*e[4]+e[12]*e[2]*e[5];
A[180]=.5000000000*e[35]*ep2[33]+.5000000000*e[35]*ep2[34]-.5000000000*e[35]*ep2[27]-.5000000000*e[35]*ep2[28]-.5000000000*e[35]*ep2[31]-.5000000000*e[35]*ep2[30]+e[32]*e[31]*e[34]+.5000000000*ep2[29]*e[35]+.5000000000*ep2[32]*e[35]+e[29]*e[28]*e[34]+e[32]*e[30]*e[33]+.5000000000*ep3[35]+e[29]*e[27]*e[33];
A[39]=.5000000000*e[0]*ep2[19]+.5000000000*e[0]*ep2[20]+.5000000000*e[0]*ep2[24]-.5000000000*e[0]*ep2[26]-.5000000000*e[0]*ep2[23]-.5000000000*e[0]*ep2[22]-.5000000000*e[0]*ep2[25]+1.500000000*ep2[18]*e[0]+.5000000000*e[0]*ep2[21]+e[18]*e[19]*e[1]+e[18]*e[20]*e[2]+e[21]*e[18]*e[3]+e[21]*e[19]*e[4]+e[21]*e[1]*e[22]+e[21]*e[20]*e[5]+e[21]*e[2]*e[23]-1.*e[18]*e[26]*e[8]-1.*e[18]*e[22]*e[4]-1.*e[18]*e[25]*e[7]-1.*e[18]*e[23]*e[5]+e[18]*e[24]*e[6]+e[3]*e[19]*e[22]+e[3]*e[20]*e[23]+e[24]*e[19]*e[7]+e[24]*e[1]*e[25]+e[24]*e[20]*e[8]+e[24]*e[2]*e[26]+e[6]*e[19]*e[25]+e[6]*e[20]*e[26];
A[181]=.5000000000*ep2[32]*e[8]-.5000000000*e[8]*ep2[27]-.5000000000*e[8]*ep2[28]+.5000000000*e[8]*ep2[29]-.5000000000*e[8]*ep2[31]+.5000000000*e[8]*ep2[33]-.5000000000*e[8]*ep2[30]+.5000000000*e[8]*ep2[34]+1.500000000*e[8]*ep2[35]+e[2]*e[27]*e[33]+e[2]*e[29]*e[35]+e[2]*e[28]*e[34]+e[5]*e[30]*e[33]+e[5]*e[32]*e[35]+e[5]*e[31]*e[34]+e[32]*e[30]*e[6]+e[32]*e[3]*e[33]+e[32]*e[31]*e[7]+e[32]*e[4]*e[34]+e[29]*e[27]*e[6]+e[29]*e[0]*e[33]+e[29]*e[28]*e[7]+e[29]*e[1]*e[34]+e[35]*e[33]*e[6]+e[35]*e[34]*e[7]-1.*e[35]*e[27]*e[0]-1.*e[35]*e[30]*e[3]-1.*e[35]*e[28]*e[1]-1.*e[35]*e[31]*e[4];
A[32]=-.5000000000*e[18]*ep2[4]+1.500000000*e[18]*ep2[0]+.5000000000*e[18]*ep2[6]-.5000000000*e[18]*ep2[5]+.5000000000*e[18]*ep2[1]-.5000000000*e[18]*ep2[7]+.5000000000*e[18]*ep2[3]+.5000000000*e[18]*ep2[2]-.5000000000*e[18]*ep2[8]+e[3]*e[0]*e[21]+e[3]*e[19]*e[4]+e[3]*e[1]*e[22]+e[3]*e[20]*e[5]+e[3]*e[2]*e[23]+e[21]*e[1]*e[4]+e[21]*e[2]*e[5]+e[6]*e[0]*e[24]+e[6]*e[19]*e[7]+e[6]*e[1]*e[25]+e[6]*e[20]*e[8]+e[6]*e[2]*e[26]+e[24]*e[1]*e[7]+e[24]*e[2]*e[8]+e[0]*e[19]*e[1]+e[0]*e[20]*e[2]-1.*e[0]*e[26]*e[8]-1.*e[0]*e[22]*e[4]-1.*e[0]*e[25]*e[7]-1.*e[0]*e[23]*e[5];
A[178]=e[10]*e[1]*e[7]+e[10]*e[0]*e[6]+e[10]*e[2]*e[8]+e[4]*e[12]*e[6]+e[4]*e[3]*e[15]+e[4]*e[13]*e[7]+e[4]*e[14]*e[8]+e[4]*e[5]*e[17]+e[13]*e[3]*e[6]+e[13]*e[5]*e[8]+e[7]*e[15]*e[6]+e[7]*e[17]*e[8]-1.*e[7]*e[11]*e[2]-1.*e[7]*e[9]*e[0]-1.*e[7]*e[14]*e[5]-1.*e[7]*e[12]*e[3]+e[1]*e[9]*e[6]+e[1]*e[0]*e[15]+e[1]*e[11]*e[8]+e[1]*e[2]*e[17]+1.500000000*e[16]*ep2[7]+.5000000000*e[16]*ep2[6]+.5000000000*e[16]*ep2[8]+.5000000000*ep2[1]*e[16]-.5000000000*e[16]*ep2[0]-.5000000000*e[16]*ep2[5]-.5000000000*e[16]*ep2[3]-.5000000000*e[16]*ep2[2]+.5000000000*ep2[4]*e[16];
A[33]=e[0]*e[30]*e[21]-1.*e[0]*e[35]*e[26]-1.*e[0]*e[31]*e[22]-1.*e[0]*e[32]*e[23]-1.*e[0]*e[34]*e[25]-1.*e[18]*e[34]*e[7]-1.*e[18]*e[32]*e[5]-1.*e[18]*e[35]*e[8]-1.*e[18]*e[31]*e[4]-1.*e[27]*e[26]*e[8]-1.*e[27]*e[22]*e[4]-1.*e[27]*e[25]*e[7]-1.*e[27]*e[23]*e[5]+e[6]*e[28]*e[25]+e[6]*e[19]*e[34]+e[6]*e[29]*e[26]+e[6]*e[20]*e[35]+e[21]*e[28]*e[4]+e[21]*e[1]*e[31]+e[21]*e[29]*e[5]+e[21]*e[2]*e[32]+e[30]*e[19]*e[4]+e[30]*e[1]*e[22]+e[30]*e[20]*e[5]+e[30]*e[2]*e[23]+e[24]*e[27]*e[6]+e[24]*e[0]*e[33]+e[24]*e[28]*e[7]+e[24]*e[1]*e[34]+e[24]*e[29]*e[8]+e[24]*e[2]*e[35]+e[33]*e[18]*e[6]+e[33]*e[19]*e[7]+e[33]*e[1]*e[25]+e[33]*e[20]*e[8]+e[33]*e[2]*e[26]+3.*e[0]*e[27]*e[18]+e[0]*e[28]*e[19]+e[0]*e[29]*e[20]+e[18]*e[28]*e[1]+e[18]*e[29]*e[2]+e[27]*e[19]*e[1]+e[27]*e[20]*e[2]+e[3]*e[27]*e[21]+e[3]*e[18]*e[30]+e[3]*e[28]*e[22]+e[3]*e[19]*e[31]+e[3]*e[29]*e[23]+e[3]*e[20]*e[32];
A[179]=e[19]*e[18]*e[6]+e[19]*e[0]*e[24]+e[19]*e[1]*e[25]+e[19]*e[20]*e[8]+e[19]*e[2]*e[26]+e[22]*e[21]*e[6]+e[22]*e[3]*e[24]+e[22]*e[4]*e[25]+e[22]*e[23]*e[8]+e[22]*e[5]*e[26]-1.*e[25]*e[21]*e[3]+e[25]*e[26]*e[8]-1.*e[25]*e[20]*e[2]-1.*e[25]*e[18]*e[0]-1.*e[25]*e[23]*e[5]+e[25]*e[24]*e[6]+e[1]*e[18]*e[24]+e[1]*e[20]*e[26]+e[4]*e[21]*e[24]+e[4]*e[23]*e[26]+.5000000000*ep2[19]*e[7]+.5000000000*ep2[22]*e[7]+1.500000000*ep2[25]*e[7]+.5000000000*e[7]*ep2[26]-.5000000000*e[7]*ep2[18]-.5000000000*e[7]*ep2[23]-.5000000000*e[7]*ep2[20]+.5000000000*e[7]*ep2[24]-.5000000000*e[7]*ep2[21];
A[34]=.5000000000*e[18]*ep2[11]+1.500000000*e[18]*ep2[9]+.5000000000*e[18]*ep2[10]+.5000000000*e[18]*ep2[12]+.5000000000*e[18]*ep2[15]-.5000000000*e[18]*ep2[16]-.5000000000*e[18]*ep2[17]-.5000000000*e[18]*ep2[14]-.5000000000*e[18]*ep2[13]+e[12]*e[9]*e[21]+e[12]*e[20]*e[14]+e[12]*e[11]*e[23]+e[12]*e[19]*e[13]+e[12]*e[10]*e[22]+e[21]*e[11]*e[14]+e[21]*e[10]*e[13]+e[15]*e[9]*e[24]+e[15]*e[20]*e[17]+e[15]*e[11]*e[26]+e[15]*e[19]*e[16]+e[15]*e[10]*e[25]+e[24]*e[11]*e[17]+e[24]*e[10]*e[16]-1.*e[9]*e[23]*e[14]-1.*e[9]*e[25]*e[16]-1.*e[9]*e[26]*e[17]+e[9]*e[20]*e[11]+e[9]*e[19]*e[10]-1.*e[9]*e[22]*e[13];
A[176]=e[13]*e[21]*e[24]+e[13]*e[23]*e[26]+e[19]*e[18]*e[15]+e[19]*e[9]*e[24]+e[19]*e[20]*e[17]+e[19]*e[11]*e[26]-1.*e[25]*e[23]*e[14]-1.*e[25]*e[20]*e[11]-1.*e[25]*e[18]*e[9]-1.*e[25]*e[21]*e[12]+e[22]*e[21]*e[15]+e[22]*e[12]*e[24]+e[22]*e[23]*e[17]+e[22]*e[14]*e[26]+e[22]*e[13]*e[25]+e[25]*e[24]*e[15]+e[25]*e[26]*e[17]+e[10]*e[19]*e[25]+e[10]*e[18]*e[24]+e[10]*e[20]*e[26]-.5000000000*e[16]*ep2[18]-.5000000000*e[16]*ep2[23]+.5000000000*e[16]*ep2[19]-.5000000000*e[16]*ep2[20]-.5000000000*e[16]*ep2[21]+.5000000000*ep2[22]*e[16]+1.500000000*ep2[25]*e[16]+.5000000000*e[16]*ep2[24]+.5000000000*e[16]*ep2[26];
A[35]=.5000000000*e[0]*ep2[12]+.5000000000*e[0]*ep2[15]+.5000000000*e[0]*ep2[11]+1.500000000*e[0]*ep2[9]+.5000000000*e[0]*ep2[10]-.5000000000*e[0]*ep2[16]-.5000000000*e[0]*ep2[17]-.5000000000*e[0]*ep2[14]-.5000000000*e[0]*ep2[13]+e[12]*e[9]*e[3]+e[12]*e[10]*e[4]+e[12]*e[1]*e[13]+e[12]*e[11]*e[5]+e[12]*e[2]*e[14]+e[15]*e[9]*e[6]+e[15]*e[10]*e[7]+e[15]*e[1]*e[16]+e[15]*e[11]*e[8]+e[15]*e[2]*e[17]+e[6]*e[11]*e[17]+e[6]*e[10]*e[16]+e[3]*e[11]*e[14]+e[3]*e[10]*e[13]+e[9]*e[10]*e[1]+e[9]*e[11]*e[2]-1.*e[9]*e[13]*e[4]-1.*e[9]*e[16]*e[7]-1.*e[9]*e[14]*e[5]-1.*e[9]*e[17]*e[8];
A[177]=e[19]*e[11]*e[35]+e[28]*e[18]*e[15]+e[28]*e[9]*e[24]+e[28]*e[20]*e[17]+e[28]*e[11]*e[26]-1.*e[25]*e[27]*e[9]-1.*e[25]*e[30]*e[12]-1.*e[25]*e[32]*e[14]+e[25]*e[33]*e[15]+e[25]*e[35]*e[17]-1.*e[25]*e[29]*e[11]-1.*e[34]*e[23]*e[14]+e[34]*e[24]*e[15]+e[34]*e[26]*e[17]-1.*e[34]*e[20]*e[11]-1.*e[34]*e[18]*e[9]-1.*e[34]*e[21]*e[12]+e[13]*e[30]*e[24]+e[13]*e[21]*e[33]+e[13]*e[31]*e[25]+e[13]*e[22]*e[34]+e[13]*e[32]*e[26]+e[13]*e[23]*e[35]+e[10]*e[27]*e[24]+e[10]*e[18]*e[33]+e[10]*e[28]*e[25]+e[10]*e[19]*e[34]+e[10]*e[29]*e[26]+e[10]*e[20]*e[35]+e[22]*e[30]*e[15]+e[22]*e[12]*e[33]+e[22]*e[32]*e[17]+e[22]*e[14]*e[35]+e[22]*e[31]*e[16]+e[31]*e[21]*e[15]+e[31]*e[12]*e[24]+e[31]*e[23]*e[17]+e[31]*e[14]*e[26]-1.*e[16]*e[27]*e[18]+e[16]*e[33]*e[24]-1.*e[16]*e[30]*e[21]-1.*e[16]*e[29]*e[20]+e[16]*e[35]*e[26]-1.*e[16]*e[32]*e[23]+e[16]*e[28]*e[19]+3.*e[16]*e[34]*e[25]+e[19]*e[27]*e[15]+e[19]*e[9]*e[33]+e[19]*e[29]*e[17];
A[45]=e[4]*e[27]*e[3]+e[4]*e[0]*e[30]+e[4]*e[29]*e[5]+e[4]*e[2]*e[32]+e[31]*e[0]*e[3]+e[31]*e[2]*e[5]+e[7]*e[27]*e[6]+e[7]*e[0]*e[33]+e[7]*e[29]*e[8]+e[7]*e[2]*e[35]+e[34]*e[0]*e[6]+e[34]*e[2]*e[8]+e[1]*e[27]*e[0]+e[1]*e[29]*e[2]+e[1]*e[34]*e[7]-1.*e[1]*e[32]*e[5]-1.*e[1]*e[33]*e[6]-1.*e[1]*e[30]*e[3]-1.*e[1]*e[35]*e[8]+e[1]*e[31]*e[4]+1.500000000*e[28]*ep2[1]+.5000000000*e[28]*ep2[4]+.5000000000*e[28]*ep2[0]-.5000000000*e[28]*ep2[6]-.5000000000*e[28]*ep2[5]+.5000000000*e[28]*ep2[7]-.5000000000*e[28]*ep2[3]+.5000000000*e[28]*ep2[2]-.5000000000*e[28]*ep2[8];
A[191]=-1.*e[35]*e[10]*e[1]-1.*e[35]*e[13]*e[4]+e[35]*e[16]*e[7]+e[35]*e[15]*e[6]-1.*e[35]*e[9]*e[0]-1.*e[35]*e[12]*e[3]+e[32]*e[12]*e[6]+e[32]*e[3]*e[15]+e[32]*e[13]*e[7]+e[32]*e[4]*e[16]-1.*e[8]*e[27]*e[9]-1.*e[8]*e[30]*e[12]-1.*e[8]*e[28]*e[10]-1.*e[8]*e[31]*e[13]+e[8]*e[29]*e[11]+e[11]*e[27]*e[6]+e[11]*e[0]*e[33]+e[11]*e[28]*e[7]+e[11]*e[1]*e[34]+e[29]*e[9]*e[6]+e[29]*e[0]*e[15]+e[29]*e[10]*e[7]+e[29]*e[1]*e[16]+e[5]*e[30]*e[15]+e[5]*e[12]*e[33]+e[5]*e[32]*e[17]+e[5]*e[14]*e[35]+e[5]*e[31]*e[16]+e[5]*e[13]*e[34]+e[8]*e[33]*e[15]+3.*e[8]*e[35]*e[17]+e[8]*e[34]*e[16]+e[2]*e[27]*e[15]+e[2]*e[9]*e[33]+e[2]*e[29]*e[17]+e[2]*e[11]*e[35]+e[2]*e[28]*e[16]+e[2]*e[10]*e[34]-1.*e[17]*e[27]*e[0]+e[17]*e[34]*e[7]+e[17]*e[33]*e[6]-1.*e[17]*e[30]*e[3]-1.*e[17]*e[28]*e[1]-1.*e[17]*e[31]*e[4]+e[14]*e[30]*e[6]+e[14]*e[3]*e[33]+e[14]*e[31]*e[7]+e[14]*e[4]*e[34]+e[14]*e[32]*e[8];
A[44]=e[19]*e[11]*e[2]+e[4]*e[18]*e[12]+e[4]*e[9]*e[21]+e[4]*e[20]*e[14]+e[4]*e[11]*e[23]+e[4]*e[19]*e[13]+e[4]*e[10]*e[22]+e[7]*e[18]*e[15]+e[7]*e[9]*e[24]+e[7]*e[20]*e[17]+e[7]*e[11]*e[26]+e[7]*e[19]*e[16]+e[7]*e[10]*e[25]+e[1]*e[18]*e[9]+e[1]*e[20]*e[11]-1.*e[10]*e[21]*e[3]-1.*e[10]*e[26]*e[8]-1.*e[10]*e[23]*e[5]-1.*e[10]*e[24]*e[6]+e[13]*e[18]*e[3]+e[13]*e[0]*e[21]+e[13]*e[1]*e[22]+e[13]*e[20]*e[5]+e[13]*e[2]*e[23]-1.*e[19]*e[15]*e[6]-1.*e[19]*e[14]*e[5]-1.*e[19]*e[12]*e[3]-1.*e[19]*e[17]*e[8]+e[22]*e[9]*e[3]+e[22]*e[0]*e[12]+e[22]*e[11]*e[5]+e[22]*e[2]*e[14]+e[16]*e[18]*e[6]+e[16]*e[0]*e[24]+e[16]*e[1]*e[25]+e[16]*e[20]*e[8]+e[16]*e[2]*e[26]-1.*e[1]*e[23]*e[14]-1.*e[1]*e[24]*e[15]-1.*e[1]*e[26]*e[17]-1.*e[1]*e[21]*e[12]+e[25]*e[9]*e[6]+e[25]*e[0]*e[15]+e[25]*e[11]*e[8]+e[25]*e[2]*e[17]+e[10]*e[18]*e[0]+3.*e[10]*e[19]*e[1]+e[10]*e[20]*e[2]+e[19]*e[9]*e[0];
A[190]=.5000000000*ep2[23]*e[26]+.5000000000*e[26]*ep2[25]+.5000000000*ep2[20]*e[26]-.5000000000*e[26]*ep2[18]+.5000000000*ep3[26]+.5000000000*e[26]*ep2[24]+e[20]*e[19]*e[25]-.5000000000*e[26]*ep2[19]-.5000000000*e[26]*ep2[21]+e[20]*e[18]*e[24]-.5000000000*e[26]*ep2[22]+e[23]*e[21]*e[24]+e[23]*e[22]*e[25];
A[47]=e[16]*e[9]*e[33]+e[16]*e[29]*e[17]+e[16]*e[11]*e[35]+e[16]*e[10]*e[34]+e[34]*e[11]*e[17]+e[34]*e[9]*e[15]-1.*e[10]*e[30]*e[12]-1.*e[10]*e[32]*e[14]-1.*e[10]*e[33]*e[15]-1.*e[10]*e[35]*e[17]+e[10]*e[27]*e[9]+e[10]*e[29]*e[11]+e[13]*e[27]*e[12]+e[13]*e[9]*e[30]+e[13]*e[29]*e[14]+e[13]*e[11]*e[32]+e[13]*e[10]*e[31]+e[31]*e[11]*e[14]+e[31]*e[9]*e[12]+e[16]*e[27]*e[15]+1.500000000*e[28]*ep2[10]+.5000000000*e[28]*ep2[16]+.5000000000*e[28]*ep2[9]+.5000000000*e[28]*ep2[11]-.5000000000*e[28]*ep2[12]-.5000000000*e[28]*ep2[15]-.5000000000*e[28]*ep2[17]-.5000000000*e[28]*ep2[14]+.5000000000*e[28]*ep2[13];
A[189]=.5000000000*ep2[20]*e[35]+.5000000000*ep2[23]*e[35]+1.500000000*e[35]*ep2[26]+.5000000000*e[35]*ep2[25]+.5000000000*e[35]*ep2[24]-.5000000000*e[35]*ep2[18]-.5000000000*e[35]*ep2[19]-.5000000000*e[35]*ep2[22]-.5000000000*e[35]*ep2[21]+e[20]*e[27]*e[24]+e[20]*e[18]*e[33]+e[20]*e[28]*e[25]+e[20]*e[19]*e[34]+e[20]*e[29]*e[26]+e[29]*e[19]*e[25]+e[29]*e[18]*e[24]+e[23]*e[30]*e[24]+e[23]*e[21]*e[33]+e[23]*e[31]*e[25]+e[23]*e[22]*e[34]+e[23]*e[32]*e[26]+e[32]*e[22]*e[25]+e[32]*e[21]*e[24]+e[26]*e[33]*e[24]+e[26]*e[34]*e[25]-1.*e[26]*e[27]*e[18]-1.*e[26]*e[30]*e[21]-1.*e[26]*e[31]*e[22]-1.*e[26]*e[28]*e[19];
A[46]=e[4]*e[2]*e[5]+.5000000000*e[1]*ep2[0]-.5000000000*e[1]*ep2[6]+e[7]*e[0]*e[6]+.5000000000*e[1]*ep2[7]+.5000000000*e[1]*ep2[4]-.5000000000*e[1]*ep2[8]+.5000000000*e[1]*ep2[2]-.5000000000*e[1]*ep2[3]+.5000000000*ep3[1]+e[7]*e[2]*e[8]-.5000000000*e[1]*ep2[5]+e[4]*e[0]*e[3];
A[188]=-.5000000000*e[17]*ep2[13]-.5000000000*e[17]*ep2[9]+.5000000000*e[17]*ep2[16]+.5000000000*e[17]*ep2[15]+.5000000000*ep3[17]-.5000000000*e[17]*ep2[10]+e[14]*e[13]*e[16]+e[14]*e[12]*e[15]+.5000000000*ep2[14]*e[17]+e[11]*e[10]*e[16]-.5000000000*e[17]*ep2[12]+.5000000000*ep2[11]*e[17]+e[11]*e[9]*e[15];
A[41]=e[4]*e[27]*e[30]+e[4]*e[29]*e[32]+e[4]*e[28]*e[31]+e[31]*e[27]*e[3]+e[31]*e[0]*e[30]+e[31]*e[29]*e[5]+e[31]*e[2]*e[32]+e[7]*e[27]*e[33]+e[7]*e[29]*e[35]+e[7]*e[28]*e[34]+e[28]*e[27]*e[0]+e[28]*e[29]*e[2]+e[34]*e[27]*e[6]+e[34]*e[0]*e[33]+e[34]*e[29]*e[8]+e[34]*e[2]*e[35]-1.*e[28]*e[32]*e[5]-1.*e[28]*e[33]*e[6]-1.*e[28]*e[30]*e[3]-1.*e[28]*e[35]*e[8]+.5000000000*e[1]*ep2[27]+.5000000000*e[1]*ep2[29]+1.500000000*e[1]*ep2[28]+.5000000000*e[1]*ep2[31]-.5000000000*e[1]*ep2[32]-.5000000000*e[1]*ep2[33]-.5000000000*e[1]*ep2[30]+.5000000000*e[1]*ep2[34]-.5000000000*e[1]*ep2[35];
A[187]=.5000000000*ep2[11]*e[35]+.5000000000*e[35]*ep2[16]-.5000000000*e[35]*ep2[9]-.5000000000*e[35]*ep2[12]+.5000000000*e[35]*ep2[15]+1.500000000*e[35]*ep2[17]-.5000000000*e[35]*ep2[10]+.5000000000*e[35]*ep2[14]-.5000000000*e[35]*ep2[13]+e[11]*e[27]*e[15]+e[11]*e[9]*e[33]+e[11]*e[29]*e[17]+e[11]*e[28]*e[16]+e[11]*e[10]*e[34]+e[29]*e[9]*e[15]+e[29]*e[10]*e[16]+e[14]*e[30]*e[15]+e[14]*e[12]*e[33]+e[14]*e[32]*e[17]+e[14]*e[31]*e[16]+e[14]*e[13]*e[34]+e[32]*e[12]*e[15]+e[32]*e[13]*e[16]+e[17]*e[33]*e[15]+e[17]*e[34]*e[16]-1.*e[17]*e[27]*e[9]-1.*e[17]*e[30]*e[12]-1.*e[17]*e[28]*e[10]-1.*e[17]*e[31]*e[13];
A[40]=e[34]*e[27]*e[33]+e[34]*e[29]*e[35]-.5000000000*e[28]*ep2[30]-.5000000000*e[28]*ep2[35]+.5000000000*ep3[28]+.5000000000*e[28]*ep2[27]+.5000000000*e[28]*ep2[29]+e[31]*e[27]*e[30]+e[31]*e[29]*e[32]-.5000000000*e[28]*ep2[32]-.5000000000*e[28]*ep2[33]+.5000000000*e[28]*ep2[31]+.5000000000*e[28]*ep2[34];
A[186]=.5000000000*ep2[5]*e[8]+e[2]*e[0]*e[6]+.5000000000*ep2[2]*e[8]+.5000000000*ep3[8]-.5000000000*e[8]*ep2[0]+e[5]*e[4]*e[7]+e[5]*e[3]*e[6]+.5000000000*e[8]*ep2[7]+e[2]*e[1]*e[7]-.5000000000*e[8]*ep2[1]-.5000000000*e[8]*ep2[4]-.5000000000*e[8]*ep2[3]+.5000000000*e[8]*ep2[6];
A[43]=e[28]*e[27]*e[9]+e[28]*e[29]*e[11]-1.*e[28]*e[30]*e[12]+e[28]*e[31]*e[13]-1.*e[28]*e[32]*e[14]-1.*e[28]*e[33]*e[15]-1.*e[28]*e[35]*e[17]+e[31]*e[27]*e[12]+e[31]*e[9]*e[30]+e[31]*e[29]*e[14]+e[31]*e[11]*e[32]+e[13]*e[27]*e[30]+e[13]*e[29]*e[32]+e[16]*e[27]*e[33]+e[16]*e[29]*e[35]+e[34]*e[27]*e[15]+e[34]*e[9]*e[33]+e[34]*e[29]*e[17]+e[34]*e[11]*e[35]+e[34]*e[28]*e[16]+.5000000000*e[10]*ep2[27]+.5000000000*e[10]*ep2[29]+1.500000000*e[10]*ep2[28]-.5000000000*e[10]*ep2[32]+.5000000000*e[10]*ep2[31]-.5000000000*e[10]*ep2[33]-.5000000000*e[10]*ep2[30]+.5000000000*e[10]*ep2[34]-.5000000000*e[10]*ep2[35];
A[185]=-.5000000000*e[35]*ep2[1]+.5000000000*e[35]*ep2[7]-.5000000000*e[35]*ep2[3]+.5000000000*ep2[2]*e[35]+1.500000000*e[35]*ep2[8]-.5000000000*e[35]*ep2[4]-.5000000000*e[35]*ep2[0]+.5000000000*e[35]*ep2[6]+.5000000000*e[35]*ep2[5]+e[2]*e[27]*e[6]+e[2]*e[0]*e[33]+e[2]*e[28]*e[7]+e[2]*e[1]*e[34]+e[2]*e[29]*e[8]-1.*e[8]*e[27]*e[0]+e[8]*e[34]*e[7]+e[8]*e[32]*e[5]+e[8]*e[33]*e[6]-1.*e[8]*e[30]*e[3]-1.*e[8]*e[28]*e[1]-1.*e[8]*e[31]*e[4]+e[29]*e[1]*e[7]+e[29]*e[0]*e[6]+e[5]*e[30]*e[6]+e[5]*e[3]*e[33]+e[5]*e[31]*e[7]+e[5]*e[4]*e[34]+e[32]*e[4]*e[7]+e[32]*e[3]*e[6];
A[42]=e[28]*e[27]*e[18]+e[28]*e[29]*e[20]+e[22]*e[27]*e[30]+e[22]*e[29]*e[32]+e[22]*e[28]*e[31]+e[31]*e[27]*e[21]+e[31]*e[18]*e[30]+e[31]*e[29]*e[23]+e[31]*e[20]*e[32]+e[25]*e[27]*e[33]+e[25]*e[29]*e[35]+e[25]*e[28]*e[34]+e[34]*e[27]*e[24]+e[34]*e[18]*e[33]+e[34]*e[29]*e[26]+e[34]*e[20]*e[35]-1.*e[28]*e[33]*e[24]-1.*e[28]*e[30]*e[21]-1.*e[28]*e[35]*e[26]-1.*e[28]*e[32]*e[23]-.5000000000*e[19]*ep2[33]-.5000000000*e[19]*ep2[30]-.5000000000*e[19]*ep2[35]+.5000000000*e[19]*ep2[27]+.5000000000*e[19]*ep2[29]+1.500000000*e[19]*ep2[28]+.5000000000*e[19]*ep2[31]+.5000000000*e[19]*ep2[34]-.5000000000*e[19]*ep2[32];
A[184]=e[23]*e[3]*e[15]-1.*e[17]*e[19]*e[1]-1.*e[17]*e[22]*e[4]-1.*e[17]*e[18]*e[0]+e[17]*e[25]*e[7]+e[17]*e[24]*e[6]+e[14]*e[21]*e[6]+e[14]*e[3]*e[24]+e[14]*e[22]*e[7]+e[14]*e[4]*e[25]+e[14]*e[23]*e[8]-1.*e[26]*e[10]*e[1]-1.*e[26]*e[13]*e[4]+e[26]*e[16]*e[7]+e[26]*e[15]*e[6]-1.*e[26]*e[9]*e[0]-1.*e[26]*e[12]*e[3]+e[23]*e[12]*e[6]+e[11]*e[18]*e[6]+e[11]*e[0]*e[24]+e[11]*e[19]*e[7]+e[11]*e[1]*e[25]+e[11]*e[20]*e[8]+e[11]*e[2]*e[26]+e[20]*e[9]*e[6]+e[20]*e[0]*e[15]+e[20]*e[10]*e[7]+e[20]*e[1]*e[16]+e[20]*e[2]*e[17]+e[5]*e[21]*e[15]+e[5]*e[12]*e[24]+e[5]*e[23]*e[17]+e[5]*e[14]*e[26]+e[5]*e[22]*e[16]+e[5]*e[13]*e[25]+e[8]*e[24]*e[15]+3.*e[8]*e[26]*e[17]+e[8]*e[25]*e[16]+e[2]*e[18]*e[15]+e[2]*e[9]*e[24]+e[2]*e[19]*e[16]+e[2]*e[10]*e[25]-1.*e[17]*e[21]*e[3]+e[23]*e[4]*e[16]+e[23]*e[13]*e[7]-1.*e[8]*e[18]*e[9]-1.*e[8]*e[21]*e[12]-1.*e[8]*e[19]*e[10]-1.*e[8]*e[22]*e[13];
A[54]=e[13]*e[18]*e[12]+e[13]*e[9]*e[21]+e[13]*e[20]*e[14]+e[13]*e[11]*e[23]+e[13]*e[10]*e[22]+e[22]*e[11]*e[14]+e[22]*e[9]*e[12]+e[16]*e[18]*e[15]+e[16]*e[9]*e[24]+e[16]*e[20]*e[17]+e[16]*e[11]*e[26]+e[16]*e[10]*e[25]+e[25]*e[11]*e[17]+e[25]*e[9]*e[15]-1.*e[10]*e[23]*e[14]-1.*e[10]*e[24]*e[15]-1.*e[10]*e[26]*e[17]+e[10]*e[20]*e[11]+e[10]*e[18]*e[9]-1.*e[10]*e[21]*e[12]+.5000000000*e[19]*ep2[11]+.5000000000*e[19]*ep2[9]+1.500000000*e[19]*ep2[10]+.5000000000*e[19]*ep2[13]+.5000000000*e[19]*ep2[16]-.5000000000*e[19]*ep2[12]-.5000000000*e[19]*ep2[15]-.5000000000*e[19]*ep2[17]-.5000000000*e[19]*ep2[14];
A[164]=e[10]*e[18]*e[6]+e[10]*e[0]*e[24]+e[10]*e[19]*e[7]+e[10]*e[1]*e[25]+e[10]*e[20]*e[8]+e[10]*e[2]*e[26]+e[19]*e[9]*e[6]+e[19]*e[0]*e[15]+e[19]*e[1]*e[16]+e[19]*e[11]*e[8]+e[19]*e[2]*e[17]+e[4]*e[21]*e[15]+e[4]*e[12]*e[24]+e[4]*e[23]*e[17]+e[4]*e[14]*e[26]+e[4]*e[22]*e[16]+e[4]*e[13]*e[25]+e[7]*e[24]*e[15]+e[7]*e[26]*e[17]+3.*e[7]*e[25]*e[16]+e[1]*e[18]*e[15]+e[1]*e[9]*e[24]+e[1]*e[20]*e[17]+e[1]*e[11]*e[26]-1.*e[16]*e[21]*e[3]+e[16]*e[26]*e[8]-1.*e[16]*e[20]*e[2]-1.*e[16]*e[18]*e[0]-1.*e[16]*e[23]*e[5]+e[16]*e[24]*e[6]+e[13]*e[21]*e[6]+e[13]*e[3]*e[24]+e[13]*e[22]*e[7]+e[13]*e[23]*e[8]+e[13]*e[5]*e[26]-1.*e[25]*e[11]*e[2]+e[25]*e[15]*e[6]-1.*e[25]*e[9]*e[0]-1.*e[25]*e[14]*e[5]-1.*e[25]*e[12]*e[3]+e[25]*e[17]*e[8]+e[22]*e[12]*e[6]+e[22]*e[3]*e[15]+e[22]*e[14]*e[8]+e[22]*e[5]*e[17]-1.*e[7]*e[23]*e[14]-1.*e[7]*e[20]*e[11]-1.*e[7]*e[18]*e[9]-1.*e[7]*e[21]*e[12];
A[55]=e[13]*e[9]*e[3]+e[13]*e[0]*e[12]+e[13]*e[10]*e[4]+e[13]*e[11]*e[5]+e[13]*e[2]*e[14]+e[16]*e[9]*e[6]+e[16]*e[0]*e[15]+e[16]*e[10]*e[7]+e[16]*e[11]*e[8]+e[16]*e[2]*e[17]+e[7]*e[11]*e[17]+e[7]*e[9]*e[15]+e[4]*e[11]*e[14]+e[4]*e[9]*e[12]+e[10]*e[9]*e[0]+e[10]*e[11]*e[2]-1.*e[10]*e[15]*e[6]-1.*e[10]*e[14]*e[5]-1.*e[10]*e[12]*e[3]-1.*e[10]*e[17]*e[8]+.5000000000*e[1]*ep2[11]+.5000000000*e[1]*ep2[9]+1.500000000*e[1]*ep2[10]-.5000000000*e[1]*ep2[12]-.5000000000*e[1]*ep2[15]-.5000000000*e[1]*ep2[17]-.5000000000*e[1]*ep2[14]+.5000000000*e[1]*ep2[13]+.5000000000*e[1]*ep2[16];
A[165]=e[1]*e[27]*e[6]+e[1]*e[0]*e[33]+e[1]*e[28]*e[7]+e[1]*e[29]*e[8]+e[1]*e[2]*e[35]-1.*e[7]*e[27]*e[0]-1.*e[7]*e[32]*e[5]+e[7]*e[33]*e[6]-1.*e[7]*e[30]*e[3]+e[7]*e[35]*e[8]-1.*e[7]*e[29]*e[2]+e[7]*e[31]*e[4]+e[28]*e[0]*e[6]+e[28]*e[2]*e[8]+e[4]*e[30]*e[6]+e[4]*e[3]*e[33]+e[4]*e[32]*e[8]+e[4]*e[5]*e[35]+e[31]*e[3]*e[6]+e[31]*e[5]*e[8]+.5000000000*ep2[1]*e[34]+1.500000000*e[34]*ep2[7]+.5000000000*e[34]*ep2[4]-.5000000000*e[34]*ep2[0]+.5000000000*e[34]*ep2[6]-.5000000000*e[34]*ep2[5]-.5000000000*e[34]*ep2[3]-.5000000000*e[34]*ep2[2]+.5000000000*e[34]*ep2[8];
A[52]=e[4]*e[18]*e[3]+e[4]*e[0]*e[21]+e[4]*e[1]*e[22]+e[4]*e[20]*e[5]+e[4]*e[2]*e[23]+e[22]*e[0]*e[3]+e[22]*e[2]*e[5]+e[7]*e[18]*e[6]+e[7]*e[0]*e[24]+e[7]*e[1]*e[25]+e[7]*e[20]*e[8]+e[7]*e[2]*e[26]+e[25]*e[0]*e[6]+e[25]*e[2]*e[8]+e[1]*e[18]*e[0]+e[1]*e[20]*e[2]-1.*e[1]*e[21]*e[3]-1.*e[1]*e[26]*e[8]-1.*e[1]*e[23]*e[5]-1.*e[1]*e[24]*e[6]+.5000000000*e[19]*ep2[4]+.5000000000*e[19]*ep2[0]-.5000000000*e[19]*ep2[6]-.5000000000*e[19]*ep2[5]+1.500000000*e[19]*ep2[1]+.5000000000*e[19]*ep2[7]-.5000000000*e[19]*ep2[3]+.5000000000*e[19]*ep2[2]-.5000000000*e[19]*ep2[8];
A[166]=-.5000000000*e[7]*ep2[0]+e[4]*e[5]*e[8]+.5000000000*ep2[4]*e[7]-.5000000000*e[7]*ep2[2]+.5000000000*e[7]*ep2[8]-.5000000000*e[7]*ep2[5]+.5000000000*e[7]*ep2[6]+e[1]*e[0]*e[6]+.5000000000*ep3[7]+e[4]*e[3]*e[6]+e[1]*e[2]*e[8]-.5000000000*e[7]*ep2[3]+.5000000000*ep2[1]*e[7];
A[53]=-1.*e[1]*e[32]*e[23]-1.*e[19]*e[32]*e[5]-1.*e[19]*e[33]*e[6]-1.*e[19]*e[30]*e[3]-1.*e[19]*e[35]*e[8]-1.*e[28]*e[21]*e[3]-1.*e[28]*e[26]*e[8]-1.*e[28]*e[23]*e[5]-1.*e[28]*e[24]*e[6]+e[7]*e[27]*e[24]+e[7]*e[18]*e[33]+e[7]*e[29]*e[26]+e[7]*e[20]*e[35]+e[22]*e[27]*e[3]+e[22]*e[0]*e[30]+e[22]*e[29]*e[5]+e[22]*e[2]*e[32]+e[31]*e[18]*e[3]+e[31]*e[0]*e[21]+e[31]*e[20]*e[5]+e[31]*e[2]*e[23]+e[25]*e[27]*e[6]+e[25]*e[0]*e[33]+e[25]*e[28]*e[7]+e[25]*e[1]*e[34]+e[25]*e[29]*e[8]+e[25]*e[2]*e[35]+e[34]*e[18]*e[6]+e[34]*e[0]*e[24]+e[34]*e[19]*e[7]+e[34]*e[20]*e[8]+e[34]*e[2]*e[26]+e[1]*e[27]*e[18]+3.*e[1]*e[28]*e[19]+e[1]*e[29]*e[20]+e[19]*e[27]*e[0]+e[19]*e[29]*e[2]+e[28]*e[18]*e[0]+e[28]*e[20]*e[2]+e[4]*e[27]*e[21]+e[4]*e[18]*e[30]+e[4]*e[28]*e[22]+e[4]*e[19]*e[31]+e[4]*e[29]*e[23]+e[4]*e[20]*e[32]-1.*e[1]*e[33]*e[24]-1.*e[1]*e[30]*e[21]-1.*e[1]*e[35]*e[26]+e[1]*e[31]*e[22];
A[167]=e[10]*e[27]*e[15]+e[10]*e[9]*e[33]+e[10]*e[29]*e[17]+e[10]*e[11]*e[35]+e[10]*e[28]*e[16]+e[28]*e[11]*e[17]+e[28]*e[9]*e[15]+e[13]*e[30]*e[15]+e[13]*e[12]*e[33]+e[13]*e[32]*e[17]+e[13]*e[14]*e[35]+e[13]*e[31]*e[16]+e[31]*e[14]*e[17]+e[31]*e[12]*e[15]+e[16]*e[33]*e[15]+e[16]*e[35]*e[17]-1.*e[16]*e[27]*e[9]-1.*e[16]*e[30]*e[12]-1.*e[16]*e[32]*e[14]-1.*e[16]*e[29]*e[11]+.5000000000*ep2[10]*e[34]+1.500000000*e[34]*ep2[16]-.5000000000*e[34]*ep2[9]-.5000000000*e[34]*ep2[11]-.5000000000*e[34]*ep2[12]+.5000000000*e[34]*ep2[15]+.5000000000*e[34]*ep2[17]-.5000000000*e[34]*ep2[14]+.5000000000*e[34]*ep2[13];
A[50]=.5000000000*e[19]*ep2[18]+.5000000000*e[19]*ep2[25]+.5000000000*e[19]*ep2[22]+e[25]*e[20]*e[26]-.5000000000*e[19]*ep2[21]+.5000000000*e[19]*ep2[20]-.5000000000*e[19]*ep2[26]-.5000000000*e[19]*ep2[23]-.5000000000*e[19]*ep2[24]+.5000000000*ep3[19]+e[22]*e[20]*e[23]+e[25]*e[18]*e[24]+e[22]*e[18]*e[21];
A[160]=.5000000000*e[34]*ep2[33]+.5000000000*e[34]*ep2[35]-.5000000000*e[34]*ep2[27]-.5000000000*e[34]*ep2[32]-.5000000000*e[34]*ep2[29]-.5000000000*e[34]*ep2[30]+.5000000000*ep2[28]*e[34]+e[31]*e[30]*e[33]+e[31]*e[32]*e[35]+e[28]*e[27]*e[33]+.5000000000*ep3[34]+e[28]*e[29]*e[35]+.5000000000*ep2[31]*e[34];
A[51]=e[4]*e[28]*e[13]+e[4]*e[10]*e[31]+e[7]*e[27]*e[15]+e[7]*e[9]*e[33]+e[7]*e[29]*e[17]+e[7]*e[11]*e[35]+e[7]*e[28]*e[16]+e[7]*e[10]*e[34]+e[1]*e[27]*e[9]+e[1]*e[29]*e[11]+3.*e[1]*e[28]*e[10]+e[10]*e[27]*e[0]-1.*e[10]*e[32]*e[5]-1.*e[10]*e[33]*e[6]-1.*e[10]*e[30]*e[3]-1.*e[10]*e[35]*e[8]+e[10]*e[29]*e[2]+e[13]*e[27]*e[3]+e[13]*e[0]*e[30]+e[13]*e[1]*e[31]+e[13]*e[29]*e[5]+e[13]*e[2]*e[32]+e[28]*e[11]*e[2]-1.*e[28]*e[15]*e[6]+e[28]*e[9]*e[0]-1.*e[28]*e[14]*e[5]-1.*e[28]*e[12]*e[3]-1.*e[28]*e[17]*e[8]+e[31]*e[9]*e[3]+e[31]*e[0]*e[12]+e[31]*e[11]*e[5]+e[31]*e[2]*e[14]+e[16]*e[27]*e[6]+e[16]*e[0]*e[33]+e[16]*e[1]*e[34]+e[16]*e[29]*e[8]+e[16]*e[2]*e[35]-1.*e[1]*e[30]*e[12]-1.*e[1]*e[32]*e[14]-1.*e[1]*e[33]*e[15]-1.*e[1]*e[35]*e[17]+e[34]*e[9]*e[6]+e[34]*e[0]*e[15]+e[34]*e[11]*e[8]+e[34]*e[2]*e[17]+e[4]*e[27]*e[12]+e[4]*e[9]*e[30]+e[4]*e[29]*e[14]+e[4]*e[11]*e[32];
A[161]=e[4]*e[30]*e[33]+e[4]*e[32]*e[35]+e[4]*e[31]*e[34]+e[31]*e[30]*e[6]+e[31]*e[3]*e[33]+e[31]*e[32]*e[8]+e[31]*e[5]*e[35]+e[28]*e[27]*e[6]+e[28]*e[0]*e[33]+e[28]*e[29]*e[8]+e[28]*e[2]*e[35]+e[34]*e[33]*e[6]+e[34]*e[35]*e[8]-1.*e[34]*e[27]*e[0]-1.*e[34]*e[32]*e[5]-1.*e[34]*e[30]*e[3]-1.*e[34]*e[29]*e[2]+e[1]*e[27]*e[33]+e[1]*e[29]*e[35]+e[1]*e[28]*e[34]+.5000000000*ep2[31]*e[7]-.5000000000*e[7]*ep2[27]-.5000000000*e[7]*ep2[32]+.5000000000*e[7]*ep2[28]-.5000000000*e[7]*ep2[29]+.5000000000*e[7]*ep2[33]-.5000000000*e[7]*ep2[30]+1.500000000*e[7]*ep2[34]+.5000000000*e[7]*ep2[35];
A[48]=-.5000000000*e[10]*ep2[14]-.5000000000*e[10]*ep2[17]-.5000000000*e[10]*ep2[15]+e[13]*e[11]*e[14]+e[16]*e[11]*e[17]+.5000000000*e[10]*ep2[13]+e[13]*e[9]*e[12]-.5000000000*e[10]*ep2[12]+.5000000000*ep3[10]+e[16]*e[9]*e[15]+.5000000000*e[10]*ep2[16]+.5000000000*e[10]*ep2[11]+.5000000000*e[10]*ep2[9];
A[162]=e[22]*e[32]*e[35]+e[22]*e[31]*e[34]+e[31]*e[30]*e[24]+e[31]*e[21]*e[33]+e[31]*e[32]*e[26]+e[31]*e[23]*e[35]+e[34]*e[33]*e[24]+e[34]*e[35]*e[26]-1.*e[34]*e[27]*e[18]-1.*e[34]*e[30]*e[21]-1.*e[34]*e[29]*e[20]-1.*e[34]*e[32]*e[23]+e[19]*e[27]*e[33]+e[19]*e[29]*e[35]+e[19]*e[28]*e[34]+e[28]*e[27]*e[24]+e[28]*e[18]*e[33]+e[28]*e[29]*e[26]+e[28]*e[20]*e[35]+e[22]*e[30]*e[33]+.5000000000*ep2[28]*e[25]+.5000000000*ep2[31]*e[25]+.5000000000*e[25]*ep2[33]+.5000000000*e[25]*ep2[35]+1.500000000*e[25]*ep2[34]-.5000000000*e[25]*ep2[27]-.5000000000*e[25]*ep2[32]-.5000000000*e[25]*ep2[29]-.5000000000*e[25]*ep2[30];
A[49]=-1.*e[19]*e[35]*e[26]-1.*e[19]*e[32]*e[23]+e[19]*e[27]*e[18]+e[19]*e[29]*e[20]+e[22]*e[27]*e[21]+e[22]*e[18]*e[30]+e[22]*e[19]*e[31]+e[22]*e[29]*e[23]+e[22]*e[20]*e[32]+e[31]*e[18]*e[21]+e[31]*e[20]*e[23]+e[25]*e[27]*e[24]+e[25]*e[18]*e[33]+e[25]*e[19]*e[34]+e[25]*e[29]*e[26]+e[25]*e[20]*e[35]+e[34]*e[18]*e[24]+e[34]*e[20]*e[26]-1.*e[19]*e[33]*e[24]-1.*e[19]*e[30]*e[21]+1.500000000*e[28]*ep2[19]+.5000000000*e[28]*ep2[18]+.5000000000*e[28]*ep2[20]+.5000000000*e[28]*ep2[22]+.5000000000*e[28]*ep2[25]-.5000000000*e[28]*ep2[26]-.5000000000*e[28]*ep2[23]-.5000000000*e[28]*ep2[24]-.5000000000*e[28]*ep2[21];
A[163]=e[10]*e[27]*e[33]+e[10]*e[29]*e[35]+e[10]*e[28]*e[34]+e[34]*e[33]*e[15]+e[34]*e[35]*e[17]+e[28]*e[27]*e[15]+e[28]*e[9]*e[33]+e[28]*e[29]*e[17]+e[28]*e[11]*e[35]-1.*e[34]*e[27]*e[9]-1.*e[34]*e[30]*e[12]+e[34]*e[31]*e[13]-1.*e[34]*e[32]*e[14]-1.*e[34]*e[29]*e[11]+e[31]*e[30]*e[15]+e[31]*e[12]*e[33]+e[31]*e[32]*e[17]+e[31]*e[14]*e[35]+e[13]*e[30]*e[33]+e[13]*e[32]*e[35]-.5000000000*e[16]*ep2[27]-.5000000000*e[16]*ep2[32]+.5000000000*e[16]*ep2[28]-.5000000000*e[16]*ep2[29]+.5000000000*e[16]*ep2[31]+.5000000000*e[16]*ep2[33]-.5000000000*e[16]*ep2[30]+1.500000000*e[16]*ep2[34]+.5000000000*e[16]*ep2[35];
A[63]=e[29]*e[32]*e[14]-1.*e[29]*e[33]*e[15]-1.*e[29]*e[34]*e[16]+e[32]*e[27]*e[12]+e[32]*e[9]*e[30]+e[32]*e[28]*e[13]+e[32]*e[10]*e[31]+e[14]*e[27]*e[30]+e[14]*e[28]*e[31]+e[17]*e[27]*e[33]+e[17]*e[28]*e[34]+e[35]*e[27]*e[15]+e[35]*e[9]*e[33]+e[35]*e[29]*e[17]+e[35]*e[28]*e[16]+e[35]*e[10]*e[34]+e[29]*e[27]*e[9]+e[29]*e[28]*e[10]-1.*e[29]*e[30]*e[12]-1.*e[29]*e[31]*e[13]+.5000000000*e[11]*ep2[27]+1.500000000*e[11]*ep2[29]+.5000000000*e[11]*ep2[28]+.5000000000*e[11]*ep2[32]-.5000000000*e[11]*ep2[31]-.5000000000*e[11]*ep2[33]-.5000000000*e[11]*ep2[30]-.5000000000*e[11]*ep2[34]+.5000000000*e[11]*ep2[35];
A[173]=e[1]*e[20]*e[35]+e[19]*e[27]*e[6]+e[19]*e[0]*e[33]+e[19]*e[28]*e[7]+e[19]*e[29]*e[8]+e[19]*e[2]*e[35]+e[28]*e[18]*e[6]+e[28]*e[0]*e[24]+e[28]*e[20]*e[8]+e[28]*e[2]*e[26]+e[4]*e[30]*e[24]+e[4]*e[21]*e[33]+e[4]*e[31]*e[25]+e[4]*e[22]*e[34]+e[4]*e[32]*e[26]+e[4]*e[23]*e[35]-1.*e[7]*e[27]*e[18]+e[7]*e[33]*e[24]-1.*e[7]*e[30]*e[21]-1.*e[7]*e[29]*e[20]+e[7]*e[35]*e[26]+e[7]*e[31]*e[22]-1.*e[7]*e[32]*e[23]-1.*e[25]*e[27]*e[0]-1.*e[25]*e[32]*e[5]-1.*e[25]*e[30]*e[3]-1.*e[25]*e[29]*e[2]-1.*e[34]*e[21]*e[3]-1.*e[34]*e[20]*e[2]-1.*e[34]*e[18]*e[0]-1.*e[34]*e[23]*e[5]+e[22]*e[30]*e[6]+e[22]*e[3]*e[33]+e[22]*e[32]*e[8]+e[22]*e[5]*e[35]+e[31]*e[21]*e[6]+e[31]*e[3]*e[24]+e[31]*e[23]*e[8]+e[31]*e[5]*e[26]+e[34]*e[26]*e[8]+e[1]*e[27]*e[24]+e[1]*e[18]*e[33]+e[1]*e[28]*e[25]+e[1]*e[19]*e[34]+e[1]*e[29]*e[26]+e[34]*e[24]*e[6]+e[25]*e[33]*e[6]+3.*e[25]*e[34]*e[7]+e[25]*e[35]*e[8];
A[62]=.5000000000*e[20]*ep2[27]+1.500000000*e[20]*ep2[29]+.5000000000*e[20]*ep2[28]+.5000000000*e[20]*ep2[32]+.5000000000*e[20]*ep2[35]-.5000000000*e[20]*ep2[31]-.5000000000*e[20]*ep2[33]-.5000000000*e[20]*ep2[30]-.5000000000*e[20]*ep2[34]+e[29]*e[27]*e[18]+e[29]*e[28]*e[19]+e[23]*e[27]*e[30]+e[23]*e[29]*e[32]+e[23]*e[28]*e[31]+e[32]*e[27]*e[21]+e[32]*e[18]*e[30]+e[32]*e[28]*e[22]+e[32]*e[19]*e[31]+e[26]*e[27]*e[33]+e[26]*e[29]*e[35]+e[26]*e[28]*e[34]+e[35]*e[27]*e[24]+e[35]*e[18]*e[33]+e[35]*e[28]*e[25]+e[35]*e[19]*e[34]-1.*e[29]*e[33]*e[24]-1.*e[29]*e[30]*e[21]-1.*e[29]*e[31]*e[22]-1.*e[29]*e[34]*e[25];
A[172]=e[19]*e[1]*e[7]+e[19]*e[0]*e[6]+e[19]*e[2]*e[8]+e[4]*e[21]*e[6]+e[4]*e[3]*e[24]+e[4]*e[22]*e[7]+e[4]*e[23]*e[8]+e[4]*e[5]*e[26]+e[22]*e[3]*e[6]+e[22]*e[5]*e[8]+e[7]*e[24]*e[6]+e[7]*e[26]*e[8]+e[1]*e[18]*e[6]+e[1]*e[0]*e[24]+e[1]*e[20]*e[8]+e[1]*e[2]*e[26]-1.*e[7]*e[21]*e[3]-1.*e[7]*e[20]*e[2]-1.*e[7]*e[18]*e[0]-1.*e[7]*e[23]*e[5]+.5000000000*e[25]*ep2[4]-.5000000000*e[25]*ep2[0]+.5000000000*e[25]*ep2[6]-.5000000000*e[25]*ep2[5]+.5000000000*e[25]*ep2[1]+1.500000000*e[25]*ep2[7]-.5000000000*e[25]*ep2[3]-.5000000000*e[25]*ep2[2]+.5000000000*e[25]*ep2[8];
A[61]=e[5]*e[27]*e[30]+e[5]*e[29]*e[32]+e[5]*e[28]*e[31]+e[32]*e[27]*e[3]+e[32]*e[0]*e[30]+e[32]*e[28]*e[4]+e[32]*e[1]*e[31]+e[8]*e[27]*e[33]+e[8]*e[29]*e[35]+e[8]*e[28]*e[34]+e[29]*e[27]*e[0]+e[29]*e[28]*e[1]+e[35]*e[27]*e[6]+e[35]*e[0]*e[33]+e[35]*e[28]*e[7]+e[35]*e[1]*e[34]-1.*e[29]*e[34]*e[7]-1.*e[29]*e[33]*e[6]-1.*e[29]*e[30]*e[3]-1.*e[29]*e[31]*e[4]+.5000000000*e[2]*ep2[27]+1.500000000*e[2]*ep2[29]+.5000000000*e[2]*ep2[28]+.5000000000*e[2]*ep2[32]-.5000000000*e[2]*ep2[31]-.5000000000*e[2]*ep2[33]-.5000000000*e[2]*ep2[30]-.5000000000*e[2]*ep2[34]+.5000000000*e[2]*ep2[35];
A[175]=e[13]*e[12]*e[6]+e[13]*e[3]*e[15]+e[13]*e[4]*e[16]+e[13]*e[14]*e[8]+e[13]*e[5]*e[17]+e[16]*e[15]*e[6]+e[16]*e[17]*e[8]+e[1]*e[11]*e[17]+e[1]*e[9]*e[15]+e[1]*e[10]*e[16]+e[4]*e[14]*e[17]+e[4]*e[12]*e[15]+e[10]*e[9]*e[6]+e[10]*e[0]*e[15]+e[10]*e[11]*e[8]+e[10]*e[2]*e[17]-1.*e[16]*e[11]*e[2]-1.*e[16]*e[9]*e[0]-1.*e[16]*e[14]*e[5]-1.*e[16]*e[12]*e[3]+.5000000000*ep2[13]*e[7]+1.500000000*ep2[16]*e[7]+.5000000000*e[7]*ep2[17]+.5000000000*e[7]*ep2[15]-.5000000000*e[7]*ep2[9]-.5000000000*e[7]*ep2[11]-.5000000000*e[7]*ep2[12]+.5000000000*e[7]*ep2[10]-.5000000000*e[7]*ep2[14];
A[60]=.5000000000*e[29]*ep2[32]+.5000000000*e[29]*ep2[35]-.5000000000*e[29]*ep2[31]-.5000000000*e[29]*ep2[33]-.5000000000*e[29]*ep2[30]-.5000000000*e[29]*ep2[34]+e[32]*e[27]*e[30]+.5000000000*ep3[29]+.5000000000*e[29]*ep2[28]+e[35]*e[28]*e[34]+.5000000000*e[29]*ep2[27]+e[35]*e[27]*e[33]+e[32]*e[28]*e[31];
A[174]=-1.*e[16]*e[21]*e[12]+e[10]*e[18]*e[15]+e[10]*e[9]*e[24]+e[10]*e[20]*e[17]+e[10]*e[11]*e[26]+e[19]*e[11]*e[17]+e[19]*e[9]*e[15]+e[19]*e[10]*e[16]+e[13]*e[21]*e[15]+e[13]*e[12]*e[24]+e[13]*e[23]*e[17]+e[13]*e[14]*e[26]+e[13]*e[22]*e[16]+e[22]*e[14]*e[17]+e[22]*e[12]*e[15]+e[16]*e[24]*e[15]+e[16]*e[26]*e[17]-1.*e[16]*e[23]*e[14]-1.*e[16]*e[20]*e[11]-1.*e[16]*e[18]*e[9]+.5000000000*ep2[13]*e[25]+1.500000000*e[25]*ep2[16]+.5000000000*e[25]*ep2[17]+.5000000000*e[25]*ep2[15]+.5000000000*ep2[10]*e[25]-.5000000000*e[25]*ep2[9]-.5000000000*e[25]*ep2[11]-.5000000000*e[25]*ep2[12]-.5000000000*e[25]*ep2[14];
A[59]=e[19]*e[20]*e[2]+e[22]*e[18]*e[3]+e[22]*e[0]*e[21]+e[22]*e[19]*e[4]+e[22]*e[20]*e[5]+e[22]*e[2]*e[23]-1.*e[19]*e[21]*e[3]-1.*e[19]*e[26]*e[8]+e[19]*e[25]*e[7]-1.*e[19]*e[23]*e[5]-1.*e[19]*e[24]*e[6]+e[4]*e[18]*e[21]+e[4]*e[20]*e[23]+e[25]*e[18]*e[6]+e[25]*e[0]*e[24]+e[25]*e[20]*e[8]+e[25]*e[2]*e[26]+e[7]*e[18]*e[24]+e[7]*e[20]*e[26]+e[19]*e[18]*e[0]+1.500000000*ep2[19]*e[1]+.5000000000*e[1]*ep2[22]+.5000000000*e[1]*ep2[18]+.5000000000*e[1]*ep2[20]+.5000000000*e[1]*ep2[25]-.5000000000*e[1]*ep2[26]-.5000000000*e[1]*ep2[23]-.5000000000*e[1]*ep2[24]-.5000000000*e[1]*ep2[21];
A[169]=e[19]*e[27]*e[24]+e[19]*e[18]*e[33]+e[19]*e[28]*e[25]+e[19]*e[29]*e[26]+e[19]*e[20]*e[35]+e[28]*e[18]*e[24]+e[28]*e[20]*e[26]+e[22]*e[30]*e[24]+e[22]*e[21]*e[33]+e[22]*e[31]*e[25]+e[22]*e[32]*e[26]+e[22]*e[23]*e[35]+e[31]*e[21]*e[24]+e[31]*e[23]*e[26]+e[25]*e[33]*e[24]+e[25]*e[35]*e[26]-1.*e[25]*e[27]*e[18]-1.*e[25]*e[30]*e[21]-1.*e[25]*e[29]*e[20]-1.*e[25]*e[32]*e[23]-.5000000000*e[34]*ep2[18]-.5000000000*e[34]*ep2[23]-.5000000000*e[34]*ep2[20]-.5000000000*e[34]*ep2[21]+.5000000000*ep2[19]*e[34]+.5000000000*ep2[22]*e[34]+1.500000000*e[34]*ep2[25]+.5000000000*e[34]*ep2[24]+.5000000000*e[34]*ep2[26];
A[58]=e[16]*e[0]*e[6]+e[16]*e[2]*e[8]+e[1]*e[11]*e[2]-1.*e[1]*e[15]*e[6]+e[1]*e[9]*e[0]-1.*e[1]*e[14]*e[5]-1.*e[1]*e[12]*e[3]-1.*e[1]*e[17]*e[8]+e[4]*e[9]*e[3]+e[4]*e[0]*e[12]+e[4]*e[1]*e[13]+e[4]*e[11]*e[5]+e[4]*e[2]*e[14]+e[13]*e[0]*e[3]+e[13]*e[2]*e[5]+e[7]*e[9]*e[6]+e[7]*e[0]*e[15]+e[7]*e[1]*e[16]+e[7]*e[11]*e[8]+e[7]*e[2]*e[17]-.5000000000*e[10]*ep2[6]-.5000000000*e[10]*ep2[5]-.5000000000*e[10]*ep2[3]-.5000000000*e[10]*ep2[8]+1.500000000*e[10]*ep2[1]+.5000000000*e[10]*ep2[0]+.5000000000*e[10]*ep2[2]+.5000000000*e[10]*ep2[4]+.5000000000*e[10]*ep2[7];
A[168]=e[13]*e[14]*e[17]+e[13]*e[12]*e[15]+e[10]*e[9]*e[15]+.5000000000*e[16]*ep2[15]-.5000000000*e[16]*ep2[11]-.5000000000*e[16]*ep2[12]-.5000000000*e[16]*ep2[14]+e[10]*e[11]*e[17]+.5000000000*ep2[10]*e[16]+.5000000000*ep3[16]-.5000000000*e[16]*ep2[9]+.5000000000*e[16]*ep2[17]+.5000000000*ep2[13]*e[16];
A[57]=e[10]*e[29]*e[20]+e[22]*e[27]*e[12]+e[22]*e[9]*e[30]+e[22]*e[29]*e[14]+e[22]*e[11]*e[32]+e[22]*e[10]*e[31]+e[31]*e[18]*e[12]+e[31]*e[9]*e[21]+e[31]*e[20]*e[14]+e[31]*e[11]*e[23]-1.*e[10]*e[33]*e[24]-1.*e[10]*e[30]*e[21]-1.*e[10]*e[35]*e[26]-1.*e[10]*e[32]*e[23]+e[10]*e[34]*e[25]+e[19]*e[27]*e[9]+e[19]*e[29]*e[11]+e[28]*e[18]*e[9]+e[28]*e[20]*e[11]+e[16]*e[27]*e[24]+e[16]*e[18]*e[33]+e[16]*e[28]*e[25]+e[16]*e[19]*e[34]+e[16]*e[29]*e[26]+e[16]*e[20]*e[35]-1.*e[19]*e[30]*e[12]-1.*e[19]*e[32]*e[14]-1.*e[19]*e[33]*e[15]-1.*e[19]*e[35]*e[17]-1.*e[28]*e[23]*e[14]-1.*e[28]*e[24]*e[15]-1.*e[28]*e[26]*e[17]-1.*e[28]*e[21]*e[12]+e[25]*e[27]*e[15]+e[25]*e[9]*e[33]+e[25]*e[29]*e[17]+e[25]*e[11]*e[35]+e[34]*e[18]*e[15]+e[34]*e[9]*e[24]+e[34]*e[20]*e[17]+e[34]*e[11]*e[26]+e[13]*e[27]*e[21]+e[13]*e[18]*e[30]+e[13]*e[28]*e[22]+e[13]*e[19]*e[31]+e[13]*e[29]*e[23]+e[13]*e[20]*e[32]+e[10]*e[27]*e[18]+3.*e[10]*e[28]*e[19];
A[171]=e[4]*e[30]*e[15]+e[4]*e[12]*e[33]+e[4]*e[32]*e[17]+e[4]*e[14]*e[35]+e[4]*e[31]*e[16]+e[4]*e[13]*e[34]+e[7]*e[33]*e[15]+e[7]*e[35]*e[17]+3.*e[7]*e[34]*e[16]+e[1]*e[27]*e[15]+e[1]*e[9]*e[33]+e[1]*e[29]*e[17]+e[1]*e[11]*e[35]+e[1]*e[28]*e[16]+e[1]*e[10]*e[34]-1.*e[16]*e[27]*e[0]-1.*e[16]*e[32]*e[5]+e[16]*e[33]*e[6]-1.*e[16]*e[30]*e[3]+e[16]*e[35]*e[8]-1.*e[16]*e[29]*e[2]+e[13]*e[30]*e[6]+e[13]*e[3]*e[33]+e[13]*e[31]*e[7]+e[13]*e[32]*e[8]+e[13]*e[5]*e[35]-1.*e[34]*e[11]*e[2]+e[34]*e[15]*e[6]-1.*e[34]*e[9]*e[0]-1.*e[34]*e[14]*e[5]-1.*e[34]*e[12]*e[3]+e[34]*e[17]*e[8]+e[31]*e[12]*e[6]+e[31]*e[3]*e[15]+e[31]*e[14]*e[8]+e[31]*e[5]*e[17]-1.*e[7]*e[27]*e[9]-1.*e[7]*e[30]*e[12]+e[7]*e[28]*e[10]-1.*e[7]*e[32]*e[14]+e[10]*e[27]*e[6]+e[10]*e[0]*e[33]+e[10]*e[29]*e[8]+e[10]*e[2]*e[35]+e[28]*e[9]*e[6]+e[28]*e[0]*e[15]+e[28]*e[11]*e[8]+e[28]*e[2]*e[17]-1.*e[7]*e[29]*e[11];
A[56]=e[22]*e[18]*e[12]+e[22]*e[9]*e[21]+e[22]*e[20]*e[14]+e[22]*e[11]*e[23]+e[22]*e[19]*e[13]+e[25]*e[18]*e[15]+e[25]*e[9]*e[24]+e[25]*e[20]*e[17]+e[25]*e[11]*e[26]+e[25]*e[19]*e[16]+e[16]*e[18]*e[24]+e[16]*e[20]*e[26]+e[13]*e[18]*e[21]+e[13]*e[20]*e[23]+e[19]*e[18]*e[9]+e[19]*e[20]*e[11]-1.*e[19]*e[23]*e[14]-1.*e[19]*e[24]*e[15]-1.*e[19]*e[26]*e[17]-1.*e[19]*e[21]*e[12]+.5000000000*e[10]*ep2[22]+.5000000000*e[10]*ep2[25]+1.500000000*e[10]*ep2[19]+.5000000000*e[10]*ep2[18]+.5000000000*e[10]*ep2[20]-.5000000000*e[10]*ep2[26]-.5000000000*e[10]*ep2[23]-.5000000000*e[10]*ep2[24]-.5000000000*e[10]*ep2[21];
A[170]=e[19]*e[20]*e[26]-.5000000000*e[25]*ep2[20]+e[22]*e[21]*e[24]+e[19]*e[18]*e[24]+.5000000000*ep2[22]*e[25]-.5000000000*e[25]*ep2[21]-.5000000000*e[25]*ep2[23]+.5000000000*ep2[19]*e[25]-.5000000000*e[25]*ep2[18]+.5000000000*e[25]*ep2[24]+.5000000000*e[25]*ep2[26]+.5000000000*ep3[25]+e[22]*e[23]*e[26];
A[73]=-1.*e[20]*e[33]*e[6]-1.*e[20]*e[30]*e[3]-1.*e[20]*e[31]*e[4]-1.*e[29]*e[21]*e[3]-1.*e[29]*e[22]*e[4]-1.*e[29]*e[25]*e[7]-1.*e[29]*e[24]*e[6]+e[8]*e[27]*e[24]+e[8]*e[18]*e[33]+e[8]*e[28]*e[25]+e[8]*e[19]*e[34]+e[23]*e[27]*e[3]+e[23]*e[0]*e[30]+e[23]*e[28]*e[4]+e[23]*e[1]*e[31]+e[32]*e[18]*e[3]+e[32]*e[0]*e[21]+e[32]*e[19]*e[4]+e[32]*e[1]*e[22]+e[26]*e[27]*e[6]+e[26]*e[0]*e[33]+e[26]*e[28]*e[7]+e[26]*e[1]*e[34]+e[26]*e[29]*e[8]+e[26]*e[2]*e[35]+e[35]*e[18]*e[6]+e[35]*e[0]*e[24]+e[35]*e[19]*e[7]+e[35]*e[1]*e[25]+e[35]*e[20]*e[8]+e[2]*e[27]*e[18]+e[2]*e[28]*e[19]+3.*e[2]*e[29]*e[20]+e[20]*e[27]*e[0]+e[20]*e[28]*e[1]+e[29]*e[18]*e[0]+e[29]*e[19]*e[1]+e[5]*e[27]*e[21]+e[5]*e[18]*e[30]+e[5]*e[28]*e[22]+e[5]*e[19]*e[31]+e[5]*e[29]*e[23]+e[5]*e[20]*e[32]-1.*e[2]*e[33]*e[24]-1.*e[2]*e[30]*e[21]-1.*e[2]*e[31]*e[22]+e[2]*e[32]*e[23]-1.*e[2]*e[34]*e[25]-1.*e[20]*e[34]*e[7];
A[72]=e[5]*e[18]*e[3]+e[5]*e[0]*e[21]+e[5]*e[19]*e[4]+e[5]*e[1]*e[22]+e[5]*e[2]*e[23]+e[23]*e[1]*e[4]+e[23]*e[0]*e[3]+e[8]*e[18]*e[6]+e[8]*e[0]*e[24]+e[8]*e[19]*e[7]+e[8]*e[1]*e[25]+e[8]*e[2]*e[26]+e[26]*e[1]*e[7]+e[26]*e[0]*e[6]+e[2]*e[18]*e[0]+e[2]*e[19]*e[1]-1.*e[2]*e[21]*e[3]-1.*e[2]*e[22]*e[4]-1.*e[2]*e[25]*e[7]-1.*e[2]*e[24]*e[6]-.5000000000*e[20]*ep2[4]+.5000000000*e[20]*ep2[0]-.5000000000*e[20]*ep2[6]+.5000000000*e[20]*ep2[5]+.5000000000*e[20]*ep2[1]-.5000000000*e[20]*ep2[7]-.5000000000*e[20]*ep2[3]+1.500000000*e[20]*ep2[2]+.5000000000*e[20]*ep2[8];
A[75]=e[14]*e[9]*e[3]+e[14]*e[0]*e[12]+e[14]*e[10]*e[4]+e[14]*e[1]*e[13]+e[14]*e[11]*e[5]+e[17]*e[9]*e[6]+e[17]*e[0]*e[15]+e[17]*e[10]*e[7]+e[17]*e[1]*e[16]+e[17]*e[11]*e[8]+e[8]*e[9]*e[15]+e[8]*e[10]*e[16]+e[5]*e[9]*e[12]+e[5]*e[10]*e[13]+e[11]*e[9]*e[0]+e[11]*e[10]*e[1]-1.*e[11]*e[13]*e[4]-1.*e[11]*e[16]*e[7]-1.*e[11]*e[15]*e[6]-1.*e[11]*e[12]*e[3]+.5000000000*e[2]*ep2[14]+.5000000000*e[2]*ep2[17]+1.500000000*e[2]*ep2[11]+.5000000000*e[2]*ep2[9]+.5000000000*e[2]*ep2[10]-.5000000000*e[2]*ep2[16]-.5000000000*e[2]*ep2[12]-.5000000000*e[2]*ep2[15]-.5000000000*e[2]*ep2[13];
A[74]=e[14]*e[18]*e[12]+e[14]*e[9]*e[21]+e[14]*e[11]*e[23]+e[14]*e[19]*e[13]+e[14]*e[10]*e[22]+e[23]*e[9]*e[12]+e[23]*e[10]*e[13]+e[17]*e[18]*e[15]+e[17]*e[9]*e[24]+e[17]*e[11]*e[26]+e[17]*e[19]*e[16]+e[17]*e[10]*e[25]+e[26]*e[9]*e[15]+e[26]*e[10]*e[16]-1.*e[11]*e[24]*e[15]-1.*e[11]*e[25]*e[16]+e[11]*e[18]*e[9]-1.*e[11]*e[21]*e[12]+e[11]*e[19]*e[10]-1.*e[11]*e[22]*e[13]+1.500000000*e[20]*ep2[11]+.5000000000*e[20]*ep2[9]+.5000000000*e[20]*ep2[10]+.5000000000*e[20]*ep2[14]+.5000000000*e[20]*ep2[17]-.5000000000*e[20]*ep2[16]-.5000000000*e[20]*ep2[12]-.5000000000*e[20]*ep2[15]-.5000000000*e[20]*ep2[13];
A[77]=e[23]*e[10]*e[31]+e[32]*e[18]*e[12]+e[32]*e[9]*e[21]+e[32]*e[19]*e[13]+e[32]*e[10]*e[22]-1.*e[11]*e[33]*e[24]-1.*e[11]*e[30]*e[21]+e[11]*e[35]*e[26]-1.*e[11]*e[31]*e[22]-1.*e[11]*e[34]*e[25]+e[20]*e[27]*e[9]+e[20]*e[28]*e[10]+e[29]*e[18]*e[9]+e[29]*e[19]*e[10]+e[17]*e[27]*e[24]+e[17]*e[18]*e[33]+e[17]*e[28]*e[25]+e[17]*e[19]*e[34]+e[17]*e[29]*e[26]+e[17]*e[20]*e[35]-1.*e[20]*e[30]*e[12]-1.*e[20]*e[31]*e[13]-1.*e[20]*e[33]*e[15]-1.*e[20]*e[34]*e[16]-1.*e[29]*e[24]*e[15]-1.*e[29]*e[25]*e[16]-1.*e[29]*e[21]*e[12]-1.*e[29]*e[22]*e[13]+e[26]*e[27]*e[15]+e[26]*e[9]*e[33]+e[26]*e[28]*e[16]+e[26]*e[10]*e[34]+e[35]*e[18]*e[15]+e[35]*e[9]*e[24]+e[35]*e[19]*e[16]+e[35]*e[10]*e[25]+e[14]*e[27]*e[21]+e[14]*e[18]*e[30]+e[14]*e[28]*e[22]+e[14]*e[19]*e[31]+e[14]*e[29]*e[23]+e[14]*e[20]*e[32]+e[11]*e[27]*e[18]+e[11]*e[28]*e[19]+3.*e[11]*e[29]*e[20]+e[23]*e[27]*e[12]+e[23]*e[9]*e[30]+e[23]*e[11]*e[32]+e[23]*e[28]*e[13];
A[76]=e[23]*e[18]*e[12]+e[23]*e[9]*e[21]+e[23]*e[20]*e[14]+e[23]*e[19]*e[13]+e[23]*e[10]*e[22]+e[26]*e[18]*e[15]+e[26]*e[9]*e[24]+e[26]*e[20]*e[17]+e[26]*e[19]*e[16]+e[26]*e[10]*e[25]+e[17]*e[19]*e[25]+e[17]*e[18]*e[24]+e[14]*e[19]*e[22]+e[14]*e[18]*e[21]+e[20]*e[18]*e[9]+e[20]*e[19]*e[10]-1.*e[20]*e[24]*e[15]-1.*e[20]*e[25]*e[16]-1.*e[20]*e[21]*e[12]-1.*e[20]*e[22]*e[13]+.5000000000*e[11]*ep2[23]+.5000000000*e[11]*ep2[26]+.5000000000*e[11]*ep2[19]+.5000000000*e[11]*ep2[18]+1.500000000*e[11]*ep2[20]-.5000000000*e[11]*ep2[22]-.5000000000*e[11]*ep2[24]-.5000000000*e[11]*ep2[21]-.5000000000*e[11]*ep2[25];
A[79]=-1.*e[20]*e[21]*e[3]+e[20]*e[26]*e[8]-1.*e[20]*e[22]*e[4]-1.*e[20]*e[25]*e[7]-1.*e[20]*e[24]*e[6]+e[5]*e[19]*e[22]+e[5]*e[18]*e[21]+e[26]*e[18]*e[6]+e[26]*e[0]*e[24]+e[26]*e[19]*e[7]+e[26]*e[1]*e[25]+e[8]*e[19]*e[25]+e[8]*e[18]*e[24]+e[20]*e[18]*e[0]+e[20]*e[19]*e[1]+e[23]*e[18]*e[3]+e[23]*e[0]*e[21]+e[23]*e[19]*e[4]+e[23]*e[1]*e[22]+e[23]*e[20]*e[5]+1.500000000*ep2[20]*e[2]+.5000000000*e[2]*ep2[23]+.5000000000*e[2]*ep2[19]+.5000000000*e[2]*ep2[18]+.5000000000*e[2]*ep2[26]-.5000000000*e[2]*ep2[22]-.5000000000*e[2]*ep2[24]-.5000000000*e[2]*ep2[21]-.5000000000*e[2]*ep2[25];
A[78]=-1.*e[2]*e[15]*e[6]+e[2]*e[9]*e[0]-1.*e[2]*e[12]*e[3]+e[5]*e[9]*e[3]+e[5]*e[0]*e[12]+e[5]*e[10]*e[4]+e[5]*e[1]*e[13]+e[5]*e[2]*e[14]+e[14]*e[1]*e[4]+e[14]*e[0]*e[3]+e[8]*e[9]*e[6]+e[8]*e[0]*e[15]+e[8]*e[10]*e[7]+e[8]*e[1]*e[16]+e[8]*e[2]*e[17]+e[17]*e[1]*e[7]+e[17]*e[0]*e[6]+e[2]*e[10]*e[1]-1.*e[2]*e[13]*e[4]-1.*e[2]*e[16]*e[7]+.5000000000*e[11]*ep2[1]+.5000000000*e[11]*ep2[0]+1.500000000*e[11]*ep2[2]+.5000000000*e[11]*ep2[5]+.5000000000*e[11]*ep2[8]-.5000000000*e[11]*ep2[4]-.5000000000*e[11]*ep2[6]-.5000000000*e[11]*ep2[7]-.5000000000*e[11]*ep2[3];
A[64]=e[5]*e[19]*e[13]+e[5]*e[10]*e[22]+e[8]*e[18]*e[15]+e[8]*e[9]*e[24]+e[8]*e[20]*e[17]+e[8]*e[11]*e[26]+e[8]*e[19]*e[16]+e[8]*e[10]*e[25]+e[2]*e[18]*e[9]+e[2]*e[19]*e[10]-1.*e[11]*e[21]*e[3]-1.*e[11]*e[22]*e[4]-1.*e[11]*e[25]*e[7]-1.*e[11]*e[24]*e[6]+e[14]*e[18]*e[3]+e[14]*e[0]*e[21]+e[14]*e[19]*e[4]+e[14]*e[1]*e[22]+e[14]*e[2]*e[23]-1.*e[20]*e[13]*e[4]-1.*e[20]*e[16]*e[7]-1.*e[20]*e[15]*e[6]-1.*e[20]*e[12]*e[3]+e[23]*e[9]*e[3]+e[23]*e[0]*e[12]+e[23]*e[10]*e[4]+e[23]*e[1]*e[13]+e[17]*e[18]*e[6]+e[17]*e[0]*e[24]+e[17]*e[19]*e[7]+e[17]*e[1]*e[25]+e[17]*e[2]*e[26]-1.*e[2]*e[24]*e[15]-1.*e[2]*e[25]*e[16]-1.*e[2]*e[21]*e[12]-1.*e[2]*e[22]*e[13]+e[26]*e[9]*e[6]+e[26]*e[0]*e[15]+e[26]*e[10]*e[7]+e[26]*e[1]*e[16]+e[11]*e[18]*e[0]+e[11]*e[19]*e[1]+3.*e[11]*e[20]*e[2]+e[20]*e[9]*e[0]+e[20]*e[10]*e[1]+e[5]*e[18]*e[12]+e[5]*e[9]*e[21]+e[5]*e[20]*e[14]+e[5]*e[11]*e[23];
A[65]=e[32]*e[1]*e[4]+e[32]*e[0]*e[3]+e[8]*e[27]*e[6]+e[8]*e[0]*e[33]+e[8]*e[28]*e[7]+e[8]*e[1]*e[34]+e[35]*e[1]*e[7]+e[35]*e[0]*e[6]+e[2]*e[27]*e[0]+e[2]*e[28]*e[1]-1.*e[2]*e[34]*e[7]+e[2]*e[32]*e[5]-1.*e[2]*e[33]*e[6]-1.*e[2]*e[30]*e[3]+e[2]*e[35]*e[8]-1.*e[2]*e[31]*e[4]+e[5]*e[27]*e[3]+e[5]*e[0]*e[30]+e[5]*e[28]*e[4]+e[5]*e[1]*e[31]+1.500000000*e[29]*ep2[2]-.5000000000*e[29]*ep2[4]+.5000000000*e[29]*ep2[0]-.5000000000*e[29]*ep2[6]+.5000000000*e[29]*ep2[5]+.5000000000*e[29]*ep2[1]-.5000000000*e[29]*ep2[7]-.5000000000*e[29]*ep2[3]+.5000000000*e[29]*ep2[8];
A[66]=e[5]*e[0]*e[3]+e[8]*e[1]*e[7]+e[8]*e[0]*e[6]+e[5]*e[1]*e[4]-.5000000000*e[2]*ep2[4]+.5000000000*ep3[2]+.5000000000*e[2]*ep2[1]-.5000000000*e[2]*ep2[3]+.5000000000*e[2]*ep2[0]+.5000000000*e[2]*ep2[8]+.5000000000*e[2]*ep2[5]-.5000000000*e[2]*ep2[6]-.5000000000*e[2]*ep2[7];
A[67]=e[35]*e[9]*e[15]+e[35]*e[10]*e[16]-1.*e[11]*e[30]*e[12]-1.*e[11]*e[31]*e[13]-1.*e[11]*e[33]*e[15]-1.*e[11]*e[34]*e[16]+e[11]*e[27]*e[9]+e[11]*e[28]*e[10]+e[14]*e[27]*e[12]+e[14]*e[9]*e[30]+e[14]*e[11]*e[32]+e[14]*e[28]*e[13]+e[14]*e[10]*e[31]+e[32]*e[9]*e[12]+e[32]*e[10]*e[13]+e[17]*e[27]*e[15]+e[17]*e[9]*e[33]+e[17]*e[11]*e[35]+e[17]*e[28]*e[16]+e[17]*e[10]*e[34]+1.500000000*e[29]*ep2[11]-.5000000000*e[29]*ep2[16]+.5000000000*e[29]*ep2[9]-.5000000000*e[29]*ep2[12]-.5000000000*e[29]*ep2[15]+.5000000000*e[29]*ep2[17]+.5000000000*e[29]*ep2[10]+.5000000000*e[29]*ep2[14]-.5000000000*e[29]*ep2[13];
A[68]=e[14]*e[9]*e[12]+e[17]*e[10]*e[16]+e[17]*e[9]*e[15]+.5000000000*ep3[11]+e[14]*e[10]*e[13]+.5000000000*e[11]*ep2[10]-.5000000000*e[11]*ep2[15]+.5000000000*e[11]*ep2[14]-.5000000000*e[11]*ep2[13]-.5000000000*e[11]*ep2[12]+.5000000000*e[11]*ep2[9]-.5000000000*e[11]*ep2[16]+.5000000000*e[11]*ep2[17];
A[69]=e[20]*e[27]*e[18]+e[20]*e[28]*e[19]+e[23]*e[27]*e[21]+e[23]*e[18]*e[30]+e[23]*e[28]*e[22]+e[23]*e[19]*e[31]+e[23]*e[20]*e[32]+e[32]*e[19]*e[22]+e[32]*e[18]*e[21]+e[26]*e[27]*e[24]+e[26]*e[18]*e[33]+e[26]*e[28]*e[25]+e[26]*e[19]*e[34]+e[26]*e[20]*e[35]+e[35]*e[19]*e[25]+e[35]*e[18]*e[24]-1.*e[20]*e[33]*e[24]-1.*e[20]*e[30]*e[21]-1.*e[20]*e[31]*e[22]-1.*e[20]*e[34]*e[25]+.5000000000*e[29]*ep2[23]+.5000000000*e[29]*ep2[26]-.5000000000*e[29]*ep2[22]-.5000000000*e[29]*ep2[24]-.5000000000*e[29]*ep2[21]-.5000000000*e[29]*ep2[25]+1.500000000*e[29]*ep2[20]+.5000000000*e[29]*ep2[19]+.5000000000*e[29]*ep2[18];
A[70]=.5000000000*e[20]*ep2[26]+.5000000000*e[20]*ep2[18]+.5000000000*ep3[20]+.5000000000*e[20]*ep2[19]+e[26]*e[18]*e[24]+.5000000000*e[20]*ep2[23]-.5000000000*e[20]*ep2[25]+e[23]*e[19]*e[22]-.5000000000*e[20]*ep2[24]-.5000000000*e[20]*ep2[21]-.5000000000*e[20]*ep2[22]+e[23]*e[18]*e[21]+e[26]*e[19]*e[25];
A[71]=e[8]*e[28]*e[16]+e[8]*e[10]*e[34]+e[2]*e[27]*e[9]+3.*e[2]*e[29]*e[11]+e[2]*e[28]*e[10]+e[11]*e[27]*e[0]-1.*e[11]*e[34]*e[7]-1.*e[11]*e[33]*e[6]-1.*e[11]*e[30]*e[3]+e[11]*e[28]*e[1]-1.*e[11]*e[31]*e[4]+e[14]*e[27]*e[3]+e[14]*e[0]*e[30]+e[14]*e[28]*e[4]+e[14]*e[1]*e[31]+e[14]*e[2]*e[32]+e[29]*e[10]*e[1]-1.*e[29]*e[13]*e[4]-1.*e[29]*e[16]*e[7]-1.*e[29]*e[15]*e[6]+e[29]*e[9]*e[0]-1.*e[29]*e[12]*e[3]+e[32]*e[9]*e[3]+e[32]*e[0]*e[12]+e[32]*e[10]*e[4]+e[32]*e[1]*e[13]+e[17]*e[27]*e[6]+e[17]*e[0]*e[33]+e[17]*e[28]*e[7]+e[17]*e[1]*e[34]+e[17]*e[2]*e[35]-1.*e[2]*e[30]*e[12]-1.*e[2]*e[31]*e[13]-1.*e[2]*e[33]*e[15]-1.*e[2]*e[34]*e[16]+e[35]*e[9]*e[6]+e[35]*e[0]*e[15]+e[35]*e[10]*e[7]+e[35]*e[1]*e[16]+e[5]*e[27]*e[12]+e[5]*e[9]*e[30]+e[5]*e[29]*e[14]+e[5]*e[11]*e[32]+e[5]*e[28]*e[13]+e[5]*e[10]*e[31]+e[8]*e[27]*e[15]+e[8]*e[9]*e[33]+e[8]*e[29]*e[17]+e[8]*e[11]*e[35];
A[91]=-1.*e[12]*e[34]*e[7]+e[12]*e[32]*e[5]-1.*e[12]*e[35]*e[8]-1.*e[12]*e[29]*e[2]-1.*e[12]*e[28]*e[1]+e[12]*e[31]*e[4]-1.*e[30]*e[11]*e[2]-1.*e[30]*e[10]*e[1]+e[30]*e[13]*e[4]-1.*e[30]*e[16]*e[7]+e[30]*e[14]*e[5]-1.*e[30]*e[17]*e[8]+e[15]*e[3]*e[33]+e[15]*e[31]*e[7]+e[15]*e[4]*e[34]+e[15]*e[32]*e[8]+e[15]*e[5]*e[35]+e[3]*e[27]*e[9]-1.*e[3]*e[28]*e[10]-1.*e[3]*e[34]*e[16]-1.*e[3]*e[35]*e[17]-1.*e[3]*e[29]*e[11]+e[33]*e[13]*e[7]+e[33]*e[4]*e[16]+e[33]*e[14]*e[8]+e[33]*e[5]*e[17]+e[9]*e[28]*e[4]+e[9]*e[1]*e[31]+e[9]*e[29]*e[5]+e[9]*e[2]*e[32]+e[27]*e[10]*e[4]+e[27]*e[1]*e[13]+e[27]*e[11]*e[5]+e[27]*e[2]*e[14]+3.*e[3]*e[30]*e[12]+e[3]*e[32]*e[14]+e[3]*e[31]*e[13]+e[6]*e[30]*e[15]+e[6]*e[12]*e[33]+e[6]*e[32]*e[17]+e[6]*e[14]*e[35]+e[6]*e[31]*e[16]+e[6]*e[13]*e[34]+e[0]*e[27]*e[12]+e[0]*e[9]*e[30]+e[0]*e[29]*e[14]+e[0]*e[11]*e[32]+e[0]*e[28]*e[13]+e[0]*e[10]*e[31];
A[90]=.5000000000*e[21]*ep2[24]-.5000000000*e[21]*ep2[25]+.5000000000*e[21]*ep2[23]-.5000000000*e[21]*ep2[26]+.5000000000*ep2[18]*e[21]+.5000000000*e[21]*ep2[22]-.5000000000*e[21]*ep2[20]+e[24]*e[22]*e[25]+e[24]*e[23]*e[26]-.5000000000*e[21]*ep2[19]+e[18]*e[19]*e[22]+e[18]*e[20]*e[23]+.5000000000*ep3[21];
A[89]=-.5000000000*e[30]*ep2[26]-.5000000000*e[30]*ep2[19]-.5000000000*e[30]*ep2[20]-.5000000000*e[30]*ep2[25]+.5000000000*ep2[18]*e[30]+1.500000000*e[30]*ep2[21]+.5000000000*e[30]*ep2[22]+.5000000000*e[30]*ep2[23]+.5000000000*e[30]*ep2[24]+e[18]*e[27]*e[21]+e[18]*e[28]*e[22]+e[18]*e[19]*e[31]+e[18]*e[29]*e[23]+e[18]*e[20]*e[32]+e[27]*e[19]*e[22]+e[27]*e[20]*e[23]+e[21]*e[31]*e[22]+e[21]*e[32]*e[23]+e[24]*e[21]*e[33]+e[24]*e[31]*e[25]+e[24]*e[22]*e[34]+e[24]*e[32]*e[26]+e[24]*e[23]*e[35]+e[33]*e[22]*e[25]+e[33]*e[23]*e[26]-1.*e[21]*e[29]*e[20]-1.*e[21]*e[35]*e[26]-1.*e[21]*e[28]*e[19]-1.*e[21]*e[34]*e[25];
A[88]=.5000000000*e[12]*ep2[15]-.5000000000*e[12]*ep2[17]+e[15]*e[13]*e[16]-.5000000000*e[12]*ep2[10]+e[15]*e[14]*e[17]-.5000000000*e[12]*ep2[16]-.5000000000*e[12]*ep2[11]+e[9]*e[10]*e[13]+.5000000000*e[12]*ep2[13]+.5000000000*ep2[9]*e[12]+.5000000000*ep3[12]+e[9]*e[11]*e[14]+.5000000000*e[12]*ep2[14];
A[95]=e[12]*e[13]*e[4]+e[12]*e[14]*e[5]+e[15]*e[12]*e[6]+e[15]*e[13]*e[7]+e[15]*e[4]*e[16]+e[15]*e[14]*e[8]+e[15]*e[5]*e[17]+e[6]*e[14]*e[17]+e[6]*e[13]*e[16]+e[0]*e[11]*e[14]+e[0]*e[9]*e[12]+e[0]*e[10]*e[13]+e[9]*e[10]*e[4]+e[9]*e[1]*e[13]+e[9]*e[11]*e[5]+e[9]*e[2]*e[14]-1.*e[12]*e[11]*e[2]-1.*e[12]*e[10]*e[1]-1.*e[12]*e[16]*e[7]-1.*e[12]*e[17]*e[8]+1.500000000*ep2[12]*e[3]+.5000000000*e[3]*ep2[15]-.5000000000*e[3]*ep2[16]+.5000000000*e[3]*ep2[9]-.5000000000*e[3]*ep2[11]-.5000000000*e[3]*ep2[17]-.5000000000*e[3]*ep2[10]+.5000000000*e[3]*ep2[14]+.5000000000*e[3]*ep2[13];
A[94]=e[18]*e[11]*e[14]+e[18]*e[9]*e[12]+e[18]*e[10]*e[13]+e[12]*e[23]*e[14]+e[12]*e[22]*e[13]+e[15]*e[12]*e[24]+e[15]*e[23]*e[17]+e[15]*e[14]*e[26]+e[15]*e[22]*e[16]+e[15]*e[13]*e[25]+e[24]*e[14]*e[17]+e[24]*e[13]*e[16]-1.*e[12]*e[25]*e[16]-1.*e[12]*e[26]*e[17]-1.*e[12]*e[20]*e[11]-1.*e[12]*e[19]*e[10]+e[9]*e[20]*e[14]+e[9]*e[11]*e[23]+e[9]*e[19]*e[13]+e[9]*e[10]*e[22]+.5000000000*ep2[9]*e[21]-.5000000000*e[21]*ep2[16]-.5000000000*e[21]*ep2[11]-.5000000000*e[21]*ep2[17]-.5000000000*e[21]*ep2[10]+1.500000000*e[21]*ep2[12]+.5000000000*e[21]*ep2[14]+.5000000000*e[21]*ep2[13]+.5000000000*e[21]*ep2[15];
A[93]=-1.*e[21]*e[35]*e[8]-1.*e[21]*e[29]*e[2]-1.*e[21]*e[28]*e[1]+e[21]*e[31]*e[4]-1.*e[30]*e[26]*e[8]-1.*e[30]*e[20]*e[2]-1.*e[30]*e[19]*e[1]+e[30]*e[22]*e[4]-1.*e[30]*e[25]*e[7]+e[30]*e[23]*e[5]+e[6]*e[31]*e[25]+e[6]*e[22]*e[34]+e[6]*e[32]*e[26]+e[6]*e[23]*e[35]+e[24]*e[30]*e[6]+e[24]*e[3]*e[33]+e[24]*e[31]*e[7]+e[24]*e[4]*e[34]+e[24]*e[32]*e[8]+e[24]*e[5]*e[35]+e[33]*e[21]*e[6]+e[33]*e[22]*e[7]+e[33]*e[4]*e[25]+e[33]*e[23]*e[8]+e[33]*e[5]*e[26]+e[0]*e[27]*e[21]+e[0]*e[18]*e[30]+e[0]*e[28]*e[22]+e[0]*e[19]*e[31]+e[0]*e[29]*e[23]+e[0]*e[20]*e[32]+e[18]*e[27]*e[3]+e[18]*e[28]*e[4]+e[18]*e[1]*e[31]+e[18]*e[29]*e[5]+e[18]*e[2]*e[32]+e[27]*e[19]*e[4]+e[27]*e[1]*e[22]+e[27]*e[20]*e[5]+e[27]*e[2]*e[23]+3.*e[3]*e[30]*e[21]+e[3]*e[31]*e[22]+e[3]*e[32]*e[23]-1.*e[3]*e[29]*e[20]-1.*e[3]*e[35]*e[26]-1.*e[3]*e[28]*e[19]-1.*e[3]*e[34]*e[25]-1.*e[21]*e[34]*e[7]+e[21]*e[32]*e[5];
A[92]=e[18]*e[1]*e[4]+e[18]*e[0]*e[3]+e[18]*e[2]*e[5]+e[3]*e[22]*e[4]+e[3]*e[23]*e[5]+e[6]*e[3]*e[24]+e[6]*e[22]*e[7]+e[6]*e[4]*e[25]+e[6]*e[23]*e[8]+e[6]*e[5]*e[26]+e[24]*e[4]*e[7]+e[24]*e[5]*e[8]+e[0]*e[19]*e[4]+e[0]*e[1]*e[22]+e[0]*e[20]*e[5]+e[0]*e[2]*e[23]-1.*e[3]*e[26]*e[8]-1.*e[3]*e[20]*e[2]-1.*e[3]*e[19]*e[1]-1.*e[3]*e[25]*e[7]+.5000000000*e[21]*ep2[4]+.5000000000*e[21]*ep2[0]+.5000000000*e[21]*ep2[6]+.5000000000*e[21]*ep2[5]-.5000000000*e[21]*ep2[1]-.5000000000*e[21]*ep2[7]+1.500000000*e[21]*ep2[3]-.5000000000*e[21]*ep2[2]-.5000000000*e[21]*ep2[8];
A[82]=.5000000000*ep2[27]*e[21]+1.500000000*e[21]*ep2[30]+.5000000000*e[21]*ep2[32]+.5000000000*e[21]*ep2[31]+.5000000000*e[21]*ep2[33]-.5000000000*e[21]*ep2[28]-.5000000000*e[21]*ep2[29]-.5000000000*e[21]*ep2[34]-.5000000000*e[21]*ep2[35]+e[18]*e[27]*e[30]+e[18]*e[29]*e[32]+e[18]*e[28]*e[31]+e[27]*e[28]*e[22]+e[27]*e[19]*e[31]+e[27]*e[29]*e[23]+e[27]*e[20]*e[32]+e[30]*e[31]*e[22]+e[30]*e[32]*e[23]+e[24]*e[30]*e[33]+e[24]*e[32]*e[35]+e[24]*e[31]*e[34]+e[33]*e[31]*e[25]+e[33]*e[22]*e[34]+e[33]*e[32]*e[26]+e[33]*e[23]*e[35]-1.*e[30]*e[29]*e[20]-1.*e[30]*e[35]*e[26]-1.*e[30]*e[28]*e[19]-1.*e[30]*e[34]*e[25];
A[192]=-.5000000000*e[26]*ep2[4]-.5000000000*e[26]*ep2[0]+.5000000000*e[26]*ep2[6]+.5000000000*e[26]*ep2[5]-.5000000000*e[26]*ep2[1]+.5000000000*e[26]*ep2[7]-.5000000000*e[26]*ep2[3]+.5000000000*e[26]*ep2[2]+1.500000000*e[26]*ep2[8]+e[20]*e[0]*e[6]+e[20]*e[2]*e[8]+e[5]*e[21]*e[6]+e[5]*e[3]*e[24]+e[5]*e[22]*e[7]+e[5]*e[4]*e[25]+e[5]*e[23]*e[8]+e[23]*e[4]*e[7]+e[23]*e[3]*e[6]+e[8]*e[24]*e[6]+e[8]*e[25]*e[7]+e[2]*e[18]*e[6]+e[2]*e[0]*e[24]+e[2]*e[19]*e[7]+e[2]*e[1]*e[25]-1.*e[8]*e[21]*e[3]-1.*e[8]*e[19]*e[1]-1.*e[8]*e[22]*e[4]-1.*e[8]*e[18]*e[0]+e[20]*e[1]*e[7];
A[83]=e[9]*e[27]*e[30]+e[9]*e[29]*e[32]+e[9]*e[28]*e[31]+e[33]*e[30]*e[15]+e[33]*e[32]*e[17]+e[33]*e[14]*e[35]+e[33]*e[31]*e[16]+e[33]*e[13]*e[34]+e[27]*e[29]*e[14]+e[27]*e[11]*e[32]+e[27]*e[28]*e[13]+e[27]*e[10]*e[31]-1.*e[30]*e[28]*e[10]+e[30]*e[31]*e[13]+e[30]*e[32]*e[14]-1.*e[30]*e[34]*e[16]-1.*e[30]*e[35]*e[17]-1.*e[30]*e[29]*e[11]+e[15]*e[32]*e[35]+e[15]*e[31]*e[34]-.5000000000*e[12]*ep2[34]-.5000000000*e[12]*ep2[35]+.5000000000*e[12]*ep2[27]+.5000000000*e[12]*ep2[32]-.5000000000*e[12]*ep2[28]-.5000000000*e[12]*ep2[29]+.5000000000*e[12]*ep2[31]+.5000000000*e[12]*ep2[33]+1.500000000*e[12]*ep2[30];
A[193]=e[23]*e[30]*e[6]+e[23]*e[3]*e[33]+e[23]*e[31]*e[7]+e[23]*e[4]*e[34]+e[32]*e[21]*e[6]+e[32]*e[3]*e[24]+e[32]*e[22]*e[7]+e[32]*e[4]*e[25]+e[26]*e[33]*e[6]+e[26]*e[34]*e[7]+3.*e[26]*e[35]*e[8]+e[35]*e[24]*e[6]+e[35]*e[25]*e[7]+e[2]*e[27]*e[24]+e[2]*e[18]*e[33]+e[2]*e[28]*e[25]+e[2]*e[19]*e[34]+e[2]*e[29]*e[26]+e[2]*e[20]*e[35]+e[20]*e[27]*e[6]+e[20]*e[0]*e[33]+e[20]*e[28]*e[7]+e[20]*e[1]*e[34]+e[20]*e[29]*e[8]+e[29]*e[18]*e[6]+e[29]*e[0]*e[24]+e[29]*e[19]*e[7]+e[29]*e[1]*e[25]+e[5]*e[30]*e[24]+e[5]*e[21]*e[33]+e[5]*e[31]*e[25]+e[5]*e[22]*e[34]+e[5]*e[32]*e[26]+e[5]*e[23]*e[35]-1.*e[8]*e[27]*e[18]+e[8]*e[33]*e[24]-1.*e[8]*e[30]*e[21]-1.*e[8]*e[31]*e[22]+e[8]*e[32]*e[23]-1.*e[8]*e[28]*e[19]+e[8]*e[34]*e[25]-1.*e[26]*e[27]*e[0]-1.*e[26]*e[30]*e[3]-1.*e[26]*e[28]*e[1]-1.*e[26]*e[31]*e[4]-1.*e[35]*e[21]*e[3]-1.*e[35]*e[19]*e[1]-1.*e[35]*e[22]*e[4]-1.*e[35]*e[18]*e[0];
A[80]=e[27]*e[29]*e[32]+e[27]*e[28]*e[31]+e[33]*e[32]*e[35]+e[33]*e[31]*e[34]+.5000000000*ep3[30]-.5000000000*e[30]*ep2[28]-.5000000000*e[30]*ep2[29]-.5000000000*e[30]*ep2[34]+.5000000000*e[30]*ep2[33]+.5000000000*ep2[27]*e[30]+.5000000000*e[30]*ep2[32]+.5000000000*e[30]*ep2[31]-.5000000000*e[30]*ep2[35];
A[194]=.5000000000*ep2[14]*e[26]+1.500000000*e[26]*ep2[17]+.5000000000*e[26]*ep2[15]+.5000000000*e[26]*ep2[16]+.5000000000*ep2[11]*e[26]-.5000000000*e[26]*ep2[9]-.5000000000*e[26]*ep2[12]-.5000000000*e[26]*ep2[10]-.5000000000*e[26]*ep2[13]+e[20]*e[11]*e[17]+e[20]*e[9]*e[15]+e[20]*e[10]*e[16]+e[14]*e[21]*e[15]+e[14]*e[12]*e[24]+e[14]*e[23]*e[17]+e[14]*e[22]*e[16]+e[14]*e[13]*e[25]+e[23]*e[12]*e[15]+e[23]*e[13]*e[16]+e[17]*e[24]*e[15]+e[17]*e[25]*e[16]-1.*e[17]*e[18]*e[9]-1.*e[17]*e[21]*e[12]-1.*e[17]*e[19]*e[10]-1.*e[17]*e[22]*e[13]+e[11]*e[18]*e[15]+e[11]*e[9]*e[24]+e[11]*e[19]*e[16]+e[11]*e[10]*e[25];
A[81]=e[0]*e[27]*e[30]+e[0]*e[29]*e[32]+e[0]*e[28]*e[31]+e[30]*e[31]*e[4]+e[30]*e[32]*e[5]+e[6]*e[30]*e[33]+e[6]*e[32]*e[35]+e[6]*e[31]*e[34]+e[27]*e[28]*e[4]+e[27]*e[1]*e[31]+e[27]*e[29]*e[5]+e[27]*e[2]*e[32]+e[33]*e[31]*e[7]+e[33]*e[4]*e[34]+e[33]*e[32]*e[8]+e[33]*e[5]*e[35]-1.*e[30]*e[34]*e[7]-1.*e[30]*e[35]*e[8]-1.*e[30]*e[29]*e[2]-1.*e[30]*e[28]*e[1]+1.500000000*e[3]*ep2[30]+.5000000000*e[3]*ep2[32]+.5000000000*e[3]*ep2[31]+.5000000000*e[3]*ep2[27]-.5000000000*e[3]*ep2[28]-.5000000000*e[3]*ep2[29]+.5000000000*e[3]*ep2[33]-.5000000000*e[3]*ep2[34]-.5000000000*e[3]*ep2[35];
A[195]=.5000000000*ep2[14]*e[8]+1.500000000*ep2[17]*e[8]+.5000000000*e[8]*ep2[15]+.5000000000*e[8]*ep2[16]-.5000000000*e[8]*ep2[9]+.5000000000*e[8]*ep2[11]-.5000000000*e[8]*ep2[12]-.5000000000*e[8]*ep2[10]-.5000000000*e[8]*ep2[13]+e[14]*e[12]*e[6]+e[14]*e[3]*e[15]+e[14]*e[13]*e[7]+e[14]*e[4]*e[16]+e[14]*e[5]*e[17]+e[17]*e[15]*e[6]+e[17]*e[16]*e[7]+e[2]*e[11]*e[17]+e[2]*e[9]*e[15]+e[2]*e[10]*e[16]+e[5]*e[12]*e[15]+e[5]*e[13]*e[16]+e[11]*e[9]*e[6]+e[11]*e[0]*e[15]+e[11]*e[10]*e[7]+e[11]*e[1]*e[16]-1.*e[17]*e[10]*e[1]-1.*e[17]*e[13]*e[4]-1.*e[17]*e[9]*e[0]-1.*e[17]*e[12]*e[3];
A[86]=-.5000000000*e[3]*ep2[1]-.5000000000*e[3]*ep2[7]+.5000000000*ep3[3]-.5000000000*e[3]*ep2[8]+e[0]*e[2]*e[5]+.5000000000*e[3]*ep2[6]+.5000000000*e[3]*ep2[4]-.5000000000*e[3]*ep2[2]+e[0]*e[1]*e[4]+e[6]*e[4]*e[7]+.5000000000*ep2[0]*e[3]+.5000000000*e[3]*ep2[5]+e[6]*e[5]*e[8];
A[196]=.5000000000*ep2[23]*e[17]+1.500000000*ep2[26]*e[17]+.5000000000*e[17]*ep2[25]+.5000000000*e[17]*ep2[24]-.5000000000*e[17]*ep2[18]-.5000000000*e[17]*ep2[19]+.5000000000*e[17]*ep2[20]-.5000000000*e[17]*ep2[22]-.5000000000*e[17]*ep2[21]+e[23]*e[21]*e[15]+e[23]*e[12]*e[24]+e[23]*e[14]*e[26]+e[23]*e[22]*e[16]+e[23]*e[13]*e[25]+e[26]*e[24]*e[15]+e[26]*e[25]*e[16]+e[11]*e[19]*e[25]+e[11]*e[18]*e[24]+e[11]*e[20]*e[26]+e[14]*e[22]*e[25]+e[14]*e[21]*e[24]+e[20]*e[18]*e[15]+e[20]*e[9]*e[24]+e[20]*e[19]*e[16]+e[20]*e[10]*e[25]-1.*e[26]*e[18]*e[9]-1.*e[26]*e[21]*e[12]-1.*e[26]*e[19]*e[10]-1.*e[26]*e[22]*e[13];
A[87]=-1.*e[12]*e[34]*e[16]-1.*e[12]*e[35]*e[17]-1.*e[12]*e[29]*e[11]+e[9]*e[27]*e[12]+e[9]*e[29]*e[14]+e[9]*e[11]*e[32]+e[9]*e[28]*e[13]+e[9]*e[10]*e[31]+e[27]*e[11]*e[14]+e[27]*e[10]*e[13]+e[12]*e[32]*e[14]+e[12]*e[31]*e[13]+e[15]*e[12]*e[33]+e[15]*e[32]*e[17]+e[15]*e[14]*e[35]+e[15]*e[31]*e[16]+e[15]*e[13]*e[34]+e[33]*e[14]*e[17]+e[33]*e[13]*e[16]-1.*e[12]*e[28]*e[10]+.5000000000*ep2[9]*e[30]-.5000000000*e[30]*ep2[16]-.5000000000*e[30]*ep2[11]+1.500000000*e[30]*ep2[12]+.5000000000*e[30]*ep2[15]-.5000000000*e[30]*ep2[17]-.5000000000*e[30]*ep2[10]+.5000000000*e[30]*ep2[14]+.5000000000*e[30]*ep2[13];
A[197]=e[32]*e[22]*e[16]+e[32]*e[13]*e[25]-1.*e[17]*e[27]*e[18]+e[17]*e[33]*e[24]-1.*e[17]*e[30]*e[21]+e[17]*e[29]*e[20]+3.*e[17]*e[35]*e[26]-1.*e[17]*e[31]*e[22]-1.*e[17]*e[28]*e[19]+e[17]*e[34]*e[25]+e[20]*e[27]*e[15]+e[20]*e[9]*e[33]+e[20]*e[28]*e[16]+e[20]*e[10]*e[34]+e[29]*e[18]*e[15]+e[29]*e[9]*e[24]+e[29]*e[19]*e[16]+e[29]*e[10]*e[25]-1.*e[26]*e[27]*e[9]-1.*e[26]*e[30]*e[12]-1.*e[26]*e[28]*e[10]-1.*e[26]*e[31]*e[13]+e[26]*e[33]*e[15]+e[26]*e[34]*e[16]+e[35]*e[24]*e[15]+e[35]*e[25]*e[16]-1.*e[35]*e[18]*e[9]-1.*e[35]*e[21]*e[12]-1.*e[35]*e[19]*e[10]-1.*e[35]*e[22]*e[13]+e[14]*e[30]*e[24]+e[14]*e[21]*e[33]+e[14]*e[31]*e[25]+e[14]*e[22]*e[34]+e[14]*e[32]*e[26]+e[14]*e[23]*e[35]+e[11]*e[27]*e[24]+e[11]*e[18]*e[33]+e[11]*e[28]*e[25]+e[11]*e[19]*e[34]+e[11]*e[29]*e[26]+e[11]*e[20]*e[35]+e[23]*e[30]*e[15]+e[23]*e[12]*e[33]+e[23]*e[32]*e[17]+e[23]*e[31]*e[16]+e[23]*e[13]*e[34]+e[32]*e[21]*e[15]+e[32]*e[12]*e[24];
A[84]=e[6]*e[23]*e[17]+e[6]*e[14]*e[26]+e[6]*e[22]*e[16]+e[6]*e[13]*e[25]+e[0]*e[20]*e[14]+e[0]*e[11]*e[23]+e[0]*e[19]*e[13]+e[0]*e[10]*e[22]-1.*e[12]*e[26]*e[8]-1.*e[12]*e[20]*e[2]-1.*e[12]*e[19]*e[1]+e[12]*e[22]*e[4]-1.*e[12]*e[25]*e[7]+e[12]*e[23]*e[5]-1.*e[21]*e[11]*e[2]-1.*e[21]*e[10]*e[1]+e[21]*e[13]*e[4]-1.*e[21]*e[16]*e[7]+e[21]*e[14]*e[5]-1.*e[21]*e[17]*e[8]+e[15]*e[3]*e[24]+e[15]*e[22]*e[7]+e[15]*e[4]*e[25]+e[15]*e[23]*e[8]+e[15]*e[5]*e[26]-1.*e[3]*e[25]*e[16]-1.*e[3]*e[26]*e[17]-1.*e[3]*e[20]*e[11]-1.*e[3]*e[19]*e[10]+e[24]*e[13]*e[7]+e[24]*e[4]*e[16]+e[24]*e[14]*e[8]+e[24]*e[5]*e[17]+e[9]*e[18]*e[3]+e[9]*e[0]*e[21]+e[9]*e[19]*e[4]+e[9]*e[1]*e[22]+e[9]*e[20]*e[5]+e[9]*e[2]*e[23]+e[18]*e[0]*e[12]+e[18]*e[10]*e[4]+e[18]*e[1]*e[13]+e[18]*e[11]*e[5]+e[18]*e[2]*e[14]+3.*e[3]*e[21]*e[12]+e[3]*e[23]*e[14]+e[3]*e[22]*e[13]+e[6]*e[21]*e[15]+e[6]*e[12]*e[24];
A[198]=.5000000000*ep2[5]*e[17]+1.500000000*e[17]*ep2[8]+.5000000000*e[17]*ep2[7]+.5000000000*e[17]*ep2[6]+.5000000000*ep2[2]*e[17]-.5000000000*e[17]*ep2[4]-.5000000000*e[17]*ep2[0]-.5000000000*e[17]*ep2[1]-.5000000000*e[17]*ep2[3]+e[11]*e[1]*e[7]+e[11]*e[0]*e[6]+e[11]*e[2]*e[8]+e[5]*e[12]*e[6]+e[5]*e[3]*e[15]+e[5]*e[13]*e[7]+e[5]*e[4]*e[16]+e[5]*e[14]*e[8]+e[14]*e[4]*e[7]+e[14]*e[3]*e[6]+e[8]*e[15]*e[6]+e[8]*e[16]*e[7]-1.*e[8]*e[10]*e[1]-1.*e[8]*e[13]*e[4]-1.*e[8]*e[9]*e[0]-1.*e[8]*e[12]*e[3]+e[2]*e[9]*e[6]+e[2]*e[0]*e[15]+e[2]*e[10]*e[7]+e[2]*e[1]*e[16];
A[85]=e[6]*e[4]*e[34]+e[6]*e[32]*e[8]+e[6]*e[5]*e[35]+e[33]*e[4]*e[7]+e[33]*e[5]*e[8]+e[0]*e[27]*e[3]+e[0]*e[28]*e[4]+e[0]*e[1]*e[31]+e[0]*e[29]*e[5]+e[0]*e[2]*e[32]-1.*e[3]*e[34]*e[7]+e[3]*e[32]*e[5]+e[3]*e[33]*e[6]-1.*e[3]*e[35]*e[8]-1.*e[3]*e[29]*e[2]-1.*e[3]*e[28]*e[1]+e[3]*e[31]*e[4]+e[27]*e[1]*e[4]+e[27]*e[2]*e[5]+e[6]*e[31]*e[7]+.5000000000*e[30]*ep2[4]+.5000000000*e[30]*ep2[6]+.5000000000*e[30]*ep2[5]-.5000000000*e[30]*ep2[1]-.5000000000*e[30]*ep2[7]-.5000000000*e[30]*ep2[2]-.5000000000*e[30]*ep2[8]+.5000000000*ep2[0]*e[30]+1.500000000*e[30]*ep2[3];
A[199]=.5000000000*ep2[23]*e[8]+1.500000000*ep2[26]*e[8]-.5000000000*e[8]*ep2[18]-.5000000000*e[8]*ep2[19]-.5000000000*e[8]*ep2[22]+.5000000000*e[8]*ep2[24]-.5000000000*e[8]*ep2[21]+.5000000000*e[8]*ep2[25]+.5000000000*ep2[20]*e[8]+e[20]*e[18]*e[6]+e[20]*e[0]*e[24]+e[20]*e[19]*e[7]+e[20]*e[1]*e[25]+e[20]*e[2]*e[26]+e[23]*e[21]*e[6]+e[23]*e[3]*e[24]+e[23]*e[22]*e[7]+e[23]*e[4]*e[25]+e[23]*e[5]*e[26]-1.*e[26]*e[21]*e[3]-1.*e[26]*e[19]*e[1]-1.*e[26]*e[22]*e[4]-1.*e[26]*e[18]*e[0]+e[26]*e[25]*e[7]+e[26]*e[24]*e[6]+e[2]*e[19]*e[25]+e[2]*e[18]*e[24]+e[5]*e[22]*e[25]+e[5]*e[21]*e[24];
A[109]=e[19]*e[27]*e[21]+e[19]*e[18]*e[30]+e[19]*e[28]*e[22]+e[19]*e[29]*e[23]+e[19]*e[20]*e[32]+e[28]*e[18]*e[21]+e[28]*e[20]*e[23]+e[22]*e[30]*e[21]+e[22]*e[32]*e[23]+e[25]*e[30]*e[24]+e[25]*e[21]*e[33]+e[25]*e[22]*e[34]+e[25]*e[32]*e[26]+e[25]*e[23]*e[35]+e[34]*e[21]*e[24]+e[34]*e[23]*e[26]-1.*e[22]*e[27]*e[18]-1.*e[22]*e[33]*e[24]-1.*e[22]*e[29]*e[20]-1.*e[22]*e[35]*e[26]+.5000000000*ep2[19]*e[31]+1.500000000*e[31]*ep2[22]+.5000000000*e[31]*ep2[21]+.5000000000*e[31]*ep2[23]+.5000000000*e[31]*ep2[25]-.5000000000*e[31]*ep2[26]-.5000000000*e[31]*ep2[18]-.5000000000*e[31]*ep2[20]-.5000000000*e[31]*ep2[24];
A[108]=-.5000000000*e[13]*ep2[15]+.5000000000*e[13]*ep2[16]+.5000000000*e[13]*ep2[12]+e[16]*e[12]*e[15]+.5000000000*ep3[13]+e[10]*e[11]*e[14]+.5000000000*e[13]*ep2[14]-.5000000000*e[13]*ep2[17]-.5000000000*e[13]*ep2[11]-.5000000000*e[13]*ep2[9]+.5000000000*ep2[10]*e[13]+e[10]*e[9]*e[12]+e[16]*e[14]*e[17];
A[111]=-1.*e[13]*e[29]*e[2]-1.*e[31]*e[11]*e[2]-1.*e[31]*e[15]*e[6]-1.*e[31]*e[9]*e[0]+e[31]*e[14]*e[5]+e[31]*e[12]*e[3]-1.*e[31]*e[17]*e[8]+e[16]*e[30]*e[6]+e[16]*e[3]*e[33]+e[16]*e[4]*e[34]+e[16]*e[32]*e[8]+e[16]*e[5]*e[35]-1.*e[4]*e[27]*e[9]+e[4]*e[28]*e[10]-1.*e[4]*e[33]*e[15]-1.*e[4]*e[35]*e[17]-1.*e[4]*e[29]*e[11]+e[34]*e[12]*e[6]+e[34]*e[3]*e[15]+e[34]*e[14]*e[8]+e[34]*e[5]*e[17]+e[10]*e[27]*e[3]+e[10]*e[0]*e[30]+e[10]*e[29]*e[5]+e[10]*e[2]*e[32]+e[28]*e[9]*e[3]+e[28]*e[0]*e[12]+e[28]*e[11]*e[5]+e[28]*e[2]*e[14]+e[4]*e[30]*e[12]+e[4]*e[32]*e[14]+3.*e[4]*e[31]*e[13]+e[7]*e[30]*e[15]+e[7]*e[12]*e[33]+e[7]*e[32]*e[17]+e[7]*e[14]*e[35]+e[7]*e[31]*e[16]+e[7]*e[13]*e[34]+e[1]*e[27]*e[12]+e[1]*e[9]*e[30]+e[1]*e[29]*e[14]+e[1]*e[11]*e[32]+e[1]*e[28]*e[13]+e[1]*e[10]*e[31]-1.*e[13]*e[27]*e[0]+e[13]*e[32]*e[5]-1.*e[13]*e[33]*e[6]+e[13]*e[30]*e[3]-1.*e[13]*e[35]*e[8];
A[110]=e[25]*e[23]*e[26]+e[19]*e[20]*e[23]+e[19]*e[18]*e[21]+e[25]*e[21]*e[24]+.5000000000*ep3[22]+.5000000000*e[22]*ep2[23]+.5000000000*ep2[19]*e[22]-.5000000000*e[22]*ep2[18]-.5000000000*e[22]*ep2[24]+.5000000000*e[22]*ep2[21]+.5000000000*e[22]*ep2[25]-.5000000000*e[22]*ep2[20]-.5000000000*e[22]*ep2[26];
A[105]=e[34]*e[5]*e[8]+e[1]*e[27]*e[3]+e[1]*e[0]*e[30]+e[1]*e[28]*e[4]+e[1]*e[29]*e[5]+e[1]*e[2]*e[32]-1.*e[4]*e[27]*e[0]+e[4]*e[34]*e[7]+e[4]*e[32]*e[5]-1.*e[4]*e[33]*e[6]+e[4]*e[30]*e[3]-1.*e[4]*e[35]*e[8]-1.*e[4]*e[29]*e[2]+e[28]*e[0]*e[3]+e[28]*e[2]*e[5]+e[7]*e[30]*e[6]+e[7]*e[3]*e[33]+e[7]*e[32]*e[8]+e[7]*e[5]*e[35]+e[34]*e[3]*e[6]+.5000000000*ep2[1]*e[31]+1.500000000*e[31]*ep2[4]-.5000000000*e[31]*ep2[0]-.5000000000*e[31]*ep2[6]+.5000000000*e[31]*ep2[5]+.5000000000*e[31]*ep2[7]+.5000000000*e[31]*ep2[3]-.5000000000*e[31]*ep2[2]-.5000000000*e[31]*ep2[8];
A[104]=e[1]*e[20]*e[14]+e[1]*e[11]*e[23]+e[13]*e[21]*e[3]-1.*e[13]*e[26]*e[8]-1.*e[13]*e[20]*e[2]-1.*e[13]*e[18]*e[0]+e[13]*e[23]*e[5]-1.*e[13]*e[24]*e[6]-1.*e[22]*e[11]*e[2]-1.*e[22]*e[15]*e[6]-1.*e[22]*e[9]*e[0]+e[22]*e[14]*e[5]+e[22]*e[12]*e[3]-1.*e[22]*e[17]*e[8]+e[16]*e[21]*e[6]+e[16]*e[3]*e[24]+e[16]*e[4]*e[25]+e[16]*e[23]*e[8]+e[16]*e[5]*e[26]-1.*e[4]*e[24]*e[15]-1.*e[4]*e[26]*e[17]-1.*e[4]*e[20]*e[11]-1.*e[4]*e[18]*e[9]+e[25]*e[12]*e[6]+e[25]*e[3]*e[15]+e[25]*e[14]*e[8]+e[25]*e[5]*e[17]+e[10]*e[18]*e[3]+e[10]*e[0]*e[21]+e[10]*e[19]*e[4]+e[10]*e[1]*e[22]+e[10]*e[20]*e[5]+e[10]*e[2]*e[23]+e[19]*e[9]*e[3]+e[19]*e[0]*e[12]+e[19]*e[1]*e[13]+e[19]*e[11]*e[5]+e[19]*e[2]*e[14]+e[4]*e[21]*e[12]+e[4]*e[23]*e[14]+3.*e[4]*e[22]*e[13]+e[7]*e[21]*e[15]+e[7]*e[12]*e[24]+e[7]*e[23]*e[17]+e[7]*e[14]*e[26]+e[7]*e[22]*e[16]+e[7]*e[13]*e[25]+e[1]*e[18]*e[12]+e[1]*e[9]*e[21];
A[107]=e[10]*e[27]*e[12]+e[10]*e[9]*e[30]+e[10]*e[29]*e[14]+e[10]*e[11]*e[32]+e[10]*e[28]*e[13]+e[28]*e[11]*e[14]+e[28]*e[9]*e[12]+e[13]*e[30]*e[12]+e[13]*e[32]*e[14]+e[16]*e[30]*e[15]+e[16]*e[12]*e[33]+e[16]*e[32]*e[17]+e[16]*e[14]*e[35]+e[16]*e[13]*e[34]+e[34]*e[14]*e[17]+e[34]*e[12]*e[15]-1.*e[13]*e[27]*e[9]-1.*e[13]*e[33]*e[15]-1.*e[13]*e[35]*e[17]-1.*e[13]*e[29]*e[11]+.5000000000*ep2[10]*e[31]+.5000000000*e[31]*ep2[16]-.5000000000*e[31]*ep2[9]-.5000000000*e[31]*ep2[11]+.5000000000*e[31]*ep2[12]-.5000000000*e[31]*ep2[15]-.5000000000*e[31]*ep2[17]+.5000000000*e[31]*ep2[14]+1.500000000*e[31]*ep2[13];
A[106]=-.5000000000*e[4]*ep2[6]-.5000000000*e[4]*ep2[0]+e[1]*e[2]*e[5]+.5000000000*e[4]*ep2[7]+e[1]*e[0]*e[3]+e[7]*e[5]*e[8]-.5000000000*e[4]*ep2[8]+.5000000000*e[4]*ep2[3]+.5000000000*e[4]*ep2[5]+e[7]*e[3]*e[6]-.5000000000*e[4]*ep2[2]+.5000000000*ep3[4]+.5000000000*ep2[1]*e[4];
A[100]=e[34]*e[32]*e[35]-.5000000000*e[31]*ep2[35]+.5000000000*e[31]*ep2[34]+.5000000000*ep2[28]*e[31]+.5000000000*ep3[31]+.5000000000*e[31]*ep2[32]+e[34]*e[30]*e[33]-.5000000000*e[31]*ep2[27]+.5000000000*e[31]*ep2[30]-.5000000000*e[31]*ep2[33]-.5000000000*e[31]*ep2[29]+e[28]*e[29]*e[32]+e[28]*e[27]*e[30];
A[101]=e[1]*e[27]*e[30]+e[1]*e[29]*e[32]+e[1]*e[28]*e[31]+e[31]*e[30]*e[3]+e[31]*e[32]*e[5]+e[7]*e[30]*e[33]+e[7]*e[32]*e[35]+e[7]*e[31]*e[34]+e[28]*e[27]*e[3]+e[28]*e[0]*e[30]+e[28]*e[29]*e[5]+e[28]*e[2]*e[32]+e[34]*e[30]*e[6]+e[34]*e[3]*e[33]+e[34]*e[32]*e[8]+e[34]*e[5]*e[35]-1.*e[31]*e[27]*e[0]-1.*e[31]*e[33]*e[6]-1.*e[31]*e[35]*e[8]-1.*e[31]*e[29]*e[2]+.5000000000*e[4]*ep2[30]+.5000000000*e[4]*ep2[32]+1.500000000*e[4]*ep2[31]-.5000000000*e[4]*ep2[27]+.5000000000*e[4]*ep2[28]-.5000000000*e[4]*ep2[29]-.5000000000*e[4]*ep2[33]+.5000000000*e[4]*ep2[34]-.5000000000*e[4]*ep2[35];
A[102]=.5000000000*e[22]*ep2[30]+.5000000000*e[22]*ep2[32]+1.500000000*e[22]*ep2[31]+.5000000000*e[22]*ep2[34]-.5000000000*e[22]*ep2[27]-.5000000000*e[22]*ep2[29]-.5000000000*e[22]*ep2[33]-.5000000000*e[22]*ep2[35]+e[28]*e[18]*e[30]+e[28]*e[29]*e[23]+e[28]*e[20]*e[32]+e[31]*e[30]*e[21]+e[31]*e[32]*e[23]+e[25]*e[30]*e[33]+e[25]*e[32]*e[35]+e[25]*e[31]*e[34]+e[34]*e[30]*e[24]+e[34]*e[21]*e[33]+e[34]*e[32]*e[26]+e[34]*e[23]*e[35]-1.*e[31]*e[27]*e[18]-1.*e[31]*e[33]*e[24]-1.*e[31]*e[29]*e[20]-1.*e[31]*e[35]*e[26]+e[19]*e[27]*e[30]+e[19]*e[29]*e[32]+e[19]*e[28]*e[31]+e[28]*e[27]*e[21]+.5000000000*ep2[28]*e[22];
A[103]=e[16]*e[30]*e[33]+e[16]*e[32]*e[35]+e[10]*e[27]*e[30]+e[10]*e[29]*e[32]+e[10]*e[28]*e[31]+e[34]*e[30]*e[15]+e[34]*e[12]*e[33]+e[34]*e[32]*e[17]+e[34]*e[14]*e[35]+e[34]*e[31]*e[16]+e[28]*e[27]*e[12]+e[28]*e[9]*e[30]+e[28]*e[29]*e[14]+e[28]*e[11]*e[32]-1.*e[31]*e[27]*e[9]+e[31]*e[30]*e[12]+e[31]*e[32]*e[14]-1.*e[31]*e[33]*e[15]-1.*e[31]*e[35]*e[17]-1.*e[31]*e[29]*e[11]-.5000000000*e[13]*ep2[27]+.5000000000*e[13]*ep2[32]+.5000000000*e[13]*ep2[28]-.5000000000*e[13]*ep2[29]+1.500000000*e[13]*ep2[31]-.5000000000*e[13]*ep2[33]+.5000000000*e[13]*ep2[30]+.5000000000*e[13]*ep2[34]-.5000000000*e[13]*ep2[35];
A[96]=e[21]*e[23]*e[14]+e[21]*e[22]*e[13]+e[24]*e[21]*e[15]+e[24]*e[23]*e[17]+e[24]*e[14]*e[26]+e[24]*e[22]*e[16]+e[24]*e[13]*e[25]+e[15]*e[22]*e[25]+e[15]*e[23]*e[26]+e[9]*e[19]*e[22]+e[9]*e[18]*e[21]+e[9]*e[20]*e[23]+e[18]*e[20]*e[14]+e[18]*e[11]*e[23]+e[18]*e[19]*e[13]+e[18]*e[10]*e[22]-1.*e[21]*e[25]*e[16]-1.*e[21]*e[26]*e[17]-1.*e[21]*e[20]*e[11]-1.*e[21]*e[19]*e[10]+1.500000000*ep2[21]*e[12]+.5000000000*e[12]*ep2[24]-.5000000000*e[12]*ep2[26]+.5000000000*e[12]*ep2[18]+.5000000000*e[12]*ep2[23]-.5000000000*e[12]*ep2[19]-.5000000000*e[12]*ep2[20]+.5000000000*e[12]*ep2[22]-.5000000000*e[12]*ep2[25];
A[97]=-1.*e[12]*e[29]*e[20]-1.*e[12]*e[35]*e[26]-1.*e[12]*e[28]*e[19]-1.*e[12]*e[34]*e[25]+e[18]*e[29]*e[14]+e[18]*e[11]*e[32]+e[18]*e[28]*e[13]+e[18]*e[10]*e[31]+e[27]*e[20]*e[14]+e[27]*e[11]*e[23]+e[27]*e[19]*e[13]+e[27]*e[10]*e[22]+e[15]*e[30]*e[24]+e[15]*e[21]*e[33]+e[15]*e[31]*e[25]+e[15]*e[22]*e[34]+e[15]*e[32]*e[26]+e[15]*e[23]*e[35]-1.*e[21]*e[28]*e[10]-1.*e[21]*e[34]*e[16]-1.*e[21]*e[35]*e[17]-1.*e[21]*e[29]*e[11]-1.*e[30]*e[25]*e[16]-1.*e[30]*e[26]*e[17]-1.*e[30]*e[20]*e[11]-1.*e[30]*e[19]*e[10]+e[24]*e[32]*e[17]+e[24]*e[14]*e[35]+e[24]*e[31]*e[16]+e[24]*e[13]*e[34]+e[33]*e[23]*e[17]+e[33]*e[14]*e[26]+e[33]*e[22]*e[16]+e[33]*e[13]*e[25]+3.*e[12]*e[30]*e[21]+e[12]*e[31]*e[22]+e[12]*e[32]*e[23]+e[9]*e[27]*e[21]+e[9]*e[18]*e[30]+e[9]*e[28]*e[22]+e[9]*e[19]*e[31]+e[9]*e[29]*e[23]+e[9]*e[20]*e[32]+e[21]*e[32]*e[14]+e[21]*e[31]*e[13]+e[30]*e[23]*e[14]+e[30]*e[22]*e[13]+e[12]*e[27]*e[18]+e[12]*e[33]*e[24];
A[98]=e[0]*e[11]*e[5]+e[0]*e[2]*e[14]+e[9]*e[1]*e[4]+e[9]*e[0]*e[3]+e[9]*e[2]*e[5]+e[3]*e[13]*e[4]+e[3]*e[14]*e[5]+e[6]*e[3]*e[15]+e[6]*e[13]*e[7]+e[6]*e[4]*e[16]+e[6]*e[14]*e[8]+e[6]*e[5]*e[17]+e[15]*e[4]*e[7]+e[15]*e[5]*e[8]-1.*e[3]*e[11]*e[2]-1.*e[3]*e[10]*e[1]-1.*e[3]*e[16]*e[7]-1.*e[3]*e[17]*e[8]+e[0]*e[10]*e[4]+e[0]*e[1]*e[13]+1.500000000*e[12]*ep2[3]+.5000000000*e[12]*ep2[4]+.5000000000*e[12]*ep2[5]+.5000000000*e[12]*ep2[6]+.5000000000*ep2[0]*e[12]-.5000000000*e[12]*ep2[1]-.5000000000*e[12]*ep2[7]-.5000000000*e[12]*ep2[2]-.5000000000*e[12]*ep2[8];
A[99]=e[21]*e[24]*e[6]+e[0]*e[19]*e[22]+e[0]*e[20]*e[23]+e[24]*e[22]*e[7]+e[24]*e[4]*e[25]+e[24]*e[23]*e[8]+e[24]*e[5]*e[26]+e[6]*e[22]*e[25]+e[6]*e[23]*e[26]+e[18]*e[0]*e[21]+e[18]*e[19]*e[4]+e[18]*e[1]*e[22]+e[18]*e[20]*e[5]+e[18]*e[2]*e[23]+e[21]*e[22]*e[4]+e[21]*e[23]*e[5]-1.*e[21]*e[26]*e[8]-1.*e[21]*e[20]*e[2]-1.*e[21]*e[19]*e[1]-1.*e[21]*e[25]*e[7]+1.500000000*ep2[21]*e[3]+.5000000000*e[3]*ep2[22]+.5000000000*e[3]*ep2[23]+.5000000000*e[3]*ep2[24]-.5000000000*e[3]*ep2[26]-.5000000000*e[3]*ep2[19]-.5000000000*e[3]*ep2[20]-.5000000000*e[3]*ep2[25]+.5000000000*ep2[18]*e[3];
A[127]=e[11]*e[27]*e[12]+e[11]*e[9]*e[30]+e[11]*e[29]*e[14]+e[11]*e[28]*e[13]+e[11]*e[10]*e[31]+e[29]*e[9]*e[12]+e[29]*e[10]*e[13]+e[14]*e[30]*e[12]+e[14]*e[31]*e[13]+e[17]*e[30]*e[15]+e[17]*e[12]*e[33]+e[17]*e[14]*e[35]+e[17]*e[31]*e[16]+e[17]*e[13]*e[34]+e[35]*e[12]*e[15]+e[35]*e[13]*e[16]-1.*e[14]*e[27]*e[9]-1.*e[14]*e[28]*e[10]-1.*e[14]*e[33]*e[15]-1.*e[14]*e[34]*e[16]+.5000000000*ep2[11]*e[32]-.5000000000*e[32]*ep2[16]-.5000000000*e[32]*ep2[9]+.5000000000*e[32]*ep2[12]-.5000000000*e[32]*ep2[15]+.5000000000*e[32]*ep2[17]-.5000000000*e[32]*ep2[10]+1.500000000*e[32]*ep2[14]+.5000000000*e[32]*ep2[13];
A[126]=e[8]*e[3]*e[6]+.5000000000*ep2[2]*e[5]-.5000000000*e[5]*ep2[0]+.5000000000*e[5]*ep2[4]-.5000000000*e[5]*ep2[6]+.5000000000*e[5]*ep2[8]+e[8]*e[4]*e[7]+.5000000000*ep3[5]+e[2]*e[0]*e[3]+.5000000000*e[5]*ep2[3]-.5000000000*e[5]*ep2[7]+e[2]*e[1]*e[4]-.5000000000*e[5]*ep2[1];
A[125]=e[2]*e[27]*e[3]+e[2]*e[0]*e[30]+e[2]*e[28]*e[4]+e[2]*e[1]*e[31]+e[2]*e[29]*e[5]-1.*e[5]*e[27]*e[0]-1.*e[5]*e[34]*e[7]-1.*e[5]*e[33]*e[6]+e[5]*e[30]*e[3]+e[5]*e[35]*e[8]-1.*e[5]*e[28]*e[1]+e[5]*e[31]*e[4]+e[29]*e[1]*e[4]+e[29]*e[0]*e[3]+e[8]*e[30]*e[6]+e[8]*e[3]*e[33]+e[8]*e[31]*e[7]+e[8]*e[4]*e[34]+e[35]*e[4]*e[7]+e[35]*e[3]*e[6]+.5000000000*ep2[2]*e[32]+1.500000000*e[32]*ep2[5]+.5000000000*e[32]*ep2[4]-.5000000000*e[32]*ep2[0]-.5000000000*e[32]*ep2[6]-.5000000000*e[32]*ep2[1]-.5000000000*e[32]*ep2[7]+.5000000000*e[32]*ep2[3]+.5000000000*e[32]*ep2[8];
A[124]=-1.*e[14]*e[19]*e[1]+e[14]*e[22]*e[4]-1.*e[14]*e[18]*e[0]-1.*e[14]*e[25]*e[7]-1.*e[14]*e[24]*e[6]-1.*e[23]*e[10]*e[1]+e[23]*e[13]*e[4]-1.*e[23]*e[16]*e[7]-1.*e[23]*e[15]*e[6]-1.*e[23]*e[9]*e[0]+e[23]*e[12]*e[3]+e[17]*e[21]*e[6]+e[17]*e[3]*e[24]+e[17]*e[22]*e[7]+e[17]*e[4]*e[25]+e[17]*e[5]*e[26]-1.*e[5]*e[24]*e[15]-1.*e[5]*e[25]*e[16]-1.*e[5]*e[18]*e[9]-1.*e[5]*e[19]*e[10]+e[26]*e[12]*e[6]+e[26]*e[3]*e[15]+e[26]*e[13]*e[7]+e[26]*e[4]*e[16]+e[11]*e[18]*e[3]+e[11]*e[0]*e[21]+e[11]*e[19]*e[4]+e[11]*e[1]*e[22]+e[11]*e[20]*e[5]+e[11]*e[2]*e[23]+e[20]*e[9]*e[3]+e[20]*e[0]*e[12]+e[20]*e[10]*e[4]+e[20]*e[1]*e[13]+e[20]*e[2]*e[14]+e[5]*e[21]*e[12]+3.*e[5]*e[23]*e[14]+e[5]*e[22]*e[13]+e[8]*e[21]*e[15]+e[8]*e[12]*e[24]+e[8]*e[23]*e[17]+e[8]*e[14]*e[26]+e[8]*e[22]*e[16]+e[8]*e[13]*e[25]+e[2]*e[18]*e[12]+e[2]*e[9]*e[21]+e[2]*e[19]*e[13]+e[2]*e[10]*e[22]+e[14]*e[21]*e[3];
A[123]=-.5000000000*e[14]*ep2[27]+1.500000000*e[14]*ep2[32]-.5000000000*e[14]*ep2[28]+.5000000000*e[14]*ep2[29]+.5000000000*e[14]*ep2[31]-.5000000000*e[14]*ep2[33]+.5000000000*e[14]*ep2[30]-.5000000000*e[14]*ep2[34]+.5000000000*e[14]*ep2[35]+e[11]*e[27]*e[30]+e[11]*e[29]*e[32]+e[11]*e[28]*e[31]+e[35]*e[30]*e[15]+e[35]*e[12]*e[33]+e[35]*e[32]*e[17]+e[35]*e[31]*e[16]+e[35]*e[13]*e[34]+e[29]*e[27]*e[12]+e[29]*e[9]*e[30]+e[29]*e[28]*e[13]+e[29]*e[10]*e[31]-1.*e[32]*e[27]*e[9]+e[32]*e[30]*e[12]-1.*e[32]*e[28]*e[10]+e[32]*e[31]*e[13]-1.*e[32]*e[33]*e[15]-1.*e[32]*e[34]*e[16]+e[17]*e[30]*e[33]+e[17]*e[31]*e[34];
A[122]=-.5000000000*e[23]*ep2[33]-.5000000000*e[23]*ep2[34]+.5000000000*ep2[29]*e[23]+.5000000000*e[23]*ep2[30]+1.500000000*e[23]*ep2[32]+.5000000000*e[23]*ep2[31]+.5000000000*e[23]*ep2[35]-.5000000000*e[23]*ep2[27]-.5000000000*e[23]*ep2[28]+e[32]*e[30]*e[21]+e[32]*e[31]*e[22]+e[26]*e[30]*e[33]+e[26]*e[32]*e[35]+e[26]*e[31]*e[34]+e[35]*e[30]*e[24]+e[35]*e[21]*e[33]+e[35]*e[31]*e[25]+e[35]*e[22]*e[34]-1.*e[32]*e[27]*e[18]-1.*e[32]*e[33]*e[24]-1.*e[32]*e[28]*e[19]-1.*e[32]*e[34]*e[25]+e[20]*e[27]*e[30]+e[20]*e[29]*e[32]+e[20]*e[28]*e[31]+e[29]*e[27]*e[21]+e[29]*e[18]*e[30]+e[29]*e[28]*e[22]+e[29]*e[19]*e[31];
A[121]=e[2]*e[27]*e[30]+e[2]*e[29]*e[32]+e[2]*e[28]*e[31]+e[32]*e[30]*e[3]+e[32]*e[31]*e[4]+e[8]*e[30]*e[33]+e[8]*e[32]*e[35]+e[8]*e[31]*e[34]+e[29]*e[27]*e[3]+e[29]*e[0]*e[30]+e[29]*e[28]*e[4]+e[29]*e[1]*e[31]+e[35]*e[30]*e[6]+e[35]*e[3]*e[33]+e[35]*e[31]*e[7]+e[35]*e[4]*e[34]-1.*e[32]*e[27]*e[0]-1.*e[32]*e[34]*e[7]-1.*e[32]*e[33]*e[6]-1.*e[32]*e[28]*e[1]+.5000000000*e[5]*ep2[30]+1.500000000*e[5]*ep2[32]+.5000000000*e[5]*ep2[31]-.5000000000*e[5]*ep2[27]-.5000000000*e[5]*ep2[28]+.5000000000*e[5]*ep2[29]-.5000000000*e[5]*ep2[33]-.5000000000*e[5]*ep2[34]+.5000000000*e[5]*ep2[35];
A[120]=.5000000000*e[32]*ep2[31]+.5000000000*e[32]*ep2[35]-.5000000000*e[32]*ep2[27]+e[29]*e[27]*e[30]+e[29]*e[28]*e[31]+e[35]*e[30]*e[33]+e[35]*e[31]*e[34]+.5000000000*ep2[29]*e[32]+.5000000000*ep3[32]-.5000000000*e[32]*ep2[33]-.5000000000*e[32]*ep2[34]+.5000000000*e[32]*ep2[30]-.5000000000*e[32]*ep2[28];
A[118]=e[10]*e[1]*e[4]+e[10]*e[0]*e[3]+e[10]*e[2]*e[5]+e[4]*e[12]*e[3]+e[4]*e[14]*e[5]+e[7]*e[12]*e[6]+e[7]*e[3]*e[15]+e[7]*e[4]*e[16]+e[7]*e[14]*e[8]+e[7]*e[5]*e[17]+e[16]*e[3]*e[6]+e[16]*e[5]*e[8]-1.*e[4]*e[11]*e[2]-1.*e[4]*e[15]*e[6]-1.*e[4]*e[9]*e[0]-1.*e[4]*e[17]*e[8]+e[1]*e[9]*e[3]+e[1]*e[0]*e[12]+e[1]*e[11]*e[5]+e[1]*e[2]*e[14]+1.500000000*e[13]*ep2[4]+.5000000000*e[13]*ep2[3]+.5000000000*e[13]*ep2[5]+.5000000000*e[13]*ep2[7]+.5000000000*ep2[1]*e[13]-.5000000000*e[13]*ep2[0]-.5000000000*e[13]*ep2[6]-.5000000000*e[13]*ep2[2]-.5000000000*e[13]*ep2[8];
A[119]=e[25]*e[21]*e[6]+e[25]*e[3]*e[24]+e[25]*e[23]*e[8]+e[25]*e[5]*e[26]+e[7]*e[21]*e[24]+e[7]*e[23]*e[26]+e[19]*e[18]*e[3]+e[19]*e[0]*e[21]+e[19]*e[1]*e[22]+e[19]*e[20]*e[5]+e[19]*e[2]*e[23]+e[22]*e[21]*e[3]+e[22]*e[23]*e[5]-1.*e[22]*e[26]*e[8]-1.*e[22]*e[20]*e[2]-1.*e[22]*e[18]*e[0]+e[22]*e[25]*e[7]-1.*e[22]*e[24]*e[6]+e[1]*e[18]*e[21]+e[1]*e[20]*e[23]+.5000000000*e[4]*ep2[25]-.5000000000*e[4]*ep2[26]-.5000000000*e[4]*ep2[18]-.5000000000*e[4]*ep2[20]-.5000000000*e[4]*ep2[24]+.5000000000*ep2[19]*e[4]+1.500000000*ep2[22]*e[4]+.5000000000*e[4]*ep2[21]+.5000000000*e[4]*ep2[23];
A[116]=e[22]*e[21]*e[12]+e[22]*e[23]*e[14]+e[25]*e[21]*e[15]+e[25]*e[12]*e[24]+e[25]*e[23]*e[17]+e[25]*e[14]*e[26]+e[25]*e[22]*e[16]+e[16]*e[21]*e[24]+e[16]*e[23]*e[26]+e[10]*e[19]*e[22]+e[10]*e[18]*e[21]+e[10]*e[20]*e[23]+e[19]*e[18]*e[12]+e[19]*e[9]*e[21]+e[19]*e[20]*e[14]+e[19]*e[11]*e[23]-1.*e[22]*e[24]*e[15]-1.*e[22]*e[26]*e[17]-1.*e[22]*e[20]*e[11]-1.*e[22]*e[18]*e[9]-.5000000000*e[13]*ep2[26]-.5000000000*e[13]*ep2[18]+.5000000000*e[13]*ep2[23]+.5000000000*e[13]*ep2[19]-.5000000000*e[13]*ep2[20]-.5000000000*e[13]*ep2[24]+.5000000000*e[13]*ep2[21]+1.500000000*ep2[22]*e[13]+.5000000000*e[13]*ep2[25];
A[117]=e[13]*e[30]*e[21]+3.*e[13]*e[31]*e[22]+e[13]*e[32]*e[23]+e[10]*e[27]*e[21]+e[10]*e[18]*e[30]+e[10]*e[28]*e[22]+e[10]*e[19]*e[31]+e[10]*e[29]*e[23]+e[10]*e[20]*e[32]+e[22]*e[30]*e[12]+e[22]*e[32]*e[14]+e[31]*e[21]*e[12]+e[31]*e[23]*e[14]-1.*e[13]*e[27]*e[18]-1.*e[13]*e[33]*e[24]-1.*e[13]*e[29]*e[20]-1.*e[13]*e[35]*e[26]+e[13]*e[28]*e[19]+e[13]*e[34]*e[25]+e[19]*e[27]*e[12]+e[19]*e[9]*e[30]+e[19]*e[29]*e[14]+e[19]*e[11]*e[32]+e[28]*e[18]*e[12]+e[28]*e[9]*e[21]+e[28]*e[20]*e[14]+e[28]*e[11]*e[23]+e[16]*e[30]*e[24]+e[16]*e[21]*e[33]+e[16]*e[31]*e[25]+e[16]*e[22]*e[34]+e[16]*e[32]*e[26]+e[16]*e[23]*e[35]-1.*e[22]*e[27]*e[9]-1.*e[22]*e[33]*e[15]-1.*e[22]*e[35]*e[17]-1.*e[22]*e[29]*e[11]-1.*e[31]*e[24]*e[15]-1.*e[31]*e[26]*e[17]-1.*e[31]*e[20]*e[11]-1.*e[31]*e[18]*e[9]+e[25]*e[30]*e[15]+e[25]*e[12]*e[33]+e[25]*e[32]*e[17]+e[25]*e[14]*e[35]+e[34]*e[21]*e[15]+e[34]*e[12]*e[24]+e[34]*e[23]*e[17]+e[34]*e[14]*e[26];
A[114]=e[19]*e[11]*e[14]+e[19]*e[9]*e[12]+e[19]*e[10]*e[13]+e[13]*e[21]*e[12]+e[13]*e[23]*e[14]+e[16]*e[21]*e[15]+e[16]*e[12]*e[24]+e[16]*e[23]*e[17]+e[16]*e[14]*e[26]+e[16]*e[13]*e[25]+e[25]*e[14]*e[17]+e[25]*e[12]*e[15]-1.*e[13]*e[24]*e[15]-1.*e[13]*e[26]*e[17]-1.*e[13]*e[20]*e[11]-1.*e[13]*e[18]*e[9]+e[10]*e[18]*e[12]+e[10]*e[9]*e[21]+e[10]*e[20]*e[14]+e[10]*e[11]*e[23]+1.500000000*e[22]*ep2[13]+.5000000000*e[22]*ep2[14]+.5000000000*e[22]*ep2[12]+.5000000000*e[22]*ep2[16]+.5000000000*ep2[10]*e[22]-.5000000000*e[22]*ep2[9]-.5000000000*e[22]*ep2[11]-.5000000000*e[22]*ep2[15]-.5000000000*e[22]*ep2[17];
A[115]=e[13]*e[12]*e[3]+e[13]*e[14]*e[5]+e[16]*e[12]*e[6]+e[16]*e[3]*e[15]+e[16]*e[13]*e[7]+e[16]*e[14]*e[8]+e[16]*e[5]*e[17]+e[7]*e[14]*e[17]+e[7]*e[12]*e[15]+e[1]*e[11]*e[14]+e[1]*e[9]*e[12]+e[1]*e[10]*e[13]+e[10]*e[9]*e[3]+e[10]*e[0]*e[12]+e[10]*e[11]*e[5]+e[10]*e[2]*e[14]-1.*e[13]*e[11]*e[2]-1.*e[13]*e[15]*e[6]-1.*e[13]*e[9]*e[0]-1.*e[13]*e[17]*e[8]+1.500000000*ep2[13]*e[4]+.5000000000*e[4]*ep2[16]-.5000000000*e[4]*ep2[9]-.5000000000*e[4]*ep2[11]+.5000000000*e[4]*ep2[12]-.5000000000*e[4]*ep2[15]-.5000000000*e[4]*ep2[17]+.5000000000*e[4]*ep2[10]+.5000000000*e[4]*ep2[14];
A[112]=e[19]*e[1]*e[4]+e[19]*e[0]*e[3]+e[19]*e[2]*e[5]+e[4]*e[21]*e[3]+e[4]*e[23]*e[5]+e[7]*e[21]*e[6]+e[7]*e[3]*e[24]+e[7]*e[4]*e[25]+e[7]*e[23]*e[8]+e[7]*e[5]*e[26]+e[25]*e[3]*e[6]+e[25]*e[5]*e[8]+e[1]*e[18]*e[3]+e[1]*e[0]*e[21]+e[1]*e[20]*e[5]+e[1]*e[2]*e[23]-1.*e[4]*e[26]*e[8]-1.*e[4]*e[20]*e[2]-1.*e[4]*e[18]*e[0]-1.*e[4]*e[24]*e[6]+1.500000000*e[22]*ep2[4]-.5000000000*e[22]*ep2[0]-.5000000000*e[22]*ep2[6]+.5000000000*e[22]*ep2[5]+.5000000000*e[22]*ep2[1]+.5000000000*e[22]*ep2[7]+.5000000000*e[22]*ep2[3]-.5000000000*e[22]*ep2[2]-.5000000000*e[22]*ep2[8];
A[113]=-1.*e[31]*e[20]*e[2]-1.*e[31]*e[18]*e[0]+e[31]*e[23]*e[5]-1.*e[31]*e[24]*e[6]+e[7]*e[30]*e[24]+e[7]*e[21]*e[33]+e[7]*e[32]*e[26]+e[7]*e[23]*e[35]+e[25]*e[30]*e[6]+e[25]*e[3]*e[33]+e[25]*e[31]*e[7]+e[25]*e[4]*e[34]+e[25]*e[32]*e[8]+e[25]*e[5]*e[35]+e[34]*e[21]*e[6]+e[34]*e[3]*e[24]+e[34]*e[22]*e[7]+e[34]*e[23]*e[8]+e[34]*e[5]*e[26]+e[1]*e[27]*e[21]+e[1]*e[18]*e[30]+e[1]*e[28]*e[22]+e[1]*e[19]*e[31]+e[1]*e[29]*e[23]+e[1]*e[20]*e[32]+e[19]*e[27]*e[3]+e[19]*e[0]*e[30]+e[19]*e[28]*e[4]+e[19]*e[29]*e[5]+e[19]*e[2]*e[32]+e[28]*e[18]*e[3]+e[28]*e[0]*e[21]+e[28]*e[20]*e[5]+e[28]*e[2]*e[23]+e[4]*e[30]*e[21]+3.*e[4]*e[31]*e[22]+e[4]*e[32]*e[23]-1.*e[4]*e[27]*e[18]-1.*e[4]*e[33]*e[24]-1.*e[4]*e[29]*e[20]-1.*e[4]*e[35]*e[26]-1.*e[22]*e[27]*e[0]+e[22]*e[32]*e[5]-1.*e[22]*e[33]*e[6]+e[22]*e[30]*e[3]-1.*e[22]*e[35]*e[8]-1.*e[22]*e[29]*e[2]+e[31]*e[21]*e[3]-1.*e[31]*e[26]*e[8];
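    // The expressions above appear to be machine-generated: they fill the 10x20 coefficient
    // matrix of polynomial constraints used by the five-point essential matrix solver.
    // The permutation below reorders the 20 columns into the order expected by the rest of
    // the solver.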
int perm[20] = {6, 8, 18, 15, 12, 5, 14, 7, 4, 11, 19, 13, 1, 16, 17, 3, 10, 9, 2, 0};
double AA[200];
for (int i = 0; i < 20; i++)
{
for (int j = 0; j < 10; j++) AA[i + j * 20] = A[perm[i] + j * 20];
}
for (int i = 0; i < 200; i++)
{
A[i] = AA[i];
}
}
void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const
{
Mat X1 = _m1.getMat(), X2 = _m2.getMat(), model = _model.getMat();
const Point2d* x1ptr = X1.ptr<Point2d>();
const Point2d* x2ptr = X2.ptr<Point2d>();
int n = X1.checkVector(2);
Matx33d E(model.ptr<double>());
_err.create(n, 1, CV_32F);
Mat err = _err.getMat();
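        // For every correspondence the squared epipolar error is normalized by the gradient
        // magnitude (a Sampson-style distance):
        //   err_i = (x2^T*E*x1)^2 / ((E*x1)_x^2 + (E*x1)_y^2 + (E^T*x2)_x^2 + (E^T*x2)_y^2)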
for (int i = 0; i < n; i++)
{
Vec3d x1(x1ptr[i].x, x1ptr[i].y, 1.);
Vec3d x2(x2ptr[i].x, x2ptr[i].y, 1.);
Vec3d Ex1 = E * x1;
Vec3d Etx2 = E.t() * x2;
double x2tEx1 = x2.dot(Ex1);
double a = Ex1[0] * Ex1[0];
double b = Ex1[1] * Ex1[1];
double c = Etx2[0] * Etx2[0];
double d = Etx2[1] * Etx2[1];
err.at<float>(i) = (float)(x2tEx1 * x2tEx1 / (a + b + c + d));
}
}
};
}
// Input should be a vector of n 2D points or an Nx2 matrix
cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, double focal, Point2d pp,
int method, double prob, double threshold, OutputArray _mask)
{
Mat points1, points2;
_points1.getMat().convertTo(points1, CV_64F);
_points2.getMat().convertTo(points2, CV_64F);
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 5 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
if( points1.channels() > 1 )
{
points1 = points1.reshape(1, npoints);
points2 = points2.reshape(1, npoints);
}
double ifocal = focal != 0 ? 1./focal : 1.;
for( int i = 0; i < npoints; i++ )
{
points1.at<double>(i, 0) = (points1.at<double>(i, 0) - pp.x)*ifocal;
points1.at<double>(i, 1) = (points1.at<double>(i, 1) - pp.y)*ifocal;
points2.at<double>(i, 0) = (points2.at<double>(i, 0) - pp.x)*ifocal;
points2.at<double>(i, 1) = (points2.at<double>(i, 1) - pp.y)*ifocal;
}
// Reshape the data to fit the OpenCV RANSAC function
points1 = points1.reshape(2, npoints);
points2 = points2.reshape(2, npoints);
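    // The threshold supplied by the caller is expressed in pixels; the point coordinates
    // were scaled by 1/focal above, so the threshold has to be scaled the same way.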
threshold /= focal;
Mat E;
if( method == RANSAC )
createRANSACPointSetRegistrator(new EMEstimatorCallback, 5, threshold, prob)->run(points1, points2, E, _mask);
else
createLMeDSPointSetRegistrator(new EMEstimatorCallback, 5, prob)->run(points1, points2, E, _mask);
return E;
}
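/*
 A minimal usage sketch (not part of the library) of how findEssentialMat and recoverPose
 are meant to be chained. The names pts1, pts2, focal and pp are hypothetical and would
 come from the caller's feature matching and camera calibration:

     std::vector<cv::Point2f> pts1, pts2;   // matched image points, in pixels
     double focal = 700.0;                  // assumed focal length, in pixels
     cv::Point2d pp(640.0, 360.0);          // assumed principal point
     cv::Mat mask;
     cv::Mat E = cv::findEssentialMat(pts1, pts2, focal, pp, cv::RANSAC, 0.999, 1.0, mask);
     cv::Mat R, t;
     int inliers = cv::recoverPose(E, pts1, pts2, R, t, focal, pp, mask);
*/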
int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, OutputArray _R,
OutputArray _t, double focal, Point2d pp, InputOutputArray _mask)
{
Mat points1, points2;
_points1.getMat().copyTo(points1);
_points2.getMat().copyTo(points2);
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
if (points1.channels() > 1)
{
points1 = points1.reshape(1, npoints);
points2 = points2.reshape(1, npoints);
}
points1.convertTo(points1, CV_64F);
points2.convertTo(points2, CV_64F);
points1.col(0) = (points1.col(0) - pp.x) / focal;
points2.col(0) = (points2.col(0) - pp.x) / focal;
points1.col(1) = (points1.col(1) - pp.y) / focal;
points2.col(1) = (points2.col(1) - pp.y) / focal;
points1 = points1.t();
points2 = points2.t();
Mat R1, R2, t;
decomposeEssentialMat(E, R1, R2, t);
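    // An essential matrix yields four pose hypotheses for the second camera:
    // (R1, t), (R2, t), (R1, -t) and (R2, -t). P1..P4 below are the corresponding 3x4
    // camera matrices, with the first camera fixed at P0 = [I | 0]; the cheirality check
    // further down keeps the hypothesis that puts the most triangulated points in front
    // of both cameras.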
Mat P0 = Mat::eye(3, 4, R1.type());
Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type());
P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0;
P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0;
P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0;
P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0;
// Do the cheirality check.
    // Notice that here a threshold dist is used to filter
    // out far-away points (i.e. infinite points), since
    // their depth may vary between positive and negative.
double dist = 50.0;
Mat Q;
triangulatePoints(P0, P1, points1, points2, Q);
Mat mask1 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask1 = (Q.row(2) < dist) & mask1;
Q = P1 * Q;
mask1 = (Q.row(2) > 0) & mask1;
mask1 = (Q.row(2) < dist) & mask1;
triangulatePoints(P0, P2, points1, points2, Q);
Mat mask2 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask2 = (Q.row(2) < dist) & mask2;
Q = P2 * Q;
mask2 = (Q.row(2) > 0) & mask2;
mask2 = (Q.row(2) < dist) & mask2;
triangulatePoints(P0, P3, points1, points2, Q);
Mat mask3 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask3 = (Q.row(2) < dist) & mask3;
Q = P3 * Q;
mask3 = (Q.row(2) > 0) & mask3;
mask3 = (Q.row(2) < dist) & mask3;
triangulatePoints(P0, P4, points1, points2, Q);
Mat mask4 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask4 = (Q.row(2) < dist) & mask4;
Q = P4 * Q;
mask4 = (Q.row(2) > 0) & mask4;
mask4 = (Q.row(2) < dist) & mask4;
// If _mask is given, then use it to filter outliers.
if (_mask.needed())
{
_mask.create(1, npoints, CV_8U, -1, true);
Mat mask = _mask.getMat();
bitwise_and(mask, mask1, mask1);
bitwise_and(mask, mask2, mask2);
bitwise_and(mask, mask3, mask3);
bitwise_and(mask, mask4, mask4);
}
CV_Assert(_R.needed() && _t.needed());
_R.create(3, 3, R1.type());
_t.create(3, 1, t.type());
int good1 = countNonZero(mask1);
int good2 = countNonZero(mask2);
int good3 = countNonZero(mask3);
int good4 = countNonZero(mask4);
if (good1 >= good2 && good1 >= good3 && good1 >= good4)
{
R1.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask1.copyTo(_mask);
return good1;
}
else if (good2 >= good1 && good2 >= good3 && good2 >= good4)
{
R2.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask2.copyTo(_mask);
return good2;
}
else if (good3 >= good1 && good3 >= good2 && good3 >= good4)
{
t = -t;
R1.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask3.copyTo(_mask);
return good3;
}
else
{
t = -t;
R2.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask4.copyTo(_mask);
return good4;
}
}
void cv::decomposeEssentialMat( InputArray _E, OutputArray _R1, OutputArray _R2, OutputArray _t )
{
Mat E = _E.getMat().reshape(1, 3);
CV_Assert(E.cols == 3 && E.rows == 3);
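    // SVD-based decomposition: with E = U * diag(1, 1, 0) * Vt and the matrix W defined
    // below, the two rotation candidates are R1 = U * W * Vt and R2 = U * W.t() * Vt, and
    // the translation direction is the third column of U (recovered only up to sign).
    // U and Vt are sign-corrected first so that both are proper rotations.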
Mat D, U, Vt;
SVD::compute(E, D, U, Vt);
if (determinant(U) < 0) U *= -1.;
if (determinant(Vt) < 0) Vt *= -1.;
Mat W = (Mat_<double>(3, 3) << 0, 1, 0, -1, 0, 0, 0, 0, 1);
W.convertTo(W, E.type());
Mat R1, R2, t;
R1 = U * W * Vt;
R2 = U * W.t() * Vt;
t = U.col(2) * 1.0;
R1.copyTo(_R1);
R2.copyTo(_R2);
t.copyTo(_t);
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,226 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <stdio.h>
/*
This is a C++ translation of the Matlab LMSolve package by Miroslav Balda.
Here is the original copyright:
============================================================================
Copyright (c) 2007, Miroslav Balda
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
namespace cv
{
class LMSolverImpl : public LMSolver
{
public:
    LMSolverImpl() : maxIters(100) { init(); }
LMSolverImpl(const Ptr<LMSolver::Callback>& _cb, int _maxIters) : cb(_cb), maxIters(_maxIters) { init(); }
void init()
{
epsx = epsf = FLT_EPSILON;
printInterval = 0;
}
int run(InputOutputArray _param0) const
{
Mat param0 = _param0.getMat(), x, xd, r, rd, J, A, Ap, v, temp_d, d;
int ptype = param0.type();
CV_Assert( (param0.cols == 1 || param0.rows == 1) && (ptype == CV_32F || ptype == CV_64F));
CV_Assert( !cb.empty() );
int lx = param0.rows + param0.cols - 1;
param0.convertTo(x, CV_64F);
if( x.cols != 1 )
transpose(x, x);
if( !cb->compute(x, r, J) )
return -1;
double S = norm(r, NORM_L2SQR);
int nfJ = 2;
mulTransposed(J, A, true);
gemm(J, r, 1, noArray(), 0, v, GEMM_1_T);
Mat D = A.diag().clone();
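        // A = J^T * J (the Gauss-Newton approximation of the Hessian), v = J^T * r is the
        // gradient of 0.5*S, and D stores the diagonal of A. Each iteration solves the
        // damped normal equations (A + lambda * diag(D)) * d = v and tries the step x - d.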
const double Rlo = 0.25, Rhi = 0.75;
double lambda = 1, lc = 0.75;
int i, iter = 0;
if( printInterval != 0 )
{
printf("************************************************************************************\n");
printf("\titr\tnfJ\t\tSUM(r^2)\t\tx\t\tdx\t\tl\t\tlc\n");
printf("************************************************************************************\n");
}
for( ;; )
{
CV_Assert( A.type() == CV_64F && A.rows == lx );
A.copyTo(Ap);
for( i = 0; i < lx; i++ )
Ap.at<double>(i, i) += lambda*D.at<double>(i);
solve(Ap, v, d, DECOMP_EIG);
subtract(x, d, xd);
if( !cb->compute(xd, rd, noArray()) )
return -1;
nfJ++;
double Sd = norm(rd, NORM_L2SQR);
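            // Gain ratio: dS = d^T * (2*v - A*d) is the decrease of S predicted by the
            // linearized model, and R compares it with the actual decrease S - Sd.
            // R > Rhi shrinks lambda (towards a Gauss-Newton step); R < Rlo increases it
            // (towards a damped, gradient-descent-like step).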
gemm(A, d, -1, v, 2, temp_d);
double dS = d.dot(temp_d);
double R = (S - Sd)/(fabs(dS) > DBL_EPSILON ? dS : 1);
if( R > Rhi )
{
lambda *= 0.5;
if( lambda < lc )
lambda = 0;
}
else if( R < Rlo )
{
// find new nu if R too low
double t = d.dot(v);
double nu = (Sd - S)/(fabs(t) > DBL_EPSILON ? t : 1) + 2;
nu = std::min(std::max(nu, 2.), 10.);
if( lambda == 0 )
{
invert(A, Ap, DECOMP_EIG);
double maxval = DBL_EPSILON;
for( i = 0; i < lx; i++ )
maxval = std::max(maxval, std::abs(Ap.at<double>(i,i)));
lambda = lc = 1./maxval;
nu *= 0.5;
}
lambda *= nu;
}
if( Sd < S )
{
nfJ++;
S = Sd;
std::swap(x, xd);
if( !cb->compute(x, r, J) )
return -1;
mulTransposed(J, A, true);
gemm(J, r, 1, noArray(), 0, v, GEMM_1_T);
}
iter++;
bool proceed = iter < maxIters && norm(d, NORM_INF) >= epsx && norm(r, NORM_INF) >= epsf;
if( printInterval != 0 && (iter % printInterval == 0 || iter == 1 || !proceed) )
{
printf("%c%10d %10d %15.4e %16.4e %17.4e %16.4e %17.4e\n",
(proceed ? ' ' : '*'), iter, nfJ, S, x.at<double>(0), d.at<double>(0), lambda, lc);
}
if(!proceed)
break;
}
if( param0.size != x.size )
transpose(x, x);
x.convertTo(param0, ptype);
if( iter == maxIters )
iter = -iter;
return iter;
}
void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<LMSolver::Callback> cb;
double epsx;
double epsf;
int maxIters;
int printInterval;
};
CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
obj.info()->addParam(obj, "epsx", obj.epsx);
obj.info()->addParam(obj, "epsf", obj.epsf);
obj.info()->addParam(obj, "maxIters", obj.maxIters);
obj.info()->addParam(obj, "printInterval", obj.printInterval));
Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
{
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
return new LMSolverImpl(cb, maxIters);
}
}
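/*
 A minimal sketch (not part of the library) of how this internal solver can be driven:
 a callback that fits a line y = a*x + b to sample data. The names LineFitCB, xs and ys
 are hypothetical; the callback fills the residual vector and, when requested, the Jacobian.

     class LineFitCB : public cv::LMSolver::Callback
     {
     public:
         LineFitCB(const std::vector<double>& x, const std::vector<double>& y) : xs(x), ys(y) {}
         bool compute(cv::InputArray _param, cv::OutputArray _err, cv::OutputArray _J) const
         {
             cv::Mat p = _param.getMat();              // 2x1 parameter vector (a, b)
             double a = p.at<double>(0), b = p.at<double>(1);
             int n = (int)xs.size();
             _err.create(n, 1, CV_64F);
             cv::Mat err = _err.getMat();
             for( int i = 0; i < n; i++ )
                 err.at<double>(i) = a*xs[i] + b - ys[i]; // residual of the i-th sample
             if( _J.needed() )
             {
                 _J.create(n, 2, CV_64F);
                 cv::Mat J = _J.getMat();
                 for( int i = 0; i < n; i++ )
                 {
                     J.at<double>(i, 0) = xs[i];          // d err_i / d a
                     J.at<double>(i, 1) = 1.0;            // d err_i / d b
                 }
             }
             return true;
         }
         std::vector<double> xs, ys;
     };

     // usage:
     //     cv::Mat param = (cv::Mat_<double>(2, 1) << 0., 0.);
     //     cv::createLMSolver(new LineFitCB(xs, ys), 100)->run(param);
*/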

View File

@@ -1,485 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "_modelest.h"
#include <algorithm>
#include <iterator>
#include <limits>
using namespace std;
CvModelEstimator2::CvModelEstimator2(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions)
{
modelPoints = _modelPoints;
modelSize = _modelSize;
maxBasicSolutions = _maxBasicSolutions;
checkPartialSubsets = true;
rng = cvRNG(-1);
}
CvModelEstimator2::~CvModelEstimator2()
{
}
void CvModelEstimator2::setSeed( int64 seed )
{
rng = cvRNG(seed);
}
int CvModelEstimator2::findInliers( const CvMat* m1, const CvMat* m2,
const CvMat* model, CvMat* _err,
CvMat* _mask, double threshold )
{
int i, count = _err->rows*_err->cols, goodCount = 0;
const float* err = _err->data.fl;
uchar* mask = _mask->data.ptr;
computeReprojError( m1, m2, model, _err );
threshold *= threshold;
for( i = 0; i < count; i++ )
goodCount += mask[i] = err[i] <= threshold;
return goodCount;
}
CV_IMPL int
cvRANSACUpdateNumIters( double p, double ep,
int model_points, int max_iters )
{
if( model_points <= 0 )
CV_Error( CV_StsOutOfRange, "the number of model points should be positive" );
p = MAX(p, 0.);
p = MIN(p, 1.);
ep = MAX(ep, 0.);
ep = MIN(ep, 1.);
// avoid inf's & nan's
double num = MAX(1. - p, DBL_MIN);
double denom = 1. - pow(1. - ep,model_points);
if( denom < DBL_MIN )
return 0;
num = log(num);
denom = log(denom);
return denom >= 0 || -num >= max_iters*(-denom) ?
max_iters : cvRound(num/denom);
}
bool CvModelEstimator2::runRANSAC( const CvMat* m1, const CvMat* m2, CvMat* model,
CvMat* mask0, double reprojThreshold,
double confidence, int maxIters )
{
bool result = false;
cv::Ptr<CvMat> mask = cvCloneMat(mask0);
cv::Ptr<CvMat> models, err, tmask;
cv::Ptr<CvMat> ms1, ms2;
int iter, niters = maxIters;
int count = m1->rows*m1->cols, maxGoodCount = 0;
CV_Assert( CV_ARE_SIZES_EQ(m1, m2) && CV_ARE_SIZES_EQ(m1, mask) );
if( count < modelPoints )
return false;
models = cvCreateMat( modelSize.height*maxBasicSolutions, modelSize.width, CV_64FC1 );
err = cvCreateMat( 1, count, CV_32FC1 );
tmask = cvCreateMat( 1, count, CV_8UC1 );
if( count > modelPoints )
{
ms1 = cvCreateMat( 1, modelPoints, m1->type );
ms2 = cvCreateMat( 1, modelPoints, m2->type );
}
else
{
niters = 1;
ms1 = cvCloneMat(m1);
ms2 = cvCloneMat(m2);
}
for( iter = 0; iter < niters; iter++ )
{
int i, goodCount, nmodels;
if( count > modelPoints )
{
bool found = getSubset( m1, m2, ms1, ms2, 300 );
if( !found )
{
if( iter == 0 )
return false;
break;
}
}
nmodels = runKernel( ms1, ms2, models );
if( nmodels <= 0 )
continue;
for( i = 0; i < nmodels; i++ )
{
CvMat model_i;
cvGetRows( models, &model_i, i*modelSize.height, (i+1)*modelSize.height );
goodCount = findInliers( m1, m2, &model_i, err, tmask, reprojThreshold );
if( goodCount > MAX(maxGoodCount, modelPoints-1) )
{
std::swap(tmask, mask);
cvCopy( &model_i, model );
maxGoodCount = goodCount;
niters = cvRANSACUpdateNumIters( confidence,
(double)(count - goodCount)/count, modelPoints, niters );
}
}
}
if( maxGoodCount > 0 )
{
if( mask != mask0 )
cvCopy( mask, mask0 );
result = true;
}
return result;
}
static CV_IMPLEMENT_QSORT( icvSortDistances, int, CV_LT )
bool CvModelEstimator2::runLMeDS( const CvMat* m1, const CvMat* m2, CvMat* model,
CvMat* mask, double confidence, int maxIters )
{
const double outlierRatio = 0.45;
bool result = false;
cv::Ptr<CvMat> models;
cv::Ptr<CvMat> ms1, ms2;
cv::Ptr<CvMat> err;
int iter, niters = maxIters;
int count = m1->rows*m1->cols;
double minMedian = DBL_MAX, sigma;
CV_Assert( CV_ARE_SIZES_EQ(m1, m2) && CV_ARE_SIZES_EQ(m1, mask) );
if( count < modelPoints )
return false;
models = cvCreateMat( modelSize.height*maxBasicSolutions, modelSize.width, CV_64FC1 );
err = cvCreateMat( 1, count, CV_32FC1 );
if( count > modelPoints )
{
ms1 = cvCreateMat( 1, modelPoints, m1->type );
ms2 = cvCreateMat( 1, modelPoints, m2->type );
}
else
{
niters = 1;
ms1 = cvCloneMat(m1);
ms2 = cvCloneMat(m2);
}
niters = cvRound(log(1-confidence)/log(1-pow(1-outlierRatio,(double)modelPoints)));
niters = MIN( MAX(niters, 3), maxIters );
for( iter = 0; iter < niters; iter++ )
{
int i, nmodels;
if( count > modelPoints )
{
bool found = getSubset( m1, m2, ms1, ms2, 300 );
if( !found )
{
if( iter == 0 )
return false;
break;
}
}
nmodels = runKernel( ms1, ms2, models );
if( nmodels <= 0 )
continue;
for( i = 0; i < nmodels; i++ )
{
CvMat model_i;
cvGetRows( models, &model_i, i*modelSize.height, (i+1)*modelSize.height );
computeReprojError( m1, m2, &model_i, err );
icvSortDistances( err->data.i, count, 0 );
double median = count % 2 != 0 ?
err->data.fl[count/2] : (err->data.fl[count/2-1] + err->data.fl[count/2])*0.5;
if( median < minMedian )
{
minMedian = median;
cvCopy( &model_i, model );
}
}
}
if( minMedian < DBL_MAX )
{
sigma = 2.5*1.4826*(1 + 5./(count - modelPoints))*sqrt(minMedian);
sigma = MAX( sigma, 0.001 );
count = findInliers( m1, m2, model, err, mask, sigma );
result = count >= modelPoints;
}
return result;
}
bool CvModelEstimator2::getSubset( const CvMat* m1, const CvMat* m2,
CvMat* ms1, CvMat* ms2, int maxAttempts )
{
cv::AutoBuffer<int> _idx(modelPoints);
int* idx = _idx;
int i = 0, j, k, idx_i, iters = 0;
int type = CV_MAT_TYPE(m1->type), elemSize = CV_ELEM_SIZE(type);
const int *m1ptr = m1->data.i, *m2ptr = m2->data.i;
int *ms1ptr = ms1->data.i, *ms2ptr = ms2->data.i;
int count = m1->cols*m1->rows;
assert( CV_IS_MAT_CONT(m1->type & m2->type) && (elemSize % sizeof(int) == 0) );
elemSize /= sizeof(int);
for(; iters < maxAttempts; iters++)
{
for( i = 0; i < modelPoints && iters < maxAttempts; )
{
idx[i] = idx_i = cvRandInt(&rng) % count;
for( j = 0; j < i; j++ )
if( idx_i == idx[j] )
break;
if( j < i )
continue;
for( k = 0; k < elemSize; k++ )
{
ms1ptr[i*elemSize + k] = m1ptr[idx_i*elemSize + k];
ms2ptr[i*elemSize + k] = m2ptr[idx_i*elemSize + k];
}
if( checkPartialSubsets && (!checkSubset( ms1, i+1 ) || !checkSubset( ms2, i+1 )))
{
iters++;
continue;
}
i++;
}
if( !checkPartialSubsets && i == modelPoints &&
(!checkSubset( ms1, i ) || !checkSubset( ms2, i )))
continue;
break;
}
return i == modelPoints && iters < maxAttempts;
}
bool CvModelEstimator2::checkSubset( const CvMat* m, int count )
{
int j, k, i, i0, i1;
CvPoint2D64f* ptr = (CvPoint2D64f*)m->data.ptr;
assert( CV_MAT_TYPE(m->type) == CV_64FC2 );
if( checkPartialSubsets )
i0 = i1 = count - 1;
else
i0 = 0, i1 = count - 1;
for( i = i0; i <= i1; i++ )
{
// check that the i-th selected point does not belong
// to a line connecting some previously selected points
for( j = 0; j < i; j++ )
{
double dx1 = ptr[j].x - ptr[i].x;
double dy1 = ptr[j].y - ptr[i].y;
for( k = 0; k < j; k++ )
{
double dx2 = ptr[k].x - ptr[i].x;
double dy2 = ptr[k].y - ptr[i].y;
if( fabs(dx2*dy1 - dy2*dx1) <= FLT_EPSILON*(fabs(dx1) + fabs(dy1) + fabs(dx2) + fabs(dy2)))
break;
}
if( k < j )
break;
}
if( j < i )
break;
}
return i >= i1;
}
namespace cv
{
class Affine3DEstimator : public CvModelEstimator2
{
public:
Affine3DEstimator() : CvModelEstimator2(4, cvSize(4, 3), 1) {}
virtual int runKernel( const CvMat* m1, const CvMat* m2, CvMat* model );
protected:
virtual void computeReprojError( const CvMat* m1, const CvMat* m2, const CvMat* model, CvMat* error );
virtual bool checkSubset( const CvMat* ms1, int count );
};
}
int cv::Affine3DEstimator::runKernel( const CvMat* m1, const CvMat* m2, CvMat* model )
{
const Point3d* from = reinterpret_cast<const Point3d*>(m1->data.ptr);
const Point3d* to = reinterpret_cast<const Point3d*>(m2->data.ptr);
Mat A(12, 12, CV_64F);
Mat B(12, 1, CV_64F);
A = Scalar(0.0);
for(int i = 0; i < modelPoints; ++i)
{
*B.ptr<Point3d>(3*i) = to[i];
double *aptr = A.ptr<double>(3*i);
for(int k = 0; k < 3; ++k)
{
aptr[3] = 1.0;
*reinterpret_cast<Point3d*>(aptr) = from[i];
aptr += 16;
}
}
CvMat cvA = A;
CvMat cvB = B;
CvMat cvX;
cvReshape(model, &cvX, 1, 12);
cvSolve(&cvA, &cvB, &cvX, CV_SVD );
return 1;
}
void cv::Affine3DEstimator::computeReprojError( const CvMat* m1, const CvMat* m2, const CvMat* model, CvMat* error )
{
int count = m1->rows * m1->cols;
const Point3d* from = reinterpret_cast<const Point3d*>(m1->data.ptr);
const Point3d* to = reinterpret_cast<const Point3d*>(m2->data.ptr);
const double* F = model->data.db;
float* err = error->data.fl;
for(int i = 0; i < count; i++ )
{
const Point3d& f = from[i];
const Point3d& t = to[i];
double a = F[0]*f.x + F[1]*f.y + F[ 2]*f.z + F[ 3] - t.x;
double b = F[4]*f.x + F[5]*f.y + F[ 6]*f.z + F[ 7] - t.y;
double c = F[8]*f.x + F[9]*f.y + F[10]*f.z + F[11] - t.z;
err[i] = (float)sqrt(a*a + b*b + c*c);
}
}
bool cv::Affine3DEstimator::checkSubset( const CvMat* ms1, int count )
{
CV_Assert( CV_MAT_TYPE(ms1->type) == CV_64FC3 );
int j, k, i = count - 1;
const Point3d* ptr = reinterpret_cast<const Point3d*>(ms1->data.ptr);
// check that the i-th selected point does not belong
// to a line connecting some previously selected points
for(j = 0; j < i; ++j)
{
Point3d d1 = ptr[j] - ptr[i];
double n1 = norm(d1);
for(k = 0; k < j; ++k)
{
Point3d d2 = ptr[k] - ptr[i];
double n = norm(d2) * n1;
if (fabs(d1.dot(d2) / n) > 0.996)
break;
}
if( k < j )
break;
}
return j == i;
}
int cv::estimateAffine3D(InputArray _from, InputArray _to,
OutputArray _out, OutputArray _inliers,
double param1, double param2)
{
Mat from = _from.getMat(), to = _to.getMat();
int count = from.checkVector(3, CV_32F);
CV_Assert( count >= 0 && to.checkVector(3, CV_32F) == count );
_out.create(3, 4, CV_64F);
Mat out = _out.getMat();
_inliers.create(count, 1, CV_8U, -1, true);
Mat inliers = _inliers.getMat();
inliers = Scalar::all(1);
Mat dFrom, dTo;
from.convertTo(dFrom, CV_64F);
to.convertTo(dTo, CV_64F);
CvMat F3x4 = out;
CvMat mask = inliers;
CvMat m1 = dFrom;
CvMat m2 = dTo;
const double epsilon = numeric_limits<double>::epsilon();
param1 = param1 <= 0 ? 3 : param1;
param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;
return Affine3DEstimator().runRANSAC(&m1, &m2, &F3x4, &mask, param1, param2 );
}

View File

@@ -5,8 +5,6 @@
#include "polynom_solver.h"
#include "p3p.h"
using namespace std;
void p3p::init_inverse_parameters()
{
inv_fx = 1. / fx;

View File

@@ -1,9 +1,8 @@
#include "precomp.hpp"
#include "polynom_solver.h"
#include <math.h>
#include <iostream>
using namespace std;
#include "precomp.hpp"
#include "polynom_solver.h"
int solve_deg2(double a, double b, double c, double & x1, double & x2)
{

View File

@@ -39,6 +39,7 @@
//
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
/* POSIT structure */
struct CvPOSITObject
@@ -119,12 +120,6 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
float diff = (float)criteria.epsilon;
float inv_focalLength = 1 / focalLength;
/* init variables */
int N = pObject->N;
float *objectVectors = pObject->obj_vecs;
float *invMatrix = pObject->inv_matr;
float *imgVectors = pObject->img_vecs;
/* Check bad arguments */
if( imagePoints == NULL )
return CV_NULLPTR_ERR;
@@ -143,6 +138,12 @@ static CvStatus icvPOSIT( CvPOSITObject *pObject, CvPoint2D32f *imagePoints,
if( (criteria.type & CV_TERMCRIT_ITER) && criteria.max_iter <= 0 )
return CV_BADFACTOR_ERR;
/* init variables */
int N = pObject->N;
float *objectVectors = pObject->obj_vecs;
float *invMatrix = pObject->inv_matr;
float *imgVectors = pObject->img_vecs;
while( !converged )
{
if( count == 0 )

View File

@@ -42,16 +42,12 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/internal.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <vector>
#include "opencv2/core/private.hpp"
#ifdef HAVE_TEGRA_OPTIMIZATION
#include "opencv2/calib3d/calib3d_tegra.hpp"
@@ -59,4 +55,51 @@
#define GET_OPTIMIZED(func) (func)
#endif
namespace cv
{
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters );
class CV_EXPORTS LMSolver : public Algorithm
{
public:
class CV_EXPORTS Callback
{
public:
virtual ~Callback() {}
virtual bool compute(InputArray param, OutputArray err, OutputArray J) const = 0;
};
virtual void setCallback(const Ptr<LMSolver::Callback>& cb) = 0;
virtual int run(InputOutputArray _param0) const = 0;
};
CV_EXPORTS Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters);
class CV_EXPORTS PointSetRegistrator : public Algorithm
{
public:
class CV_EXPORTS Callback
{
public:
virtual ~Callback() {}
virtual int runKernel(InputArray m1, InputArray m2, OutputArray model) const = 0;
virtual void computeError(InputArray m1, InputArray m2, InputArray model, OutputArray err) const = 0;
virtual bool checkSubset(InputArray, InputArray, int) const { return true; }
};
virtual void setCallback(const Ptr<PointSetRegistrator::Callback>& cb) = 0;
virtual bool run(InputArray m1, InputArray m2, OutputArray model, OutputArray mask) const = 0;
};
CV_EXPORTS Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& cb,
int modelPoints, double threshold,
double confidence=0.99, int maxIters=1000 );
CV_EXPORTS Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& cb,
int modelPoints, double confidence=0.99, int maxIters=1000 );
}
#endif
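// Usage sketch -- illustrative only, not part of this header. "ExampleTranslationCallback"
// and "exampleRansacTranslation" are hypothetical names showing how the internal
// PointSetRegistrator framework declared above is meant to be used: runKernel() fits a
// candidate model from a minimal sample, computeError() fills one CV_32F residual per
// correspondence, and the registrator returned by createRANSACPointSetRegistrator()
// drives the sampling, scoring and inlier mask. Assumes N x 1 CV_32FC2 point sets.
class ExampleTranslationCallback : public cv::PointSetRegistrator::Callback
{
public:
    // minimal sample of one correspondence -> 2D translation, stored as a 1x2 CV_64F model
    int runKernel( cv::InputArray _m1, cv::InputArray _m2, cv::OutputArray _model ) const
    {
        cv::Point2f p = _m1.getMat().at<cv::Point2f>(0), q = _m2.getMat().at<cv::Point2f>(0);
        cv::Mat(cv::Matx12d(q.x - p.x, q.y - p.y)).copyTo(_model);
        return 1;
    }
    // Euclidean distance between each translated source point and its match
    void computeError( cv::InputArray _m1, cv::InputArray _m2, cv::InputArray _model,
                       cv::OutputArray _err ) const
    {
        cv::Mat m1 = _m1.getMat(), m2 = _m2.getMat(), model = _model.getMat();
        int n = m1.checkVector(2);
        _err.create(n, 1, CV_32F);
        float* e = _err.getMat().ptr<float>();
        for( int i = 0; i < n; i++ )
        {
            cv::Point2f d = m2.at<cv::Point2f>(i) - m1.at<cv::Point2f>(i);
            d -= cv::Point2f((float)model.at<double>(0), (float)model.at<double>(1));
            e[i] = std::sqrt(d.x*d.x + d.y*d.y);
        }
    }
};
static bool exampleRansacTranslation( cv::InputArray srcPoints, cv::InputArray dstPoints,
                                      cv::Mat& model, cv::Mat& inlierMask )
{
    cv::Ptr<cv::PointSetRegistrator> reg =
        cv::createRANSACPointSetRegistrator(new ExampleTranslationCallback, /*modelPoints=*/1,
                                            /*threshold=*/3., /*confidence=*/0.99);
    return reg->run(srcPoints, dstPoints, model, inlierMask);
}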

View File

@@ -0,0 +1,537 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <algorithm>
#include <iterator>
#include <limits>
namespace cv
{
int RANSACUpdateNumIters( double p, double ep, int modelPoints, int maxIters )
{
if( modelPoints <= 0 )
CV_Error( Error::StsOutOfRange, "the number of model points should be positive" );
p = MAX(p, 0.);
p = MIN(p, 1.);
ep = MAX(ep, 0.);
ep = MIN(ep, 1.);
// avoid inf's & nan's
double num = MAX(1. - p, DBL_MIN);
double denom = 1. - std::pow(1. - ep, modelPoints);
if( denom < DBL_MIN )
return 0;
num = std::log(num);
denom = std::log(denom);
return denom >= 0 || -num >= maxIters*(-denom) ? maxIters : cvRound(num/denom);
}
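// The rule above evaluates the standard RANSAC sample-count bound
//     N = log(1 - p) / log(1 - (1 - ep)^modelPoints),
// i.e. the number of random minimal samples needed so that, with probability p,
// at least one of them is outlier-free when the outlier ratio is ep.
// Worked example: with p = 0.99, ep = 0.5 and modelPoints = 4 (the affine 3D
// estimator later in this file), N = log(0.01)/log(1 - 0.5^4) ~= 71, so niters
// drops from maxIters to about 71 once half the correspondences are inliers.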
class RANSACPointSetRegistrator : public PointSetRegistrator
{
public:
RANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb=Ptr<PointSetRegistrator::Callback>(),
int _modelPoints=0, double _threshold=0, double _confidence=0.99, int _maxIters=1000)
: cb(_cb), modelPoints(_modelPoints), threshold(_threshold), confidence(_confidence), maxIters(_maxIters)
{
checkPartialSubsets = true;
}
int findInliers( const Mat& m1, const Mat& m2, const Mat& model, Mat& err, Mat& mask, double thresh ) const
{
cb->computeError( m1, m2, model, err );
mask.create(err.size(), CV_8U);
CV_Assert( err.isContinuous() && err.type() == CV_32F && mask.isContinuous() && mask.type() == CV_8U);
const float* errptr = err.ptr<float>();
uchar* maskptr = mask.ptr<uchar>();
float t = (float)(thresh*thresh);
int i, n = (int)err.total(), nz = 0;
for( i = 0; i < n; i++ )
{
int f = errptr[i] <= t;
maskptr[i] = (uchar)f;
nz += f;
}
return nz;
}
bool getSubset( const Mat& m1, const Mat& m2,
Mat& ms1, Mat& ms2, RNG& rng,
int maxAttempts=1000 ) const
{
cv::AutoBuffer<int> _idx(modelPoints);
int* idx = _idx;
int i = 0, j, k, iters = 0;
int esz1 = (int)m1.elemSize(), esz2 = (int)m2.elemSize();
int d1 = m1.channels() > 1 ? m1.channels() : m1.cols;
int d2 = m2.channels() > 1 ? m2.channels() : m2.cols;
int count = m1.checkVector(d1), count2 = m2.checkVector(d2);
const int *m1ptr = (const int*)m1.data, *m2ptr = (const int*)m2.data;
ms1.create(modelPoints, 1, CV_MAKETYPE(m1.depth(), d1));
ms2.create(modelPoints, 1, CV_MAKETYPE(m2.depth(), d2));
int *ms1ptr = (int*)ms1.data, *ms2ptr = (int*)ms2.data;
CV_Assert( count >= modelPoints && count == count2 );
CV_Assert( (esz1 % sizeof(int)) == 0 && (esz2 % sizeof(int)) == 0 );
esz1 /= sizeof(int);
esz2 /= sizeof(int);
for(; iters < maxAttempts; iters++)
{
for( i = 0; i < modelPoints && iters < maxAttempts; )
{
int idx_i = 0;
for(;;)
{
idx_i = idx[i] = rng.uniform(0, count);
for( j = 0; j < i; j++ )
if( idx_i == idx[j] )
break;
if( j == i )
break;
}
for( k = 0; k < esz1; k++ )
ms1ptr[i*esz1 + k] = m1ptr[idx_i*esz1 + k];
for( k = 0; k < esz2; k++ )
ms2ptr[i*esz2 + k] = m2ptr[idx_i*esz2 + k];
if( checkPartialSubsets && !cb->checkSubset( ms1, ms2, i+1 ))
{
iters++;
continue;
}
i++;
}
if( !checkPartialSubsets && i == modelPoints && !cb->checkSubset(ms1, ms2, i))
continue;
break;
}
return i == modelPoints && iters < maxAttempts;
}
bool run(InputArray _m1, InputArray _m2, OutputArray _model, OutputArray _mask) const
{
bool result = false;
Mat m1 = _m1.getMat(), m2 = _m2.getMat();
Mat err, mask, model, bestModel, ms1, ms2;
int iter, niters = MAX(maxIters, 1);
int d1 = m1.channels() > 1 ? m1.channels() : m1.cols;
int d2 = m2.channels() > 1 ? m2.channels() : m2.cols;
int count = m1.checkVector(d1), count2 = m2.checkVector(d2), maxGoodCount = 0;
RNG rng((uint64)-1);
CV_Assert( !cb.empty() );
CV_Assert( confidence > 0 && confidence < 1 );
CV_Assert( count >= 0 && count2 == count );
if( count < modelPoints )
return false;
Mat bestMask0, bestMask;
if( _mask.needed() )
{
_mask.create(count, 1, CV_8U, -1, true);
bestMask0 = bestMask = _mask.getMat();
CV_Assert( (bestMask.cols == 1 || bestMask.rows == 1) && (int)bestMask.total() == count );
}
else
{
bestMask.create(count, 1, CV_8U);
bestMask0 = bestMask;
}
if( count == modelPoints )
{
if( cb->runKernel(m1, m2, bestModel) <= 0 )
return false;
bestModel.copyTo(_model);
bestMask.setTo(Scalar::all(1));
return true;
}
for( iter = 0; iter < niters; iter++ )
{
int i, goodCount, nmodels;
if( count > modelPoints )
{
bool found = getSubset( m1, m2, ms1, ms2, rng );
if( !found )
{
if( iter == 0 )
return false;
break;
}
}
nmodels = cb->runKernel( ms1, ms2, model );
if( nmodels <= 0 )
continue;
CV_Assert( model.rows % nmodels == 0 );
Size modelSize(model.cols, model.rows/nmodels);
for( i = 0; i < nmodels; i++ )
{
Mat model_i = model.rowRange( i*modelSize.height, (i+1)*modelSize.height );
goodCount = findInliers( m1, m2, model_i, err, mask, threshold );
if( goodCount > MAX(maxGoodCount, modelPoints-1) )
{
std::swap(mask, bestMask);
model_i.copyTo(bestModel);
maxGoodCount = goodCount;
niters = RANSACUpdateNumIters( confidence, (double)(count - goodCount)/count, modelPoints, niters );
}
}
}
if( maxGoodCount > 0 )
{
if( bestMask.data != bestMask0.data )
{
if( bestMask.size() == bestMask0.size() )
bestMask.copyTo(bestMask0);
else
transpose(bestMask, bestMask0);
}
bestModel.copyTo(_model);
result = true;
}
else
_model.release();
return result;
}
void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<PointSetRegistrator::Callback> cb;
int modelPoints;
int maxBasicSolutions;
bool checkPartialSubsets;
double threshold;
double confidence;
int maxIters;
};
class LMeDSPointSetRegistrator : public RANSACPointSetRegistrator
{
public:
LMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb=Ptr<PointSetRegistrator::Callback>(),
int _modelPoints=0, double _confidence=0.99, int _maxIters=1000)
: RANSACPointSetRegistrator(_cb, _modelPoints, 0, _confidence, _maxIters) {}
bool run(InputArray _m1, InputArray _m2, OutputArray _model, OutputArray _mask) const
{
const double outlierRatio = 0.45;
bool result = false;
Mat m1 = _m1.getMat(), m2 = _m2.getMat();
Mat ms1, ms2, err, errf, model, bestModel, mask, mask0;
int d1 = m1.channels() > 1 ? m1.channels() : m1.cols;
int d2 = m2.channels() > 1 ? m2.channels() : m2.cols;
int count = m1.checkVector(d1), count2 = m2.checkVector(d2);
double minMedian = DBL_MAX, sigma;
RNG rng((uint64)-1);
CV_Assert( !cb.empty() );
CV_Assert( confidence > 0 && confidence < 1 );
CV_Assert( count >= 0 && count2 == count );
if( count < modelPoints )
return false;
if( _mask.needed() )
{
_mask.create(count, 1, CV_8U, -1, true);
mask0 = mask = _mask.getMat();
CV_Assert( (mask.cols == 1 || mask.rows == 1) && (int)mask.total() == count );
}
if( count == modelPoints )
{
if( cb->runKernel(m1, m2, bestModel) <= 0 )
return false;
bestModel.copyTo(_model);
mask.setTo(Scalar::all(1));
return true;
}
int iter, niters = RANSACUpdateNumIters(confidence, outlierRatio, modelPoints, maxIters);
niters = MAX(niters, 3);
for( iter = 0; iter < niters; iter++ )
{
int i, nmodels;
if( count > modelPoints )
{
bool found = getSubset( m1, m2, ms1, ms2, rng );
if( !found )
{
if( iter == 0 )
return false;
break;
}
}
nmodels = cb->runKernel( ms1, ms2, model );
if( nmodels <= 0 )
continue;
CV_Assert( model.rows % nmodels == 0 );
Size modelSize(model.cols, model.rows/nmodels);
for( i = 0; i < nmodels; i++ )
{
Mat model_i = model.rowRange( i*modelSize.height, (i+1)*modelSize.height );
cb->computeError( m1, m2, model_i, err );
if( err.depth() != CV_32F )
err.convertTo(errf, CV_32F);
else
errf = err;
CV_Assert( errf.isContinuous() && errf.type() == CV_32F && (int)errf.total() == count );
std::sort((int*)errf.data, (int*)errf.data + count);
double median = count % 2 != 0 ?
errf.at<float>(count/2) : (errf.at<float>(count/2-1) + errf.at<float>(count/2))*0.5;
if( median < minMedian )
{
minMedian = median;
model_i.copyTo(bestModel);
}
}
}
if( minMedian < DBL_MAX )
{
sigma = 2.5*1.4826*(1 + 5./(count - modelPoints))*std::sqrt(minMedian);
sigma = MAX( sigma, 0.001 );
count = findInliers( m1, m2, bestModel, err, mask, sigma );
if( _mask.needed() && mask0.data != mask.data )
{
if( mask0.size() == mask.size() )
mask.copyTo(mask0);
else
transpose(mask, mask0);
}
bestModel.copyTo(_model);
result = count >= modelPoints;
}
else
_model.release();
return result;
}
AlgorithmInfo* info() const;
};
CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters));
CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters));
Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _threshold,
double _confidence, int _maxIters)
{
CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
return new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters);
}
Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _confidence, int _maxIters)
{
CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
return new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters);
}
class Affine3DEstimatorCallback : public PointSetRegistrator::Callback
{
public:
int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const
{
Mat m1 = _m1.getMat(), m2 = _m2.getMat();
const Point3f* from = m1.ptr<Point3f>();
const Point3f* to = m2.ptr<Point3f>();
const int N = 12;
double buf[N*N + N + N];
Mat A(N, N, CV_64F, &buf[0]);
Mat B(N, 1, CV_64F, &buf[0] + N*N);
Mat X(N, 1, CV_64F, &buf[0] + N*N + N);
double* Adata = A.ptr<double>();
double* Bdata = B.ptr<double>();
A = Scalar::all(0);
for( int i = 0; i < (N/3); i++ )
{
Bdata[i*3] = to[i].x;
Bdata[i*3+1] = to[i].y;
Bdata[i*3+2] = to[i].z;
double *aptr = Adata + i*3*N;
for(int k = 0; k < 3; ++k)
{
aptr[0] = from[i].x;
aptr[1] = from[i].y;
aptr[2] = from[i].z;
aptr[3] = 1.0;
aptr += 16;
}
}
solve(A, B, X, DECOMP_SVD);
X.reshape(1, 3).copyTo(_model);
return 1;
}
void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const
{
Mat m1 = _m1.getMat(), m2 = _m2.getMat(), model = _model.getMat();
const Point3f* from = m1.ptr<Point3f>();
const Point3f* to = m2.ptr<Point3f>();
const double* F = model.ptr<double>();
int count = m1.checkVector(3);
CV_Assert( count > 0 );
_err.create(count, 1, CV_32F);
Mat err = _err.getMat();
float* errptr = err.ptr<float>();
for(int i = 0; i < count; i++ )
{
const Point3f& f = from[i];
const Point3f& t = to[i];
double a = F[0]*f.x + F[1]*f.y + F[ 2]*f.z + F[ 3] - t.x;
double b = F[4]*f.x + F[5]*f.y + F[ 6]*f.z + F[ 7] - t.y;
double c = F[8]*f.x + F[9]*f.y + F[10]*f.z + F[11] - t.z;
errptr[i] = (float)std::sqrt(a*a + b*b + c*c);
}
}
bool checkSubset( InputArray _ms1, InputArray _ms2, int count ) const
{
const float threshold = 0.996f;
Mat ms1 = _ms1.getMat(), ms2 = _ms2.getMat();
for( int inp = 1; inp <= 2; inp++ )
{
int j, k, i = count - 1;
const Mat* msi = inp == 1 ? &ms1 : &ms2;
const Point3f* ptr = msi->ptr<Point3f>();
CV_Assert( count <= msi->rows );
// check that the i-th selected point does not belong
// to a line connecting some previously selected points
for(j = 0; j < i; ++j)
{
Point3f d1 = ptr[j] - ptr[i];
float n1 = d1.x*d1.x + d1.y*d1.y;
for(k = 0; k < j; ++k)
{
Point3f d2 = ptr[k] - ptr[i];
float denom = (d2.x*d2.x + d2.y*d2.y)*n1;
float num = d1.x*d2.x + d1.y*d2.y;
if( num*num > threshold*threshold*denom )
return false;
}
}
}
return true;
}
};
}
int cv::estimateAffine3D(InputArray _from, InputArray _to,
OutputArray _out, OutputArray _inliers,
double param1, double param2)
{
Mat from = _from.getMat(), to = _to.getMat();
int count = from.checkVector(3);
CV_Assert( count >= 0 && to.checkVector(3) == count );
Mat dFrom, dTo;
from.convertTo(dFrom, CV_32F);
to.convertTo(dTo, CV_32F);
dFrom = dFrom.reshape(3, count);
dTo = dTo.reshape(3, count);
const double epsilon = DBL_EPSILON;
param1 = param1 <= 0 ? 3 : param1;
param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;
return createRANSACPointSetRegistrator(new Affine3DEstimatorCallback, 4, param1, param2)->run(dFrom, dTo, _out, _inliers);
}
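// Usage sketch -- illustrative, not part of this file. estimateAffine3D() now runs
// through the generic RANSAC registrator above: at least 4 3D correspondences in,
// a 3x4 CV_64F affine transform and an N x 1 CV_8U inlier mask out.
static bool exampleEstimateAffine3D( const std::vector<cv::Point3f>& from,
                                     const std::vector<cv::Point3f>& to,
                                     cv::Mat& affine, cv::Mat& inliers )
{
    // ransacThreshold and confidence correspond to param1/param2 above
    return cv::estimateAffine3D(from, to, affine, inliers,
                                /*ransacThreshold=*/3.0, /*confidence=*/0.99) != 0;
}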

View File

@@ -47,46 +47,14 @@
#include <math.h>
//#define _SUBPIX_VERBOSE
#undef max
namespace cv {
// static void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
// {
// for(size_t i = 0; i < corners.size(); i++)
// {
// circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
// }
// }
// static int histQuantile(const Mat& hist, float quantile)
// {
// if(hist.dims > 1) return -1; // works for 1D histograms only
// float cur_sum = 0;
// float total_sum = (float)sum(hist).val[0];
// float quantile_sum = total_sum*quantile;
// for(int j = 0; j < hist.size[0]; j++)
// {
// cur_sum += (float)hist.at<float>(j);
// if(cur_sum > quantile_sum)
// {
// return j;
// }
// }
// return hist.size[0] - 1;
// }
inline bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
{
return p1.second < p2.second;
}
static void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
static void orderContours(const std::vector<std::vector<Point> >& contours, Point2f point, std::vector<std::pair<int, float> >& order)
{
order.clear();
size_t i, j, n = contours.size();
@@ -106,12 +74,12 @@ static void orderContours(const vector<vector<Point> >& contours, Point2f point,
}
// fit second order curve to a set of 2D points
inline void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
inline void fitCurve2Order(const std::vector<Point2f>& /*points*/, std::vector<float>& /*curve*/)
{
// TBD
}
inline void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
inline void findCurvesCross(const std::vector<float>& /*curve1*/, const std::vector<float>& /*curve2*/, Point2f& /*cross_point*/)
{
}
@@ -124,30 +92,7 @@ static void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2,
cross_point = origin1 + dir1*alpha;
}
// static void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
// {
// // find the nearest point
// double min_dist = std::numeric_limits<double>::max();
// int min_idx = -1;
// // find corner idx
// for(size_t i = 0; i < contour.size(); i++)
// {
// double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
// if(dist < min_dist)
// {
// min_dist = dist;
// min_idx = (int)i;
// }
// }
// assert(min_idx >= 0);
// // temporary solution, have to make something more precise
// corner = contour[min_idx];
// return;
// }
static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
static void findCorner(const std::vector<Point2f>& contour, Point2f point, Point2f& corner)
{
// find the nearest point
double min_dist = std::numeric_limits<double>::max();
@@ -163,7 +108,7 @@ static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& c
min_idx = (int)i;
}
}
assert(min_idx >= 0);
CV_Assert(min_idx >= 0);
// temporary solution, have to make something more precise
corner = contour[min_idx];
@@ -173,13 +118,7 @@ static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& c
static int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
{
Mat bw;
//const double max_bell_width = 20; // we expect two bells with width bounded above
//const double min_bell_width = 5; // and below
double total_sum = sum(hist).val[0];
//double thresh = total_sum/(2*max_bell_width)*0.25f; // quarter of a bar inside a bell
// threshold(hist, bw, thresh, 255.0, CV_THRESH_BINARY);
double quantile_sum = 0.0;
//double min_quantile = 0.2;
@@ -233,12 +172,6 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
const float* _ranges = ranges;
Mat hist;
#if defined(_SUBPIX_VERBOSE)
vector<float> radius;
radius.assign(corners.size(), 0.0f);
#endif //_SUBPIX_VERBOSE
Mat black_comp, white_comp;
for(int i = 0; i < ncorners; i++)
{
@@ -248,44 +181,25 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
Mat img_roi = img(roi);
calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);
#if 0
int black_thresh = histQuantile(hist, 0.45f);
int white_thresh = histQuantile(hist, 0.55f);
#else
int black_thresh = 0, white_thresh = 0;
segment_hist_max(hist, black_thresh, white_thresh);
#endif
threshold(img, black_comp, black_thresh, 255.0, CV_THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, CV_THRESH_BINARY);
threshold(img, black_comp, black_thresh, 255.0, THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, THRESH_BINARY);
const int erode_count = 1;
erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);
#if defined(_SUBPIX_VERBOSE)
namedWindow("roi", 1);
imshow("roi", img_roi);
imwrite("test.jpg", img);
namedWindow("black", 1);
imshow("black", black_comp);
namedWindow("white", 1);
imshow("white", white_comp);
cvWaitKey(0);
imwrite("black.jpg", black_comp);
imwrite("white.jpg", white_comp);
#endif
vector<vector<Point> > white_contours, black_contours;
vector<Vec4i> white_hierarchy, black_hierarchy;
findContours(black_comp, black_contours, black_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
std::vector<std::vector<Point> > white_contours, black_contours;
std::vector<Vec4i> white_hierarchy, black_hierarchy;
findContours(black_comp, black_contours, black_hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
if(black_contours.size() < 5 || white_contours.size() < 5) continue;
// find two white and black blobs that are close to the input point
vector<std::pair<int, float> > white_order, black_order;
std::vector<std::pair<int, float> > white_order, black_order;
orderContours(black_contours, corners[i], black_order);
orderContours(white_contours, corners[i], white_order);
@@ -296,21 +210,17 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
continue; // there will be no improvement in this corner position
}
const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
const std::vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
&white_contours[white_order[0].first], &white_contours[white_order[1].first]};
vector<Point2f> quads_approx[4];
std::vector<Point2f> quads_approx[4];
Point2f quad_corners[4];
for(int k = 0; k < 4; k++)
{
#if 1
vector<Point2f> temp;
std::vector<Point2f> temp;
for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);
findCorner(quads_approx[k], corners[i], quad_corners[k]);
#else
findCorner(*quads[k], corners[i], quad_corners[k]);
#endif
quad_corners[k] += Point2f(0.5f, 0.5f);
}
@@ -323,44 +233,7 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;
findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
#if defined(_SUBPIX_VERBOSE)
radius[i] = norm(corners[i] - ground_truth_corners[ground_truth_idx])*6;
#if 1
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
// line(test, quad_corners[0] - corners[i] + Point2f(30, 30), quad_corners[1] - corners[i] + Point2f(30, 30), cvScalar(0, 255, 0));
// line(test, quad_corners[2] - corners[i] + Point2f(30, 30), quad_corners[3] - corners[i] + Point2f(30, 30), cvScalar(0, 255, 0));
vector<vector<Point> > contrs;
contrs.resize(1);
for(int k = 0; k < 4; k++)
{
//contrs[0] = quads_approx[k];
contrs[0].clear();
for(size_t j = 0; j < quads_approx[k].size(); j++) contrs[0].push_back(quads_approx[k][j]);
drawContours(test, contrs, 0, CV_RGB(0, 0, 255), 1, 1, vector<Vec4i>(), 2);
circle(test, quad_corners[k], 0.5, CV_RGB(255, 0, 0));
}
Mat test1 = test(Rect(corners[i].x - 30, corners[i].y - 30, 60, 60));
namedWindow("1", 1);
imshow("1", test1);
imwrite("test.jpg", test);
waitKey(0);
#endif
#endif //_SUBPIX_VERBOSE
}
#if defined(_SUBPIX_VERBOSE)
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
drawCircles(test, corners, radius);
namedWindow("corners", 1);
imshow("corners", test);
waitKey();
#endif //_SUBPIX_VERBOSE
return true;
}
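// Usage sketch -- illustrative, not part of this file. find4QuadCornerSubpix() refines
// chessboard corners in place on the grayscale source image; region_size bounds the
// neighbourhood examined around each corner.
static bool exampleRefineCorners( const cv::Mat& gray, std::vector<cv::Point2f>& corners )
{
    // corners typically come from findChessboardCorners(); Size(10, 10) is an assumed,
    // not prescribed, search window
    return cv::find4QuadCornerSubpix(gray, corners, cv::Size(10, 10));
}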

View File

@@ -43,6 +43,8 @@
#include "precomp.hpp"
#include "epnp.h"
#include "p3p.h"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
using namespace cv;
@@ -57,7 +59,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
_tvec.create(3, 1, CV_64F);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
if (flags == CV_EPNP)
if (flags == EPNP)
{
cv::Mat undistortedPoints;
cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
@@ -68,7 +70,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Rodrigues(R, rvec);
return true;
}
else if (flags == CV_P3P)
else if (flags == P3P)
{
CV_Assert( npoints == 4);
cv::Mat undistortedPoints;
@@ -81,7 +83,7 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
cv::Rodrigues(R, rvec);
return result;
}
else if (flags == CV_ITERATIVE)
else if (flags == ITERATIVE)
{
CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
@@ -162,8 +164,8 @@ namespace cv
CameraParameters camera;
};
static void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
static void pnpTask(const std::vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, std::vector<int>& inliers, Mat& rvec, Mat& tvec,
const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
{
Mat modelObjectPoints(1, MIN_POINTS_COUNT, CV_32FC3), modelImagePoints(1, MIN_POINTS_COUNT, CV_32FC2);
@@ -199,14 +201,14 @@ namespace cv
params.useExtrinsicGuess, params.flags);
vector<Point2f> projected_points;
std::vector<Point2f> projected_points;
projected_points.resize(objectPoints.cols);
projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
Mat rotatedPoints;
project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
vector<int> localInliers;
std::vector<int> localInliers;
for (int i = 0; i < objectPoints.cols; i++)
{
Point2f p(imagePoints.at<Vec2f>(0, i)[0], imagePoints.at<Vec2f>(0, i)[1]);
@@ -236,7 +238,7 @@ namespace cv
public:
void operator()( const BlockedRange& r ) const
{
vector<char> pointsMask(objectPoints.cols, 0);
std::vector<char> pointsMask(objectPoints.cols, 0);
memset(&pointsMask[0], 1, MIN_POINTS_COUNT );
for( int i=r.begin(); i!=r.end(); ++i )
{
@@ -254,7 +256,7 @@ namespace cv
}
}
PnPSolver(const Mat& _objectPoints, const Mat& _imagePoints, const Parameters& _parameters,
Mat& _rvec, Mat& _tvec, vector<int>& _inliers):
Mat& _rvec, Mat& _tvec, std::vector<int>& _inliers):
objectPoints(_objectPoints), imagePoints(_imagePoints), parameters(_parameters),
rvec(_rvec), tvec(_tvec), inliers(_inliers)
{
@@ -270,13 +272,13 @@ namespace cv
const Mat& imagePoints;
const Parameters& parameters;
Mat &rvec, &tvec;
vector<int>& inliers;
std::vector<int>& inliers;
Mat initRvec, initTvec;
static RNG generator;
static Mutex syncMutex;
void generateVar(vector<char>& mask) const
void generateVar(std::vector<char>& mask) const
{
int size = (int)mask.size();
for (int i = 0; i < size; i++)
@@ -329,7 +331,7 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
params.camera.init(cameraMatrix, distCoeffs);
params.flags = flags;
vector<int> localInliers;
std::vector<int> localInliers;
Mat localRvec, localTvec;
rvec.copyTo(localRvec);
tvec.copyTo(localTvec);
@@ -342,7 +344,7 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
{
if (flags != CV_P3P)
if (flags != P3P)
{
int i, pointsCount = (int)localInliers.size();
Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);

View File

@@ -7,10 +7,11 @@
// copy or use the software.
//
//
// Intel License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -23,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
@@ -46,58 +47,45 @@
#include "precomp.hpp"
#include <stdio.h>
//#undef CV_SSE2
//#define CV_SSE2 0
//#include "emmintrin.h"
#include <limits>
CV_IMPL CvStereoBMState* cvCreateStereoBMState( int /*preset*/, int numberOfDisparities )
{
CvStereoBMState* state = (CvStereoBMState*)cvAlloc( sizeof(*state) );
if( !state )
return 0;
state->preFilterType = CV_STEREO_BM_XSOBEL; //CV_STEREO_BM_NORMALIZED_RESPONSE;
state->preFilterSize = 9;
state->preFilterCap = 31;
state->SADWindowSize = 15;
state->minDisparity = 0;
state->numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : 64;
state->textureThreshold = 10;
state->uniquenessRatio = 15;
state->speckleRange = state->speckleWindowSize = 0;
state->trySmallerWindows = 0;
state->roi1 = state->roi2 = cvRect(0,0,0,0);
state->disp12MaxDiff = -1;
state->preFilteredImg0 = state->preFilteredImg1 = state->slidingSumBuf =
state->disp = state->cost = 0;
return state;
}
CV_IMPL void cvReleaseStereoBMState( CvStereoBMState** state )
{
if( !state )
CV_Error( CV_StsNullPtr, "" );
if( !*state )
return;
cvReleaseMat( &(*state)->preFilteredImg0 );
cvReleaseMat( &(*state)->preFilteredImg1 );
cvReleaseMat( &(*state)->slidingSumBuf );
cvReleaseMat( &(*state)->disp );
cvReleaseMat( &(*state)->cost );
cvFree( state );
}
namespace cv
{
struct StereoBMParams
{
StereoBMParams(int _numDisparities=64, int _SADWindowSize=21)
{
preFilterType = StereoBM::PREFILTER_XSOBEL;
preFilterSize = 9;
preFilterCap = 31;
SADWindowSize = _SADWindowSize;
minDisparity = 0;
numDisparities = _numDisparities > 0 ? _numDisparities : 64;
textureThreshold = 10;
uniquenessRatio = 15;
speckleRange = speckleWindowSize = 0;
roi1 = roi2 = Rect(0,0,0,0);
disp12MaxDiff = -1;
dispType = CV_16S;
}
int preFilterType;
int preFilterSize;
int preFilterCap;
int SADWindowSize;
int minDisparity;
int numDisparities;
int textureThreshold;
int uniquenessRatio;
int speckleRange;
int speckleWindowSize;
Rect roi1, roi2;
int disp12MaxDiff;
int dispType;
};
static void prefilterNorm( const Mat& src, Mat& dst, int winsize, int ftzero, uchar* buf )
{
int x, y, wsz2 = winsize/2;
@@ -191,11 +179,11 @@ prefilterXSobel( const Mat& src, Mat& dst, int ftzero )
dptr0[0] = dptr0[size.width-1] = dptr1[0] = dptr1[size.width-1] = val0;
x = 1;
#if CV_SSE2
#if CV_SSE2
if( useSIMD )
{
__m128i z = _mm_setzero_si128(), ftz = _mm_set1_epi16((short)ftzero),
ftz2 = _mm_set1_epi8(CV_CAST_8U(ftzero*2));
ftz2 = _mm_set1_epi8(cv::saturate_cast<uchar>(ftzero*2));
for( ; x <= size.width-9; x += 8 )
{
__m128i c0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow0 + x - 1)), z);
@@ -207,9 +195,9 @@ prefilterXSobel( const Mat& src, Mat& dst, int ftzero )
d1 = _mm_sub_epi16(d1, c1);
__m128i c2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x - 1)), z);
__m128i c3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x - 1)), z);
__m128i c3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow3 + x - 1)), z);
__m128i d2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x + 1)), z);
__m128i d3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x + 1)), z);
__m128i d3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow3 + x + 1)), z);
d2 = _mm_sub_epi16(d2, c2);
d3 = _mm_sub_epi16(d3, c3);
@@ -223,12 +211,12 @@ prefilterXSobel( const Mat& src, Mat& dst, int ftzero )
_mm_storel_epi64((__m128i*)(dptr1 + x), _mm_unpackhi_epi64(v0, v0));
}
}
#endif
#endif
for( ; x < size.width-1; x++ )
{
int d0 = srow0[x+1] - srow0[x-1], d1 = srow1[x+1] - srow1[x-1],
d2 = srow2[x+1] - srow2[x-1], d3 = srow3[x+1] - srow3[x-1];
d2 = srow2[x+1] - srow2[x-1], d3 = srow3[x+1] - srow3[x-1];
int v0 = tab[d0 + d1*2 + d2 + OFS];
int v1 = tab[d1 + d2*2 + d3 + OFS];
dptr0[x] = (uchar)v0;
@@ -249,14 +237,14 @@ static const int DISPARITY_SHIFT = 4;
#if CV_SSE2
static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
Mat& disp, Mat& cost, CvStereoBMState& state,
uchar* buf, int _dy0, int _dy1 )
Mat& disp, Mat& cost, StereoBMParams& state,
uchar* buf, int _dy0, int _dy1 )
{
const int ALIGN = 16;
int x, y, d;
int wsz = state.SADWindowSize, wsz2 = wsz/2;
int dy0 = MIN(_dy0, wsz2+1), dy1 = MIN(_dy1, wsz2+1);
int ndisp = state.numberOfDisparities;
int ndisp = state.numDisparities;
int mindisp = state.minDisparity;
int lofs = MAX(ndisp - 1 + mindisp, 0);
int rofs = -MIN(ndisp - 1 + mindisp, 0);
@@ -343,7 +331,7 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
rptr = rptr0 + MIN(MAX(x1, -rofs), width-1-rofs) - dy0*sstep;
for( y = -dy0; y < height + dy1; y++, cbuf += ndisp, cbuf_sub += ndisp,
hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep )
hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep )
{
int lval = lptr[0];
__m128i lv = _mm_set1_epi8((char)lval), z = _mm_setzero_si128();
@@ -464,7 +452,7 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
__m128i thresh8 = _mm_set1_epi16((short)(thresh + 1));
__m128i d1 = _mm_set1_epi16((short)(mind-1)), d2 = _mm_set1_epi16((short)(mind+1));
__m128i dd_16 = _mm_add_epi16(dd_8, dd_8);
d8 = _mm_sub_epi16(d0_8, dd_16);
d8 = _mm_sub_epi16(d0_8, dd_16);
for( d = 0; d < ndisp; d += 16 )
{
@@ -507,14 +495,14 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right,
static void
findStereoCorrespondenceBM( const Mat& left, const Mat& right,
Mat& disp, Mat& cost, const CvStereoBMState& state,
uchar* buf, int _dy0, int _dy1 )
Mat& disp, Mat& cost, const StereoBMParams& state,
uchar* buf, int _dy0, int _dy1 )
{
const int ALIGN = 16;
int x, y, d;
int wsz = state.SADWindowSize, wsz2 = wsz/2;
int dy0 = MIN(_dy0, wsz2+1), dy1 = MIN(_dy1, wsz2+1);
int ndisp = state.numberOfDisparities;
int ndisp = state.numDisparities;
int mindisp = state.minDisparity;
int lofs = MAX(ndisp - 1 + mindisp, 0);
int rofs = -MIN(ndisp - 1 + mindisp, 0);
@@ -592,7 +580,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
rptr = rptr0 + MIN(MAX(x1, -rofs), width-1-rofs) - dy0*sstep;
for( y = -dy0; y < height + dy1; y++, cbuf += ndisp, cbuf_sub += ndisp,
hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep )
hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep )
{
int lval = lptr[0];
for( d = 0; d < ndisp; d++ )
@@ -662,21 +650,21 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right,
}
{
sad[-1] = sad[1];
sad[ndisp] = sad[ndisp-2];
int p = sad[mind+1], n = sad[mind-1];
d = p + n - 2*sad[mind] + std::abs(p - n);
dptr[y*dstep] = (short)(((ndisp - mind - 1 + mindisp)*256 + (d != 0 ? (p-n)*256/d : 0) + 15) >> 4);
costptr[y*coststep] = sad[mind];
sad[-1] = sad[1];
sad[ndisp] = sad[ndisp-2];
int p = sad[mind+1], n = sad[mind-1];
d = p + n - 2*sad[mind] + std::abs(p - n);
dptr[y*dstep] = (short)(((ndisp - mind - 1 + mindisp)*256 + (d != 0 ? (p-n)*256/d : 0) + 15) >> 4);
costptr[y*coststep] = sad[mind];
}
}
}
}
struct PrefilterInvoker
struct PrefilterInvoker : public ParallelLoopBody
{
PrefilterInvoker(const Mat& left0, const Mat& right0, Mat& left, Mat& right,
uchar* buf0, uchar* buf1, CvStereoBMState* _state )
uchar* buf0, uchar* buf1, StereoBMParams* _state)
{
imgs0[0] = &left0; imgs0[1] = &right0;
imgs[0] = &left; imgs[1] = &right;
@@ -684,41 +672,47 @@ struct PrefilterInvoker
state = _state;
}
void operator()( int ind ) const
void operator()( const Range& range ) const
{
if( state->preFilterType == CV_STEREO_BM_NORMALIZED_RESPONSE )
prefilterNorm( *imgs0[ind], *imgs[ind], state->preFilterSize, state->preFilterCap, buf[ind] );
else
prefilterXSobel( *imgs0[ind], *imgs[ind], state->preFilterCap );
for( int i = range.start; i < range.end; i++ )
{
if( state->preFilterType == StereoBM::PREFILTER_NORMALIZED_RESPONSE )
prefilterNorm( *imgs0[i], *imgs[i], state->preFilterSize, state->preFilterCap, buf[i] );
else
prefilterXSobel( *imgs0[i], *imgs[i], state->preFilterCap );
}
}
const Mat* imgs0[2];
Mat* imgs[2];
uchar* buf[2];
CvStereoBMState *state;
StereoBMParams* state;
};
struct FindStereoCorrespInvoker
struct FindStereoCorrespInvoker : public ParallelLoopBody
{
FindStereoCorrespInvoker( const Mat& _left, const Mat& _right,
Mat& _disp, CvStereoBMState* _state,
int _nstripes, int _stripeBufSize,
bool _useShorts, Rect _validDisparityRect )
Mat& _disp, StereoBMParams* _state,
int _nstripes, size_t _stripeBufSize,
bool _useShorts, Rect _validDisparityRect,
Mat& _slidingSumBuf, Mat& _cost )
{
left = &_left; right = &_right;
disp = &_disp; state = _state;
nstripes = _nstripes; stripeBufSize = _stripeBufSize;
useShorts = _useShorts;
validDisparityRect = _validDisparityRect;
slidingSumBuf = &_slidingSumBuf;
cost = &_cost;
}
void operator()( const BlockedRange& range ) const
void operator()( const Range& range ) const
{
int cols = left->cols, rows = left->rows;
int _row0 = min(cvRound(range.begin() * rows / nstripes), rows);
int _row1 = min(cvRound(range.end() * rows / nstripes), rows);
uchar *ptr = state->slidingSumBuf->data.ptr + range.begin() * stripeBufSize;
int _row0 = std::min(cvRound(range.start * rows / nstripes), rows);
int _row1 = std::min(cvRound(range.end * rows / nstripes), rows);
uchar *ptr = slidingSumBuf->data + range.start * stripeBufSize;
int FILTERED = (state->minDisparity - 1)*16;
Rect roi = validDisparityRect & Rect(0, _row0, cols, _row1 - _row0);
@@ -742,7 +736,7 @@ struct FindStereoCorrespInvoker
Mat left_i = left->rowRange(row0, row1);
Mat right_i = right->rowRange(row0, row1);
Mat disp_i = disp->rowRange(row0, row1);
Mat cost_i = state->disp12MaxDiff >= 0 ? Mat(state->cost).rowRange(row0, row1) : Mat();
Mat cost_i = state->disp12MaxDiff >= 0 ? cost->rowRange(row0, row1) : Mat();
#if CV_SSE2
if( useShorts )
@@ -752,7 +746,7 @@ struct FindStereoCorrespInvoker
findStereoCorrespondenceBM( left_i, right_i, disp_i, cost_i, *state, ptr, row0, rows - row1 );
if( state->disp12MaxDiff >= 0 )
validateDisparity( disp_i, cost_i, state->minDisparity, state->numberOfDisparities, state->disp12MaxDiff );
validateDisparity( disp_i, cost_i, state->minDisparity, state->numDisparities, state->disp12MaxDiff );
if( roi.x > 0 )
{
@@ -768,185 +762,236 @@ struct FindStereoCorrespInvoker
protected:
const Mat *left, *right;
Mat* disp;
CvStereoBMState *state;
Mat* disp, *slidingSumBuf, *cost;
StereoBMParams *state;
int nstripes;
int stripeBufSize;
size_t stripeBufSize;
bool useShorts;
Rect validDisparityRect;
};
static void findStereoCorrespondenceBM( const Mat& left0, const Mat& right0, Mat& disp0, CvStereoBMState* state)
class StereoBMImpl : public StereoBM
{
if (left0.size() != right0.size() || disp0.size() != left0.size())
CV_Error( CV_StsUnmatchedSizes, "All the images must have the same size" );
if (left0.type() != CV_8UC1 || right0.type() != CV_8UC1)
CV_Error( CV_StsUnsupportedFormat, "Both input images must have CV_8UC1" );
if (disp0.type() != CV_16SC1 && disp0.type() != CV_32FC1)
CV_Error( CV_StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
if( !state )
CV_Error( CV_StsNullPtr, "Stereo BM state is NULL." );
if( state->preFilterType != CV_STEREO_BM_NORMALIZED_RESPONSE && state->preFilterType != CV_STEREO_BM_XSOBEL )
CV_Error( CV_StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );
if( state->preFilterSize < 5 || state->preFilterSize > 255 || state->preFilterSize % 2 == 0 )
CV_Error( CV_StsOutOfRange, "preFilterSize must be odd and be within 5..255" );
if( state->preFilterCap < 1 || state->preFilterCap > 63 )
CV_Error( CV_StsOutOfRange, "preFilterCap must be within 1..63" );
if( state->SADWindowSize < 5 || state->SADWindowSize > 255 || state->SADWindowSize % 2 == 0 ||
state->SADWindowSize >= min(left0.cols, left0.rows) )
CV_Error( CV_StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );
if( state->numberOfDisparities <= 0 || state->numberOfDisparities % 16 != 0 )
CV_Error( CV_StsOutOfRange, "numberOfDisparities must be positive and divisble by 16" );
if( state->textureThreshold < 0 )
CV_Error( CV_StsOutOfRange, "texture threshold must be non-negative" );
if( state->uniquenessRatio < 0 )
CV_Error( CV_StsOutOfRange, "uniqueness ratio must be non-negative" );
if( !state->preFilteredImg0 || state->preFilteredImg0->cols * state->preFilteredImg0->rows < left0.cols * left0.rows )
public:
StereoBMImpl()
{
cvReleaseMat( &state->preFilteredImg0 );
cvReleaseMat( &state->preFilteredImg1 );
cvReleaseMat( &state->cost );
state->preFilteredImg0 = cvCreateMat( left0.rows, left0.cols, CV_8U );
state->preFilteredImg1 = cvCreateMat( left0.rows, left0.cols, CV_8U );
state->cost = cvCreateMat( left0.rows, left0.cols, CV_16S );
}
Mat left(left0.size(), CV_8U, state->preFilteredImg0->data.ptr);
Mat right(right0.size(), CV_8U, state->preFilteredImg1->data.ptr);
int mindisp = state->minDisparity;
int ndisp = state->numberOfDisparities;
int width = left0.cols;
int height = left0.rows;
int lofs = max(ndisp - 1 + mindisp, 0);
int rofs = -min(ndisp - 1 + mindisp, 0);
int width1 = width - rofs - ndisp + 1;
int FILTERED = (state->minDisparity - 1) << DISPARITY_SHIFT;
if( lofs >= width || rofs >= width || width1 < 1 )
{
disp0 = Scalar::all( FILTERED * ( disp0.type() < CV_32F ? 1 : 1./(1 << DISPARITY_SHIFT) ) );
return;
params = StereoBMParams();
}
Mat disp = disp0;
if( disp0.type() == CV_32F)
StereoBMImpl( int _numDisparities, int _SADWindowSize )
{
if( !state->disp || state->disp->rows != disp0.rows || state->disp->cols != disp0.cols )
params = StereoBMParams(_numDisparities, _SADWindowSize);
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
{
Mat left0 = leftarr.getMat(), right0 = rightarr.getMat();
int dtype = disparr.fixedType() ? disparr.type() : params.dispType;
if (left0.size() != right0.size())
CV_Error( Error::StsUnmatchedSizes, "All the images must have the same size" );
if (left0.type() != CV_8UC1 || right0.type() != CV_8UC1)
CV_Error( Error::StsUnsupportedFormat, "Both input images must have CV_8UC1" );
if (dtype != CV_16SC1 && dtype != CV_32FC1)
CV_Error( Error::StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
disparr.create(left0.size(), dtype);
Mat disp0 = disparr.getMat();
if( params.preFilterType != PREFILTER_NORMALIZED_RESPONSE &&
params.preFilterType != PREFILTER_XSOBEL )
CV_Error( Error::StsOutOfRange, "preFilterType must be = CV_STEREO_BM_NORMALIZED_RESPONSE" );
if( params.preFilterSize < 5 || params.preFilterSize > 255 || params.preFilterSize % 2 == 0 )
CV_Error( Error::StsOutOfRange, "preFilterSize must be odd and be within 5..255" );
if( params.preFilterCap < 1 || params.preFilterCap > 63 )
CV_Error( Error::StsOutOfRange, "preFilterCap must be within 1..63" );
if( params.SADWindowSize < 5 || params.SADWindowSize > 255 || params.SADWindowSize % 2 == 0 ||
params.SADWindowSize >= std::min(left0.cols, left0.rows) )
CV_Error( Error::StsOutOfRange, "SADWindowSize must be odd, be within 5..255 and be not larger than image width or height" );
if( params.numDisparities <= 0 || params.numDisparities % 16 != 0 )
CV_Error( Error::StsOutOfRange, "numDisparities must be positive and divisble by 16" );
if( params.textureThreshold < 0 )
CV_Error( Error::StsOutOfRange, "texture threshold must be non-negative" );
if( params.uniquenessRatio < 0 )
CV_Error( Error::StsOutOfRange, "uniqueness ratio must be non-negative" );
preFilteredImg0.create( left0.size(), CV_8U );
preFilteredImg1.create( left0.size(), CV_8U );
cost.create( left0.size(), CV_16S );
Mat left = preFilteredImg0, right = preFilteredImg1;
int mindisp = params.minDisparity;
int ndisp = params.numDisparities;
int width = left0.cols;
int height = left0.rows;
int lofs = std::max(ndisp - 1 + mindisp, 0);
int rofs = -std::min(ndisp - 1 + mindisp, 0);
int width1 = width - rofs - ndisp + 1;
int FILTERED = (params.minDisparity - 1) << DISPARITY_SHIFT;
if( lofs >= width || rofs >= width || width1 < 1 )
{
cvReleaseMat( &state->disp );
state->disp = cvCreateMat(disp0.rows, disp0.cols, CV_16S);
disp0 = Scalar::all( FILTERED * ( disp0.type() < CV_32F ? 1 : 1./(1 << DISPARITY_SHIFT) ) );
return;
}
disp = cv::cvarrToMat(state->disp);
}
int wsz = state->SADWindowSize;
int bufSize0 = (int)((ndisp + 2)*sizeof(int));
bufSize0 += (int)((height+wsz+2)*ndisp*sizeof(int));
bufSize0 += (int)((height + wsz + 2)*sizeof(int));
bufSize0 += (int)((height+wsz+2)*ndisp*(wsz+2)*sizeof(uchar) + 256);
Mat disp = disp0;
if( dtype == CV_32F )
{
dispbuf.create(disp0.size(), CV_16S);
disp = dispbuf;
}
int bufSize1 = (int)((width + state->preFilterSize + 2) * sizeof(int) + 256);
int bufSize2 = 0;
if( state->speckleRange >= 0 && state->speckleWindowSize > 0 )
bufSize2 = width*height*(sizeof(cv::Point_<short>) + sizeof(int) + sizeof(uchar));
int wsz = params.SADWindowSize;
int bufSize0 = (int)((ndisp + 2)*sizeof(int));
bufSize0 += (int)((height+wsz+2)*ndisp*sizeof(int));
bufSize0 += (int)((height + wsz + 2)*sizeof(int));
bufSize0 += (int)((height+wsz+2)*ndisp*(wsz+2)*sizeof(uchar) + 256);
int bufSize1 = (int)((width + params.preFilterSize + 2) * sizeof(int) + 256);
int bufSize2 = 0;
if( params.speckleRange >= 0 && params.speckleWindowSize > 0 )
bufSize2 = width*height*(sizeof(Point_<short>) + sizeof(int) + sizeof(uchar));
#if CV_SSE2
bool useShorts = state->preFilterCap <= 31 && state->SADWindowSize <= 21 && checkHardwareSupport(CV_CPU_SSE2);
bool useShorts = params.preFilterCap <= 31 && params.SADWindowSize <= 21 && checkHardwareSupport(CV_CPU_SSE2);
#else
const bool useShorts = false;
const bool useShorts = false;
#endif
#ifdef HAVE_TBB
const double SAD_overhead_coeff = 10.0;
double N0 = 8000000 / (useShorts ? 1 : 4); // approx tbb's min number instructions reasonable for one thread
double maxStripeSize = min(max(N0 / (width * ndisp), (wsz-1) * SAD_overhead_coeff), (double)height);
int nstripes = cvCeil(height / maxStripeSize);
#else
const int nstripes = 1;
#endif
const double SAD_overhead_coeff = 10.0;
double N0 = 8000000 / (useShorts ? 1 : 4); // approx tbb's min number instructions reasonable for one thread
double maxStripeSize = std::min(std::max(N0 / (width * ndisp), (wsz-1) * SAD_overhead_coeff), (double)height);
int nstripes = cvCeil(height / maxStripeSize);
int bufSize = std::max(bufSize0 * nstripes, std::max(bufSize1 * 2, bufSize2));
int bufSize = max(bufSize0 * nstripes, max(bufSize1 * 2, bufSize2));
if( slidingSumBuf.cols < bufSize )
slidingSumBuf.create( 1, bufSize, CV_8U );
if( !state->slidingSumBuf || state->slidingSumBuf->cols < bufSize )
{
cvReleaseMat( &state->slidingSumBuf );
state->slidingSumBuf = cvCreateMat( 1, bufSize, CV_8U );
uchar *_buf = slidingSumBuf.data;
parallel_for_(Range(0, 2), PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, &params), 1);
Rect validDisparityRect(0, 0, width, height), R1 = params.roi1, R2 = params.roi2;
validDisparityRect = getValidDisparityROI(R1.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
R2.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
params.minDisparity, params.numDisparities,
params.SADWindowSize);
parallel_for_(Range(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, &params, nstripes,
bufSize0, useShorts, validDisparityRect,
slidingSumBuf, cost));
if( params.speckleRange >= 0 && params.speckleWindowSize > 0 )
filterSpeckles(disp, FILTERED, params.speckleWindowSize, params.speckleRange, slidingSumBuf);
if (disp0.data != disp.data)
disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
}
uchar *_buf = state->slidingSumBuf->data.ptr;
int idx[] = {0,1};
parallel_do(idx, idx+2, PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, state));
AlgorithmInfo* info() const { return 0; }
Rect validDisparityRect(0, 0, width, height), R1 = state->roi1, R2 = state->roi2;
validDisparityRect = getValidDisparityROI(R1.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
R2.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
state->minDisparity, state->numberOfDisparities,
state->SADWindowSize);
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
parallel_for(BlockedRange(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, state, nstripes,
bufSize0, useShorts, validDisparityRect));
int getNumDisparities() const { return params.numDisparities; }
void setNumDisparities(int numDisparities) { params.numDisparities = numDisparities; }
if( state->speckleRange >= 0 && state->speckleWindowSize > 0 )
int getBlockSize() const { return params.SADWindowSize; }
void setBlockSize(int blockSize) { params.SADWindowSize = blockSize; }
int getSpeckleWindowSize() const { return params.speckleWindowSize; }
void setSpeckleWindowSize(int speckleWindowSize) { params.speckleWindowSize = speckleWindowSize; }
int getSpeckleRange() const { return params.speckleRange; }
void setSpeckleRange(int speckleRange) { params.speckleRange = speckleRange; }
int getDisp12MaxDiff() const { return params.disp12MaxDiff; }
void setDisp12MaxDiff(int disp12MaxDiff) { params.disp12MaxDiff = disp12MaxDiff; }
int getPreFilterType() const { return params.preFilterType; }
void setPreFilterType(int preFilterType) { params.preFilterType = preFilterType; }
int getPreFilterSize() const { return params.preFilterSize; }
void setPreFilterSize(int preFilterSize) { params.preFilterSize = preFilterSize; }
int getPreFilterCap() const { return params.preFilterCap; }
void setPreFilterCap(int preFilterCap) { params.preFilterCap = preFilterCap; }
int getTextureThreshold() const { return params.textureThreshold; }
void setTextureThreshold(int textureThreshold) { params.textureThreshold = textureThreshold; }
int getUniquenessRatio() const { return params.uniquenessRatio; }
void setUniquenessRatio(int uniquenessRatio) { params.uniquenessRatio = uniquenessRatio; }
int getSmallerBlockSize() const { return 0; }
void setSmallerBlockSize(int) {}
Rect getROI1() const { return params.roi1; }
void setROI1(Rect roi1) { params.roi1 = roi1; }
Rect getROI2() const { return params.roi2; }
void setROI2(Rect roi2) { params.roi2 = roi2; }
void write(FileStorage& fs) const
{
Mat buf(state->slidingSumBuf);
filterSpeckles(disp, FILTERED, state->speckleWindowSize, state->speckleRange, buf);
fs << "name" << name_
<< "minDisparity" << params.minDisparity
<< "numDisparities" << params.numDisparities
<< "blockSize" << params.SADWindowSize
<< "speckleWindowSize" << params.speckleWindowSize
<< "speckleRange" << params.speckleRange
<< "disp12MaxDiff" << params.disp12MaxDiff
<< "preFilterType" << params.preFilterType
<< "preFilterSize" << params.preFilterSize
<< "preFilterCap" << params.preFilterCap
<< "textureThreshold" << params.textureThreshold
<< "uniquenessRatio" << params.uniquenessRatio;
}
if (disp0.data != disp.data)
disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];
params.speckleWindowSize = (int)fn["speckleWindowSize"];
params.speckleRange = (int)fn["speckleRange"];
params.disp12MaxDiff = (int)fn["disp12MaxDiff"];
params.preFilterType = (int)fn["preFilterType"];
params.preFilterSize = (int)fn["preFilterSize"];
params.preFilterCap = (int)fn["preFilterCap"];
params.textureThreshold = (int)fn["textureThreshold"];
params.uniquenessRatio = (int)fn["uniquenessRatio"];
params.roi1 = params.roi2 = Rect();
}
StereoBMParams params;
Mat preFilteredImg0, preFilteredImg1, cost, dispbuf;
Mat slidingSumBuf;
static const char* name_;
};
const char* StereoBMImpl::name_ = "StereoMatcher.BM";
}
StereoBM::StereoBM()
{ state = cvCreateStereoBMState(); }
StereoBM::StereoBM(int _preset, int _ndisparities, int _SADWindowSize)
{ init(_preset, _ndisparities, _SADWindowSize); }
void StereoBM::init(int _preset, int _ndisparities, int _SADWindowSize)
cv::Ptr<cv::StereoBM> cv::createStereoBM(int _numDisparities, int _SADWindowSize)
{
state = cvCreateStereoBMState(_preset, _ndisparities);
state->SADWindowSize = _SADWindowSize;
}
void StereoBM::operator()( InputArray _left, InputArray _right,
OutputArray _disparity, int disptype )
{
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( disptype == CV_16S || disptype == CV_32F );
_disparity.create(left.size(), disptype);
Mat disparity = _disparity.getMat();
findStereoCorrespondenceBM(left, right, disparity, state);
}
template<> void Ptr<CvStereoBMState>::delete_obj()
{ cvReleaseStereoBMState(&obj); }
}
CV_IMPL void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
CvArr* disparr, CvStereoBMState* state )
{
cv::Mat left = cv::cvarrToMat(leftarr),
right = cv::cvarrToMat(rightarr),
disp = cv::cvarrToMat(disparr);
cv::findStereoCorrespondenceBM(left, right, disp, state);
return new StereoBMImpl(_numDisparities, _SADWindowSize);
}
/* End of file. */
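// Usage sketch -- illustrative, not part of this file. The cvCreateStereoBMState /
// StereoBM::operator() pair is replaced by the factory plus the StereoMatcher
// interface; disparities come back as CV_16S with 4 fractional bits (value = 16 *
// disparity) unless a CV_32F destination is requested.
static void exampleStereoBM( const cv::Mat& leftGray, const cv::Mat& rightGray, cv::Mat& disp )
{
    cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(/*numDisparities=*/64, /*SADWindowSize=*/21);
    bm->setPreFilterCap(31);          // the parameter setters replace direct state access
    bm->setSpeckleWindowSize(100);    // illustrative values
    bm->setSpeckleRange(32);
    bm->compute(leftGray, rightGray, disp);
}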

View File

@@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -61,42 +62,52 @@ typedef short DispType;
enum { NR = 16, NR2 = NR/2 };
StereoSGBM::StereoSGBM()
struct StereoSGBMParams
{
minDisparity = numberOfDisparities = 0;
SADWindowSize = 0;
P1 = P2 = 0;
disp12MaxDiff = 0;
preFilterCap = 0;
uniquenessRatio = 0;
speckleWindowSize = 0;
speckleRange = 0;
fullDP = false;
}
StereoSGBMParams()
{
minDisparity = numDisparities = 0;
SADWindowSize = 0;
P1 = P2 = 0;
disp12MaxDiff = 0;
preFilterCap = 0;
uniquenessRatio = 0;
speckleWindowSize = 0;
speckleRange = 0;
mode = StereoSGBM::MODE_SGBM;
}
StereoSGBMParams( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
{
minDisparity = _minDisparity;
numDisparities = _numDisparities;
SADWindowSize = _SADWindowSize;
P1 = _P1;
P2 = _P2;
disp12MaxDiff = _disp12MaxDiff;
preFilterCap = _preFilterCap;
uniquenessRatio = _uniquenessRatio;
speckleWindowSize = _speckleWindowSize;
speckleRange = _speckleRange;
mode = _mode;
}
StereoSGBM::StereoSGBM( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
bool _fullDP )
{
minDisparity = _minDisparity;
numberOfDisparities = _numDisparities;
SADWindowSize = _SADWindowSize;
P1 = _P1;
P2 = _P2;
disp12MaxDiff = _disp12MaxDiff;
preFilterCap = _preFilterCap;
uniquenessRatio = _uniquenessRatio;
speckleWindowSize = _speckleWindowSize;
speckleRange = _speckleRange;
fullDP = _fullDP;
}
StereoSGBM::~StereoSGBM()
{
}
int minDisparity;
int numDisparities;
int SADWindowSize;
int preFilterCap;
int uniquenessRatio;
int P1;
int P2;
int speckleWindowSize;
int speckleRange;
int disp12MaxDiff;
int mode;
};
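// Illustrative note, not part of this file: the old boolean fullDP flag is folded into
// the integer "mode" field; StereoSGBM::MODE_HH selects the former fullDP == true
// (two-pass, full dynamic programming) path, while the default constructor keeps
// StereoSGBM::MODE_SGBM, e.g.:
//
//     StereoSGBMParams p;               // mode == StereoSGBM::MODE_SGBM
//     p.mode = StereoSGBM::MODE_HH;     // equivalent to the old fullDP = true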
/*
For each pixel row1[x], max(-maxD, 0) <= minX <= x < maxX <= width - max(0, -minD),
@@ -114,8 +125,8 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int tabOfs, int )
{
int x, c, width = img1.cols, cn = img1.channels();
int minX1 = max(maxD, 0), maxX1 = width + min(minD, 0);
int minX2 = max(minX1 - maxD, 0), maxX2 = min(maxX1 - minD, width);
int minX1 = std::max(maxD, 0), maxX1 = width + std::min(minD, 0);
int minX2 = std::max(minX1 - maxD, 0), maxX2 = std::min(maxX1 - minD, width);
int D = maxD - minD, width1 = maxX1 - minX1, width2 = maxX2 - minX2;
const PixType *row1 = img1.ptr<PixType>(y), *row2 = img2.ptr<PixType>(y);
PixType *prow1 = buffer + width2*2, *prow2 = prow1 + width*cn*2;
@@ -186,8 +197,8 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int v = prow2[x];
int vl = x > 0 ? (v + prow2[x-1])/2 : v;
int vr = x < width-1 ? (v + prow2[x+1])/2 : v;
int v0 = min(vl, vr); v0 = min(v0, v);
int v1 = max(vl, vr); v1 = max(v1, v);
int v0 = std::min(vl, vr); v0 = std::min(v0, v);
int v1 = std::max(vl, vr); v1 = std::max(v1, v);
buffer[x] = (PixType)v0;
buffer[x + width2] = (PixType)v1;
}
@@ -197,8 +208,8 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int u = prow1[x];
int ul = x > 0 ? (u + prow1[x-1])/2 : u;
int ur = x < width-1 ? (u + prow1[x+1])/2 : u;
int u0 = min(ul, ur); u0 = min(u0, u);
int u1 = max(ul, ur); u1 = max(u1, u);
int u0 = std::min(ul, ur); u0 = std::min(u0, u);
int u1 = std::max(ul, ur); u1 = std::max(u1, u);
#if CV_SSE2
if( useSIMD )
@@ -231,10 +242,10 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int v = prow2[width-x-1 + d];
int v0 = buffer[width-x-1 + d];
int v1 = buffer[width-x-1 + d + width2];
int c0 = max(0, u - v1); c0 = max(c0, v0 - u);
int c1 = max(0, v - u1); c1 = max(c1, u0 - v);
int c0 = std::max(0, u - v1); c0 = std::max(c0, v0 - u);
int c1 = std::max(0, v - u1); c1 = std::max(c1, u0 - v);
cost[x*D + d] = (CostType)(cost[x*D+d] + (min(c0, c1) >> diff_scale));
cost[x*D + d] = (CostType)(cost[x*D+d] + (std::min(c0, c1) >> diff_scale));
}
}
}
@@ -289,7 +300,7 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
final after all the tiles are processed.
the disparity in disp1buf is written with sub-pixel accuracy
(4 fractional bits, see CvStereoSGBM::DISP_SCALE),
(4 fractional bits, see StereoSGBM::DISP_SCALE),
using quadratic interpolation, while the disparity in disp2buf
is written as is, without interpolation.
@@ -297,7 +308,7 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
It contains the current minimum cost, which is used to select the best (minimal-cost) disparity.
*/
static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
Mat& disp1, const StereoSGBM& params,
Mat& disp1, const StereoSGBMParams& params,
Mat& buffer )
{
#if CV_SSE2
@@ -317,28 +328,29 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
#endif
const int ALIGN = 16;
const int DISP_SHIFT = StereoSGBM::DISP_SHIFT;
const int DISP_SCALE = StereoSGBM::DISP_SCALE;
const int DISP_SHIFT = StereoMatcher::DISP_SHIFT;
const int DISP_SCALE = (1 << DISP_SHIFT);
const CostType MAX_COST = SHRT_MAX;
int minD = params.minDisparity, maxD = minD + params.numberOfDisparities;
int minD = params.minDisparity, maxD = minD + params.numDisparities;
Size SADWindowSize;
SADWindowSize.width = SADWindowSize.height = params.SADWindowSize > 0 ? params.SADWindowSize : 5;
int ftzero = max(params.preFilterCap, 15) | 1;
int ftzero = std::max(params.preFilterCap, 15) | 1;
int uniquenessRatio = params.uniquenessRatio >= 0 ? params.uniquenessRatio : 10;
int disp12MaxDiff = params.disp12MaxDiff > 0 ? params.disp12MaxDiff : 1;
int P1 = params.P1 > 0 ? params.P1 : 2, P2 = max(params.P2 > 0 ? params.P2 : 5, P1+1);
int P1 = params.P1 > 0 ? params.P1 : 2, P2 = std::max(params.P2 > 0 ? params.P2 : 5, P1+1);
int k, width = disp1.cols, height = disp1.rows;
int minX1 = max(maxD, 0), maxX1 = width + min(minD, 0);
int minX1 = std::max(maxD, 0), maxX1 = width + std::min(minD, 0);
int D = maxD - minD, width1 = maxX1 - minX1;
int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE;
int SW2 = SADWindowSize.width/2, SH2 = SADWindowSize.height/2;
int npasses = params.fullDP ? 2 : 1;
bool fullDP = params.mode == StereoSGBM::MODE_HH;
int npasses = fullDP ? 2 : 1;
const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2;
PixType clipTab[TAB_SIZE];
for( k = 0; k < TAB_SIZE; k++ )
clipTab[k] = (PixType)(min(max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
clipTab[k] = (PixType)(std::min(std::max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
if( minX1 >= maxX1 )
{
@@ -362,7 +374,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
// we keep pixel difference cost (C) and the summary cost over NR directions (S).
// we also keep all the partial costs for the previous line L_r(x,d) and also min_k L_r(x, k)
size_t costBufSize = width1*D;
size_t CSBufSize = costBufSize*(params.fullDP ? height : 1);
size_t CSBufSize = costBufSize*(fullDP ? height : 1);
size_t minLrSize = (width1 + LrBorder*2)*NR2, LrSize = minLrSize*D2;
int hsumBufNRows = SH2*2 + 2;
size_t totalBufSize = (LrSize + minLrSize)*NLR*sizeof(CostType) + // minLr[] and Lr[]
@@ -423,8 +435,8 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
{
int x, d;
DispType* disp1ptr = disp1.ptr<DispType>(y);
CostType* C = Cbuf + (!params.fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!params.fullDP ? 0 : y*costBufSize);
CostType* C = Cbuf + (!fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!fullDP ? 0 : y*costBufSize);
if( pass == 1 ) // compute C on the first pass, and reuse it on the second pass, if any.
{
@@ -432,7 +444,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
for( k = dy1; k <= dy2; k++ )
{
CostType* hsumAdd = hsumBuf + (min(k, height-1) % hsumBufNRows)*costBufSize;
CostType* hsumAdd = hsumBuf + (std::min(k, height-1) % hsumBufNRows)*costBufSize;
if( k < height )
{
@@ -448,13 +460,13 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
if( y > 0 )
{
const CostType* hsumSub = hsumBuf + (max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !params.fullDP || y == 0 ? C : C - costBufSize;
const CostType* hsumSub = hsumBuf + (std::max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !fullDP || y == 0 ? C : C - costBufSize;
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
#if CV_SSE2
if( useSIMD )
@@ -488,8 +500,8 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
{
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
for( d = 0; d < D; d++ )
hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
@@ -630,22 +642,22 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
{
int Cpd = Cp[d], L0, L1, L2, L3;
L0 = Cpd + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
L1 = Cpd + min((int)Lr_p1[d], min(Lr_p1[d-1] + P1, min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + min((int)Lr_p2[d], min(Lr_p2[d-1] + P1, min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + min((int)Lr_p3[d], min(Lr_p3[d-1] + P1, min(Lr_p3[d+1] + P1, delta3))) - delta3;
L0 = Cpd + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
L1 = Cpd + std::min((int)Lr_p1[d], std::min(Lr_p1[d-1] + P1, std::min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + std::min((int)Lr_p2[d], std::min(Lr_p2[d-1] + P1, std::min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + std::min((int)Lr_p3[d], std::min(Lr_p3[d-1] + P1, std::min(Lr_p3[d+1] + P1, delta3))) - delta3;
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
minL0 = std::min(minL0, L0);
Lr_p[d + D2] = (CostType)L1;
minL1 = min(minL1, L1);
minL1 = std::min(minL1, L1);
Lr_p[d + D2*2] = (CostType)L2;
minL2 = min(minL2, L2);
minL2 = std::min(minL2, L2);
Lr_p[d + D2*3] = (CostType)L3;
minL3 = min(minL3, L3);
minL3 = std::min(minL3, L3);
Sp[d] = saturate_cast<CostType>(Sp[d] + L0 + L1 + L2 + L3);
}
@@ -737,10 +749,10 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
{
for( d = 0; d < D; d++ )
{
int L0 = Cp[d] + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
int L0 = Cp[d] + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
minL0 = std::min(minL0, L0);
int Sval = Sp[d] = saturate_cast<CostType>(Sp[d] + L0);
if( Sval < minS )
@@ -785,7 +797,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
// do subpixel quadratic interpolation:
// fit parabola into (x1=d-1, y1=Sp[d-1]), (x2=d, y2=Sp[d]), (x3=d+1, y3=Sp[d+1])
// then find minimum of the parabola.
int denom2 = max(Sp[d-1] + Sp[d+1] - 2*Sp[d], 1);
int denom2 = std::max(Sp[d-1] + Sp[d+1] - 2*Sp[d], 1);
d = d*DISP_SCALE + ((Sp[d-1] - Sp[d+1])*DISP_SCALE + denom2)/(denom2*2);
}
else
@@ -817,26 +829,131 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
}
}
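Editorial note on the quadratic interpolation step above: fitting a parabola through (d-1, S_{d-1}), (d, S_d), (d+1, S_{d+1}) and taking its vertex gives

    d^{*} = d + \frac{S_{d-1} - S_{d+1}}{2\,(S_{d-1} + S_{d+1} - 2 S_d)}

The code evaluates this in fixed point: denom2 is clamped to at least 1, the fraction is scaled by DISP_SCALE = 16, and adding denom2 before dividing by 2*denom2 rounds to the nearest integer.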
typedef cv::Point_<short> Point2s;
void StereoSGBM::operator ()( InputArray _left, InputArray _right,
OutputArray _disp )
class StereoSGBMImpl : public StereoSGBM
{
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == DataType<PixType>::depth );
public:
StereoSGBMImpl()
{
params = StereoSGBMParams();
}
_disp.create( left.size(), CV_16S );
Mat disp = _disp.getMat();
StereoSGBMImpl( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
{
params = StereoSGBMParams( _minDisparity, _numDisparities, _SADWindowSize,
_P1, _P2, _disp12MaxDiff, _preFilterCap,
_uniquenessRatio, _speckleWindowSize, _speckleRange,
_mode );
}
computeDisparitySGBM( left, right, disp, *this, buffer );
medianBlur(disp, disp, 3);
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
{
Mat left = leftarr.getMat(), right = rightarr.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == CV_8U );
if( speckleWindowSize > 0 )
filterSpeckles(disp, (minDisparity - 1)*DISP_SCALE, speckleWindowSize, DISP_SCALE*speckleRange, buffer);
disparr.create( left.size(), CV_16S );
Mat disp = disparr.getMat();
computeDisparitySGBM( left, right, disp, params, buffer );
medianBlur(disp, disp, 3);
if( params.speckleWindowSize > 0 )
filterSpeckles(disp, (params.minDisparity - 1)*StereoMatcher::DISP_SCALE, params.speckleWindowSize,
StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
}
AlgorithmInfo* info() const { return 0; }
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
int getNumDisparities() const { return params.numDisparities; }
void setNumDisparities(int numDisparities) { params.numDisparities = numDisparities; }
int getBlockSize() const { return params.SADWindowSize; }
void setBlockSize(int blockSize) { params.SADWindowSize = blockSize; }
int getSpeckleWindowSize() const { return params.speckleWindowSize; }
void setSpeckleWindowSize(int speckleWindowSize) { params.speckleWindowSize = speckleWindowSize; }
int getSpeckleRange() const { return params.speckleRange; }
void setSpeckleRange(int speckleRange) { params.speckleRange = speckleRange; }
int getDisp12MaxDiff() const { return params.disp12MaxDiff; }
void setDisp12MaxDiff(int disp12MaxDiff) { params.disp12MaxDiff = disp12MaxDiff; }
int getPreFilterCap() const { return params.preFilterCap; }
void setPreFilterCap(int preFilterCap) { params.preFilterCap = preFilterCap; }
int getUniquenessRatio() const { return params.uniquenessRatio; }
void setUniquenessRatio(int uniquenessRatio) { params.uniquenessRatio = uniquenessRatio; }
int getP1() const { return params.P1; }
void setP1(int P1) { params.P1 = P1; }
int getP2() const { return params.P2; }
void setP2(int P2) { params.P2 = P2; }
int getMode() const { return params.mode; }
void setMode(int mode) { params.mode = mode; }
void write(FileStorage& fs) const
{
fs << "name" << name_
<< "minDisparity" << params.minDisparity
<< "numDisparities" << params.numDisparities
<< "blockSize" << params.SADWindowSize
<< "speckleWindowSize" << params.speckleWindowSize
<< "speckleRange" << params.speckleRange
<< "disp12MaxDiff" << params.disp12MaxDiff
<< "preFilterCap" << params.preFilterCap
<< "uniquenessRatio" << params.uniquenessRatio
<< "P1" << params.P1
<< "P2" << params.P2
<< "mode" << params.mode;
}
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];
params.speckleWindowSize = (int)fn["speckleWindowSize"];
params.speckleRange = (int)fn["speckleRange"];
params.disp12MaxDiff = (int)fn["disp12MaxDiff"];
params.preFilterCap = (int)fn["preFilterCap"];
params.uniquenessRatio = (int)fn["uniquenessRatio"];
params.P1 = (int)fn["P1"];
params.P2 = (int)fn["P2"];
params.mode = (int)fn["mode"];
}
StereoSGBMParams params;
Mat buffer;
static const char* name_;
};
const char* StereoSGBMImpl::name_ = "StereoMatcher.SGBM";
Ptr<StereoSGBM> createStereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
int P1, int P2, int disp12MaxDiff,
int preFilterCap, int uniquenessRatio,
int speckleWindowSize, int speckleRange,
int mode)
{
return new StereoSGBMImpl(minDisparity, numDisparities, SADWindowSize,
P1, P2, disp12MaxDiff,
preFilterCap, uniquenessRatio,
speckleWindowSize, speckleRange,
mode);
}
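Editor's sketch (not part of the patch): with the refactored interface, the matcher is obtained through the createStereoSGBM() factory introduced here and run via compute() instead of the old operator(). File names and parameter values below are illustrative assumptions; the inputs must be 8-bit images of equal size, as asserted in StereoSGBMImpl::compute(), and the output is a CV_16S map scaled by DISP_SCALE = 16.

#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    // hypothetical rectified stereo pair; any equally sized 8-bit pair works
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

    cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(
        /*minDisparity*/ 0, /*numDisparities*/ 64, /*SADWindowSize*/ 5,
        /*P1*/ 200, /*P2*/ 800, /*disp12MaxDiff*/ 1, /*preFilterCap*/ 31,
        /*uniquenessRatio*/ 10, /*speckleWindowSize*/ 100, /*speckleRange*/ 32,
        cv::StereoSGBM::MODE_HH);   // full two-pass (fullDP) mode, as referenced in computeDisparitySGBM

    cv::Mat disp16;                 // fixed-point disparities (CV_16S)
    sgbm->compute(left, right, disp16);

    cv::Mat disp32;                 // real-valued disparities in pixels
    disp16.convertTo(disp32, CV_32F, 1.0 / 16);
    return 0;
}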
Rect getValidDisparityROI( Rect roi1, Rect roi2,
int minDisparity,
int numberOfDisparities,
@@ -845,34 +962,30 @@ Rect getValidDisparityROI( Rect roi1, Rect roi2,
int SW2 = SADWindowSize/2;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities - 1;
int xmin = max(roi1.x, roi2.x + maxD) + SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int ymin = max(roi1.y, roi2.y) + SW2;
int ymax = min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;
int xmin = std::max(roi1.x, roi2.x + maxD) + SW2;
int xmax = std::min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int ymin = std::max(roi1.y, roi2.y) + SW2;
int ymax = std::min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;
Rect r(xmin, ymin, xmax - xmin, ymax - ymin);
return r.width > 0 && r.height > 0 ? r : Rect();
}
}
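Usage note (editorial, with assumed variable names): roi1 and roi2 would typically be the validPixROI rectangles returned by stereoRectify; the result marks the part of the disparity map unaffected by the block-window border and the disparity search range.

    // roi1, roi2: valid rectification ROIs (e.g. from stereoRectify); disp16 as computed above
    cv::Rect vroi = cv::getValidDisparityROI(roi1, roi2,
                                             /*minDisparity*/ 0, /*numberOfDisparities*/ 64, /*SADWindowSize*/ 5);
    cv::Mat trusted = disp16(vroi);           // crop to the reliable region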
typedef cv::Point_<short> Point2s;
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
template <typename T>
void filterSpecklesImpl(cv::Mat& img, int newVal, int maxSpeckleSize, int maxDiff, cv::Mat& _buf)
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_16SC1 );
using namespace cv;
int newVal = cvRound(_newval);
int maxDiff = cvRound(_maxDiff);
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
uchar* buf = _buf.data;
int i, j, dstep = (int)(img.step/sizeof(short));
int i, j, dstep = (int)(img.step/sizeof(T));
int* labels = (int*)buf;
buf += npixels*sizeof(labels[0]);
Point2s* wbuf = (Point2s*)buf;
@@ -885,7 +998,7 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
for( i = 0; i < height; i++ )
{
short* ds = img.ptr<short>(i);
T* ds = img.ptr<T>(i);
int* ls = labels + width*i;
for( j = 0; j < width; j++ )
@@ -895,7 +1008,7 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
if( ls[j] ) // has a label, check for bad label
{
if( rtype[ls[j]] ) // small region, zero out disparity
ds[j] = (short)newVal;
ds[j] = (T)newVal;
}
// no label, assign and propagate
else
@@ -911,8 +1024,8 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
{
count++;
// put neighbors onto wavefront
short* dpp = &img.at<short>(p.y, p.x);
short dp = *dpp;
T* dpp = &img.at<T>(p.y, p.x);
T dp = *dpp;
int* lpp = labels + width*p.y + p.x;
if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
@@ -948,7 +1061,7 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
if( count <= maxSpeckleSize ) // speckle region
{
rtype[ls[j]] = 1; // small region label
ds[j] = (short)newVal;
ds[j] = (T)newVal;
}
else
rtype[ls[j]] = 0; // large region label
@@ -958,13 +1071,31 @@ void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSi
}
}
}
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_8UC1 || img.type() == CV_16SC1 );
int newVal = cvRound(_newval);
int maxDiff = cvRound(_maxDiff);
if (img.type() == CV_8UC1)
filterSpecklesImpl<uchar>(img, newVal, maxSpeckleSize, maxDiff, _buf);
else
filterSpecklesImpl<short>(img, newVal, maxSpeckleSize, maxDiff, _buf);
}
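Editorial sketch of the widened interface: with the template dispatch above, filterSpeckles() now accepts CV_8UC1 as well as CV_16SC1 disparity maps. The values below are illustrative and mirror what StereoSGBMImpl::compute() passes internally for a CV_16S map (everything scaled by DISP_SCALE = 16).

    cv::Mat buf;                                     // optional scratch buffer, reusable between calls
    double newVal  = (0 /*minDisparity*/ - 1) * 16;  // marker written into rejected speckles
    double maxDiff = 32 /*speckleRange*/ * 16;       // max allowed difference within a blob
    cv::filterSpeckles(disp16, newVal, /*maxSpeckleSize*/ 100, maxDiff, buf);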
void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
Mat disp = _disp.getMat(), cost = _cost.getMat();
int cols = disp.cols, rows = disp.rows;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities;
int x, minX1 = max(maxD, 0), maxX1 = cols + min(minD, 0);
int x, minX1 = std::max(maxD, 0), maxX1 = cols + std::min(minD, 0);
AutoBuffer<int> _disp2buf(cols*2);
int* disp2buf = _disp2buf;
int* disp2cost = disp2buf + cols;
@@ -1039,16 +1170,3 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis
}
}
CvRect cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
int numberOfDisparities, int SADWindowSize )
{
return (CvRect)cv::getValidDisparityROI( roi1, roi2, minDisparity,
numberOfDisparities, SADWindowSize );
}
void cvValidateDisparity( CvArr* _disp, const CvArr* _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
cv::Mat disp = cv::cvarrToMat(_disp), cost = cv::cvarrToMat(_cost);
cv::validateDisparity( disp, cost, minDisparity, numberOfDisparities, disp12MaxDiff );
}

View File

@@ -40,6 +40,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
// cvCorrectMatches function is Copyright (C) 2009, Jostein Austvik Jacobsen.
// cvTriangulatePoints function is derived from icvReconstructPointsFor3View, originally by Valery Mosyagin.

View File

@@ -0,0 +1,81 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2013, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and / or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/core/affine.hpp"
#include "opencv2/calib3d.hpp"
#include <iostream>
TEST(Calib3d_Affine3f, accuracy)
{
cv::Vec3d rvec(0.2, 0.5, 0.3);
cv::Affine3d affine(rvec);
cv::Mat expected;
cv::Rodrigues(rvec, expected);
ASSERT_EQ(0, norm(cv::Mat(affine.matrix, false).colRange(0, 3).rowRange(0, 3) != expected));
ASSERT_EQ(0, norm(cv::Mat(affine.linear()) != expected));
cv::Matx33d R = cv::Matx33d::eye();
double angle = 50;
R.val[0] = R.val[4] = std::cos(CV_PI*angle/180.0);
R.val[3] = std::sin(CV_PI*angle/180.0);
R.val[1] = -R.val[3];
cv::Affine3d affine1(cv::Mat(cv::Vec3d(0.2, 0.5, 0.3)).reshape(1, 1), cv::Vec3d(4, 5, 6));
cv::Affine3d affine2(R, cv::Vec3d(1, 1, 0.4));
cv::Affine3d result = affine1.inv() * affine2;
expected = cv::Mat(affine1.matrix.inv(cv::DECOMP_SVD)) * cv::Mat(affine2.matrix, false);
cv::Mat diff;
cv::absdiff(expected, result.matrix, diff);
ASSERT_LT(cv::norm(diff, cv::NORM_INF), 1e-15);
}

View File

@@ -163,6 +163,8 @@ bool CV_Affine3D_EstTest::testNPoints()
const double thres = 1e-4;
if (norm(aff_est, aff, NORM_INF) > thres)
{
cout << "aff est: " << aff_est << endl;
cout << "aff ref: " << aff << endl;
ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
return false;
}

View File

@@ -40,9 +40,13 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <limits>
using namespace std;
using namespace cv;
#if 0
class CV_ProjectPointsTest : public cvtest::ArrayTest
{
@@ -241,8 +245,6 @@ CV_ProjectPointsTest ProjectPoints_test;
#endif
using namespace cv;
// --------------------------------- CV_CameraCalibrationTest --------------------------------------------
class CV_CameraCalibrationTest : public cvtest::BaseTest

View File

@@ -304,7 +304,7 @@ protected:
for(size_t i = 0; i < brdsNum; ++i)
{
Mat gray;
cvtColor(boards[i], gray, CV_BGR2GRAY);
cvtColor(boards[i], gray, COLOR_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
cornerSubPix(gray, tmp, Size(5, 5), Size(-1,-1), tc);
imagePoints.push_back(tmp);
@@ -314,7 +314,7 @@ protected:
for(size_t i = 0; i < brdsNum; ++i)
{
Mat gray;
cvtColor(boards[i], gray, CV_BGR2GRAY);
cvtColor(boards[i], gray, COLOR_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
find4QuadCornerSubpix(gray, tmp, Size(5, 5));
imagePoints.push_back(tmp);
@@ -327,7 +327,7 @@ protected:
Mat camMat_est = Mat::eye(3, 3, CV_64F), distCoeffs_est = Mat::zeros(1, 5, CV_64F);
vector<Mat> rvecs_est, tvecs_est;
int flags = /*CV_CALIB_FIX_K3|*/CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
int flags = /*CALIB_FIX_K3|*/CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6; //CALIB_FIX_K3; //CALIB_FIX_ASPECT_RATIO | | CALIB_ZERO_TANGENT_DIST;
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, DBL_EPSILON);
double rep_error = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs_est, tvecs_est, flags, criteria);
rep_error /= brdsNum * cornersSize.area();
@@ -426,4 +426,4 @@ protected:
}
};
TEST(Calib3d_CalibrateCamera_CPP, accuracy_on_artificial_data) { CV_CalibrateCameraArtificialTest test; test.safe_run(); }
TEST(Calib3d_CalibrateCamera_CPP, DISABLED_accuracy_on_artificial_data) { CV_CalibrateCameraArtificialTest test; test.safe_run(); }

View File

@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "test_chessboardgenerator.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>

View File

@@ -51,7 +51,7 @@ using namespace cv;
using namespace std;
ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
squareEdgePointsNum(200), min_cos(std::sqrt(2.f)*0.5f), cov(0.5),
patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
{
Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
@@ -161,15 +161,15 @@ Mat cv::ChessBoardGenerator::generateChessBoard(const Mat& bg, const Mat& camMat
if (rendererResolutionMultiplier == 1)
{
result = bg.clone();
drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(result, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(result, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
}
else
{
Mat tmp;
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
drawContours(tmp, whole_contour, -1, Scalar::all(255), FILLED, LINE_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), FILLED, LINE_AA);
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
}
@@ -178,7 +178,7 @@ Mat cv::ChessBoardGenerator::generateChessBoard(const Mat& bg, const Mat& camMat
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
{
cov = min(cov, 0.8);
cov = std::min(cov, 0.8);
double fovx, fovy, focalLen;
Point2d principalPoint;
double aspect;
@@ -199,7 +199,7 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
Point3f pb1, pb2;
generateBasis(pb1, pb2);
float cbHalfWidth = static_cast<float>(norm(p) * sin( min(fovx, fovy) * 0.5 * CV_PI / 180));
float cbHalfWidth = static_cast<float>(norm(p) * sin( std::min(fovx, fovy) * 0.5 * CV_PI / 180));
float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
float cbHalfWidthEx = cbHalfWidth * ( patternSize.width + 1) / patternSize.width;
@@ -243,7 +243,7 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
const Size2f& squareSize, vector<Point2f>& corners) const
{
cov = min(cov, 0.8);
cov = std::min(cov, 0.8);
double fovx, fovy, focalLen;
Point2d principalPoint;
double aspect;
@@ -302,7 +302,7 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
const Size2f& squareSize, const Point3f& pos, vector<Point2f>& corners) const
{
cov = min(cov, 0.8);
cov = std::min(cov, 0.8);
Point3f p = pos;
Point3f pb1, pb2;
generateBasis(pb1, pb2);

View File

@@ -1,7 +1,7 @@
#ifndef CV_CHESSBOARDGENERATOR_H143KJTVYM389YTNHKFDHJ89NYVMO3VLMEJNTBGUEIYVCM203P
#define CV_CHESSBOARDGENERATOR_H143KJTVYM389YTNHKFDHJ89NYVMO3VLMEJNTBGUEIYVCM203P
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d.hpp"
namespace cv
{
@@ -18,17 +18,17 @@ public:
int rendererResolutionMultiplier;
ChessBoardGenerator(const Size& patternSize = Size(8, 6));
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, const Size2f& squareSize, vector<Point2f>& corners) const;
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, const Size2f& squareSize, const Point3f& pos, vector<Point2f>& corners) const;
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, std::vector<Point2f>& corners) const;
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, const Size2f& squareSize, std::vector<Point2f>& corners) const;
Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, const Size2f& squareSize, const Point3f& pos, std::vector<Point2f>& corners) const;
Size cornersSize() const;
mutable vector<Point3f> corners3d;
mutable std::vector<Point3f> corners3d;
private:
void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
void generateEdge(const Point3f& p1, const Point3f& p2, std::vector<Point3f>& out) const;
Mat generateChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
float sqWidth, float sqHeight, const std::vector<Point3f>& whole, std::vector<Point2f>& corners) const;
void generateBasis(Point3f& pb1, Point3f& pb2) const;
Mat rvec, tvec;

View File

@@ -57,14 +57,14 @@ void show_points( const Mat& gray, const Mat& u, const vector<Point2f>& v, Size
merge(vector<Mat>(3, gray), rgb);
for(size_t i = 0; i < v.size(); i++ )
circle( rgb, v[i], 3, CV_RGB(255, 0, 0), CV_FILLED);
circle( rgb, v[i], 3, Scalar(255, 0, 0), FILLED);
if( !u.empty() )
{
const Point2f* u_data = u.ptr<Point2f>();
size_t count = u.cols * u.rows;
for(size_t i = 0; i < count; i++ )
circle( rgb, u_data[i], 3, CV_RGB(0, 255, 0), CV_FILLED);
circle( rgb, u_data[i], 3, Scalar(0, 255, 0), FILLED);
}
if (!v.empty())
{
@@ -208,7 +208,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
}
int progress = 0;
int max_idx = board_list.node->data.seq->total/2;
int max_idx = board_list.size()/2;
double sum_error = 0.0;
int count = 0;
@@ -217,7 +217,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
ts->update_context( this, idx, true );
/* read the image */
string img_file = board_list[idx * 2];
String img_file = board_list[idx * 2];
Mat gray = imread( folder + img_file, 0);
if( gray.empty() )
@@ -227,7 +227,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
return;
}
string _filename = folder + (string)board_list[idx * 2 + 1];
String _filename = folder + (String)board_list[idx * 2 + 1];
bool doesContatinChessboard;
Mat expected;
{
@@ -244,7 +244,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
switch( pattern )
{
case CHESSBOARD:
result = findChessboardCorners(gray, pattern_size, v, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
result = findChessboardCorners(gray, pattern_size, v, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
break;
case CIRCLES_GRID:
result = findCirclesGrid(gray, pattern_size, v);
@@ -366,6 +366,9 @@ bool validateData(const ChessBoardGenerator& cbg, const Size& imgSz,
bool CV_ChessboardDetectorTest::checkByGenerator()
{
bool res = true;
// for some reason, this test sometimes fails on Ubuntu
#if (defined __APPLE__ && defined __x86_64__) || defined _MSC_VER
//theRNG() = 0x58e6e895b9913160;
//cv::DefaultRngAuto dra;
//theRNG() = *ts->get_rng();
@@ -456,7 +459,7 @@ bool CV_ChessboardDetectorTest::checkByGenerator()
vector<Point>& cnt = cnts[0];
cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
cv::drawContours(cb, cnts, -1, Scalar::all(128), CV_FILLED);
cv::drawContours(cb, cnts, -1, Scalar::all(128), FILLED);
found = findChessboardCorners(cb, cbg.cornersSize(), corners_found);
if (found)
@@ -464,6 +467,7 @@ bool CV_ChessboardDetectorTest::checkByGenerator()
cv::drawChessboardCorners(cb, cbg.cornersSize(), Mat(corners_found), found);
}
#endif
return res;
}

View File

@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "test_chessboardgenerator.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <limits>

View File

@@ -40,6 +40,8 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d_c.h"
class CV_ChessboardDetectorTimingTest : public cvtest::BaseTest
{
@@ -66,7 +68,7 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
CvMat* _v = 0;
CvPoint2D32f* v;
IplImage* img = 0;
IplImage img;
IplImage* gray = 0;
IplImage* thresh = 0;
@@ -105,9 +107,10 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
/* read the image */
sprintf( filename, "%s%s", filepath, imgname );
img = cvLoadImage( filename );
cv::Mat img2 = cv::imread( filename );
img = img2;
if( !img )
if( img2.empty() )
{
ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename );
if( max_idx == 1 )
@@ -120,9 +123,9 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);
gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
cvCvtColor( img, gray, CV_BGR2GRAY );
gray = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
thresh = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
cvCvtColor( &img, gray, CV_BGR2GRAY );
count0 = pattern_size.width*pattern_size.height;
@@ -164,7 +167,6 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
find_chessboard_time*1e-6, find_chessboard_time/num_pixels);
cvReleaseMat( &_v );
cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );
progress = update_progress( progress, idx-1, max_idx, 0 );
@@ -175,7 +177,6 @@ _exit_:
/* release occupied memory */
cvReleaseMat( &_v );
cvReleaseFileStorage( &fs );
cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );

View File

@@ -40,9 +40,11 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <limits>
#include "test_chessboardgenerator.hpp"
using namespace std;
using namespace cv;
class CV_ChessboardSubpixelTest : public cvtest::BaseTest

View File

@@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
using namespace cv;
using namespace std;
@@ -1020,7 +1021,7 @@ void CV_FundamentalMatTest::prepare_to_validation( int test_case_idx )
F0 *= 1./f0[8];
uchar* status = test_mat[TEMP][1].data;
double err_level = get_success_error_level( test_case_idx, OUTPUT, 1 );
double err_level = method <= CV_FM_8POINT ? 1 : get_success_error_level( test_case_idx, OUTPUT, 1 );
uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
uchar* mtfm2 = test_mat[OUTPUT][1].data;
double* f_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
@@ -1064,6 +1065,366 @@ void CV_FundamentalMatTest::prepare_to_validation( int test_case_idx )
f_prop2[1] = f[8];
f_prop2[2] = cv::determinant( F );
}
/******************************* find essential matrix ***********************************/
class CV_EssentialMatTest : public cvtest::ArrayTest
{
public:
CV_EssentialMatTest();
protected:
int read_params( CvFileStorage* fs );
void fill_array( int test_case_idx, int i, int j, Mat& arr );
int prepare_test_case( int test_case_idx );
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
double get_success_error_level( int test_case_idx, int i, int j );
void run_func();
void prepare_to_validation( int );
double sampson_error(const double* f, double x1, double y1, double x2, double y2);
int method;
int img_size;
int cube_size;
int dims;
int e_result;
double min_f, max_f;
double sigma;
};
CV_EssentialMatTest::CV_EssentialMatTest()
{
// input arrays:
// 0, 1 - arrays of 2d points that are passed to %func%.
// Can have different data type, layout, be stored in homogeneous coordinates or not.
// 2 - array of 3d points that are projected to both view planes
// 3 - [R|t] matrix for the second view plane (for the first one it is [I|0])
// 4 - intrinsic matrix shared by both cameras
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[OUTPUT].push_back(NULL); // Essential Matrix singularity
test_array[OUTPUT].push_back(NULL); // Inliers mask
test_array[OUTPUT].push_back(NULL); // Translation error
test_array[OUTPUT].push_back(NULL); // Positive depth count
test_array[REF_OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
element_wise_relative_error = false;
method = 0;
img_size = 10;
cube_size = 10;
min_f = 1;
max_f = 3;
}
int CV_EssentialMatTest::read_params( CvFileStorage* fs )
{
int code = cvtest::ArrayTest::read_params( fs );
return code;
}
void CV_EssentialMatTest::get_test_array_types_and_sizes( int /*test_case_idx*/,
vector<vector<Size> >& sizes, vector<vector<int> >& types )
{
RNG& rng = ts->get_rng();
int pt_depth = cvtest::randInt(rng) % 2 == 0 ? CV_32F : CV_64F;
double pt_count_exp = cvtest::randReal(rng)*6 + 1;
int pt_count = MAX(5, cvRound(exp(pt_count_exp)));
dims = cvtest::randInt(rng) % 2 + 2;
dims = 2;
method = CV_LMEDS << (cvtest::randInt(rng) % 2);
types[INPUT][0] = CV_MAKETYPE(pt_depth, 1);
if( 0 && cvtest::randInt(rng) % 2 )
sizes[INPUT][0] = cvSize(pt_count, dims);
else
{
sizes[INPUT][0] = cvSize(dims, pt_count);
if( cvtest::randInt(rng) % 2 )
{
types[INPUT][0] = CV_MAKETYPE(pt_depth, dims);
if( cvtest::randInt(rng) % 2 )
sizes[INPUT][0] = cvSize(pt_count, 1);
else
sizes[INPUT][0] = cvSize(1, pt_count);
}
}
sizes[INPUT][1] = sizes[INPUT][0];
types[INPUT][1] = types[INPUT][0];
sizes[INPUT][2] = cvSize(pt_count, 1 );
types[INPUT][2] = CV_64FC3;
sizes[INPUT][3] = cvSize(4,3);
types[INPUT][3] = CV_64FC1;
sizes[INPUT][4] = cvSize(3,3);
types[INPUT][4] = CV_MAKETYPE(CV_64F, 1);
sizes[TEMP][0] = cvSize(3,3);
types[TEMP][0] = CV_64FC1;
sizes[TEMP][1] = cvSize(pt_count,1);
types[TEMP][1] = CV_8UC1;
sizes[TEMP][2] = cvSize(3,3);
types[TEMP][2] = CV_64FC1;
sizes[TEMP][3] = cvSize(3, 1);
types[TEMP][3] = CV_64FC1;
sizes[TEMP][4] = cvSize(pt_count,1);
types[TEMP][4] = CV_8UC1;
sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(3,1);
types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_64FC1;
sizes[OUTPUT][1] = sizes[REF_OUTPUT][1] = cvSize(pt_count,1);
types[OUTPUT][1] = types[REF_OUTPUT][1] = CV_8UC1;
sizes[OUTPUT][2] = sizes[REF_OUTPUT][2] = cvSize(1,1);
types[OUTPUT][2] = types[REF_OUTPUT][2] = CV_64FC1;
sizes[OUTPUT][3] = sizes[REF_OUTPUT][3] = cvSize(1,1);
types[OUTPUT][3] = types[REF_OUTPUT][3] = CV_8UC1;
}
double CV_EssentialMatTest::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
return 1e-2;
}
void CV_EssentialMatTest::fill_array( int test_case_idx, int i, int j, Mat& arr )
{
double t[12]={0};
RNG& rng = ts->get_rng();
if( i != INPUT )
{
cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr );
return;
}
switch( j )
{
case 0:
case 1:
return; // fill them later in prepare_test_case
case 2:
{
double* p = arr.ptr<double>();
for( i = 0; i < arr.cols*3; i += 3 )
{
p[i] = cvtest::randReal(rng)*cube_size;
p[i+1] = cvtest::randReal(rng)*cube_size;
p[i+2] = cvtest::randReal(rng)*cube_size + cube_size;
}
}
break;
case 3:
{
double r[3];
Mat rot_vec( 3, 1, CV_64F, r );
Mat rot_mat( 3, 3, CV_64F, t, 4*sizeof(t[0]) );
r[0] = cvtest::randReal(rng)*CV_PI*2;
r[1] = cvtest::randReal(rng)*CV_PI*2;
r[2] = cvtest::randReal(rng)*CV_PI*2;
cvtest::Rodrigues( rot_vec, rot_mat );
t[3] = cvtest::randReal(rng)*cube_size;
t[7] = cvtest::randReal(rng)*cube_size;
t[11] = cvtest::randReal(rng)*cube_size;
Mat( 3, 4, CV_64F, t ).convertTo(arr, arr.type());
}
break;
case 4:
t[0] = t[4] = cvtest::randReal(rng)*(max_f - min_f) + min_f;
t[2] = (img_size*0.5 + cvtest::randReal(rng)*4. - 2.)*t[0];
t[5] = (img_size*0.5 + cvtest::randReal(rng)*4. - 2.)*t[4];
t[8] = 1.;
Mat( 3, 3, CV_64F, t ).convertTo( arr, arr.type() );
break;
}
}
int CV_EssentialMatTest::prepare_test_case( int test_case_idx )
{
int code = cvtest::ArrayTest::prepare_test_case( test_case_idx );
if( code > 0 )
{
const Mat& _3d = test_mat[INPUT][2];
RNG& rng = ts->get_rng();
double Idata[] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0 };
Mat I( 3, 4, CV_64F, Idata );
int k;
for( k = 0; k < 2; k++ )
{
const Mat& Rt = k == 0 ? I : test_mat[INPUT][3];
const Mat& A = test_mat[INPUT][4];
Mat& _2d = test_mat[INPUT][k];
test_projectPoints( _3d, Rt, A, _2d, &rng, sigma );
}
}
return code;
}
void CV_EssentialMatTest::run_func()
{
Mat _input0(test_mat[INPUT][0]), _input1(test_mat[INPUT][1]);
Mat K(test_mat[INPUT][4]);
double focal(K.at<double>(0, 0));
cv::Point2d pp(K.at<double>(0, 2), K.at<double>(1, 2));
RNG& rng = ts->get_rng();
Mat E, mask1(test_mat[TEMP][1]);
E = cv::findEssentialMat( _input0, _input1, focal, pp, method, 0.99, MAX(sigma*3, 0.0001), mask1 );
if (E.rows > 3)
{
int count = E.rows / 3;
int row = (cvtest::randInt(rng) % count) * 3;
E = E.rowRange(row, row + 3) * 1.0;
}
E.copyTo(test_mat[TEMP][0]);
Mat R, t, mask2;
recoverPose( E, _input0, _input1, R, t, focal, pp, mask2 );
R.copyTo(test_mat[TEMP][2]);
t.copyTo(test_mat[TEMP][3]);
mask2.copyTo(test_mat[TEMP][4]);
}
double CV_EssentialMatTest::sampson_error(const double * f, double x1, double y1, double x2, double y2)
{
double Fx1[3] = {
f[0] * x1 + f[1] * y1 + f[2],
f[3] * x1 + f[4] * y1 + f[5],
f[6] * x1 + f[7] * y1 + f[8]
};
double Ftx2[3] = {
f[0] * x2 + f[3] * y2 + f[6],
f[1] * x2 + f[4] * y2 + f[7],
f[2] * x2 + f[5] * y2 + f[8]
};
double x2tFx1 = Fx1[0] * x2 + Fx1[1] * y2 + Fx1[2];
double error = x2tFx1 * x2tFx1 / (Fx1[0] * Fx1[0] + Fx1[1] * Fx1[1] + Ftx2[0] * Ftx2[0] + Ftx2[1] * Ftx2[1]);
error = sqrt(error);
return error;
}
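Editorial reference for the helper above: it returns the square root of the Sampson distance, a first-order approximation of the geometric error of a correspondence (x_1, x_2) under a fundamental matrix F,

    e(x_1, x_2) = \frac{\lvert x_2^{\top} F x_1 \rvert}{\sqrt{(F x_1)_1^2 + (F x_1)_2^2 + (F^{\top} x_2)_1^2 + (F^{\top} x_2)_2^2}}

where Fx1 and Ftx2 in the code hold F x_1 and F^{\top} x_2 respectively.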
void CV_EssentialMatTest::prepare_to_validation( int test_case_idx )
{
const Mat& Rt0 = test_mat[INPUT][3];
const Mat& A = test_mat[INPUT][4];
double f0[9], f[9], e[9];
Mat F0(3, 3, CV_64FC1, f0), F(3, 3, CV_64F, f);
Mat E(3, 3, CV_64F, e);
Mat invA, R=Rt0.colRange(0, 3), T1, T2;
cv::invert(A, invA, CV_SVD);
double tx = Rt0.at<double>(0, 3);
double ty = Rt0.at<double>(1, 3);
double tz = Rt0.at<double>(2, 3);
double _t_x[] = { 0, -tz, ty, tz, 0, -tx, -ty, tx, 0 };
// F = (A2^-T)*[t]_x*R*(A1^-1)
cv::gemm( invA, Mat( 3, 3, CV_64F, _t_x ), 1, Mat(), 0, T1, CV_GEMM_A_T );
cv::gemm( R, invA, 1, Mat(), 0, T2 );
cv::gemm( T1, T2, 1, Mat(), 0, F0 );
F0 *= 1./f0[8];
uchar* status = test_mat[TEMP][1].data;
double err_level = get_success_error_level( test_case_idx, OUTPUT, 1 );
uchar* mtfm1 = test_mat[REF_OUTPUT][1].data;
uchar* mtfm2 = test_mat[OUTPUT][1].data;
double* e_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
double* e_prop2 = (double*)test_mat[OUTPUT][0].data;
Mat E_prop2 = Mat(3, 1, CV_64F, e_prop2);
int i, pt_count = test_mat[INPUT][2].cols;
Mat p1( 1, pt_count, CV_64FC2 );
Mat p2( 1, pt_count, CV_64FC2 );
test_convertHomogeneous( test_mat[INPUT][0], p1 );
test_convertHomogeneous( test_mat[INPUT][1], p2 );
cvtest::convert(test_mat[TEMP][0], E, E.type());
cv::gemm( invA, E, 1, Mat(), 0, T1, CV_GEMM_A_T );
cv::gemm( T1, invA, 1, Mat(), 0, F );
for( i = 0; i < pt_count; i++ )
{
double x1 = p1.at<Point2d>(i).x;
double y1 = p1.at<Point2d>(i).y;
double x2 = p2.at<Point2d>(i).x;
double y2 = p2.at<Point2d>(i).y;
// double t0 = sampson_error(f0, x1, y1, x2, y2);
// double t = sampson_error(f, x1, y1, x2, y2);
double n1 = 1./sqrt(x1*x1 + y1*y1 + 1);
double n2 = 1./sqrt(x2*x2 + y2*y2 + 1);
double t0 = fabs(f0[0]*x2*x1 + f0[1]*x2*y1 + f0[2]*x2 +
f0[3]*y2*x1 + f0[4]*y2*y1 + f0[5]*y2 +
f0[6]*x1 + f0[7]*y1 + f0[8])*n1*n2;
double t = fabs(f[0]*x2*x1 + f[1]*x2*y1 + f[2]*x2 +
f[3]*y2*x1 + f[4]*y2*y1 + f[5]*y2 +
f[6]*x1 + f[7]*y1 + f[8])*n1*n2;
mtfm1[i] = 1;
mtfm2[i] = !status[i] || t0 > err_level || t < err_level;
}
e_prop1[0] = sqrt(0.5);
e_prop1[1] = sqrt(0.5);
e_prop1[2] = 0;
e_prop2[0] = 0;
e_prop2[1] = 0;
e_prop2[2] = 0;
SVD::compute(E, E_prop2);
double* pose_prop1 = (double*)test_mat[REF_OUTPUT][2].data;
double* pose_prop2 = (double*)test_mat[OUTPUT][2].data;
double terr1 = norm(Rt0.col(3) / norm(Rt0.col(3)) + test_mat[TEMP][3]);
double terr2 = norm(Rt0.col(3) / norm(Rt0.col(3)) - test_mat[TEMP][3]);
Mat rvec;
Rodrigues(Rt0.colRange(0, 3), rvec);
pose_prop1[0] = 0;
// No check on translation for CV_LMEDS, since it
// involves a degenerate problem when the data are exact inliers.
pose_prop2[0] = method == CV_LMEDS || pt_count == 5 ? 0 : MIN(terr1, terr2);
// int inliers_count = countNonZero(test_mat[TEMP][1]);
// int good_count = countNonZero(test_mat[TEMP][4]);
test_mat[OUTPUT][3] = true; //good_count >= inliers_count / 2;
test_mat[REF_OUTPUT][3] = true;
}
/********************************** convert homogeneous *********************************/
@@ -1359,6 +1720,6 @@ TEST(Calib3d_Rodrigues, accuracy) { CV_RodriguesTest test; test.safe_run(); }
TEST(Calib3d_FindFundamentalMat, accuracy) { CV_FundamentalMatTest test; test.safe_run(); }
TEST(Calib3d_ConvertHomogeneoous, accuracy) { CV_ConvertHomogeneousTest test; test.safe_run(); }
TEST(Calib3d_ComputeEpilines, accuracy) { CV_ComputeEpilinesTest test; test.safe_run(); }
TEST(Calib3d_FindEssentialMat, accuracy) { CV_EssentialMatTest test; test.safe_run(); }
/* End of file. */

View File

@@ -65,7 +65,7 @@
#define METHODS_COUNT 3
int NORM_TYPE[COUNT_NORM_TYPES] = {cv::NORM_L1, cv::NORM_L2, cv::NORM_INF};
int METHOD[METHODS_COUNT] = {0, CV_RANSAC, CV_LMEDS};
int METHOD[METHODS_COUNT] = {0, cv::RANSAC, cv::LMEDS};
using namespace cv;
using namespace std;
@@ -309,7 +309,7 @@ void CV_HomographyTest::run(int)
switch (method)
{
case 0:
case CV_LMEDS:
case LMEDS:
{
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, method),
cv::findHomography(src_mat_2f, dst_vec, method),
@@ -339,14 +339,14 @@ void CV_HomographyTest::run(int)
continue;
}
case CV_RANSAC:
case RANSAC:
{
cv::Mat mask [4]; double diff;
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask[3]) };
for (int j = 0; j < 4; ++j)
{
@@ -411,7 +411,7 @@ void CV_HomographyTest::run(int)
switch (method)
{
case 0:
case CV_LMEDS:
case LMEDS:
{
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f),
cv::findHomography(src_mat_2f, dst_vec),
@@ -466,14 +466,14 @@ void CV_HomographyTest::run(int)
continue;
}
case CV_RANSAC:
case RANSAC:
{
cv::Mat mask_res [4];
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, CV_RANSAC, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, CV_RANSAC, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, CV_RANSAC, reproj_threshold, mask_res[3]) };
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f, RANSAC, reproj_threshold, mask_res[0]),
cv::findHomography(src_mat_2f, dst_vec, RANSAC, reproj_threshold, mask_res[1]),
cv::findHomography(src_vec, dst_mat_2f, RANSAC, reproj_threshold, mask_res[2]),
cv::findHomography(src_vec, dst_vec, RANSAC, reproj_threshold, mask_res[3]) };
for (int j = 0; j < 4; ++j)
{

View File

@@ -0,0 +1,232 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#if 0
#include "_modelest.h"
using namespace std;
using namespace cv;
class BareModelEstimator : public CvModelEstimator2
{
public:
BareModelEstimator(int modelPoints, CvSize modelSize, int maxBasicSolutions);
virtual int runKernel( const CvMat*, const CvMat*, CvMat* );
virtual void computeReprojError( const CvMat*, const CvMat*,
const CvMat*, CvMat* );
bool checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset );
};
BareModelEstimator::BareModelEstimator(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions)
:CvModelEstimator2(_modelPoints, _modelSize, _maxBasicSolutions)
{
}
int BareModelEstimator::runKernel( const CvMat*, const CvMat*, CvMat* )
{
return 0;
}
void BareModelEstimator::computeReprojError( const CvMat*, const CvMat*,
const CvMat*, CvMat* )
{
}
bool BareModelEstimator::checkSubsetPublic( const CvMat* ms1, int count, bool checkPartialSubset )
{
checkPartialSubsets = checkPartialSubset;
return checkSubset(ms1, count);
}
class CV_ModelEstimator2_Test : public cvtest::ArrayTest
{
public:
CV_ModelEstimator2_Test();
protected:
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
void fill_array( int test_case_idx, int i, int j, Mat& arr );
double get_success_error_level( int test_case_idx, int i, int j );
void run_func();
void prepare_to_validation( int test_case_idx );
bool checkPartialSubsets;
int usedPointsCount;
bool checkSubsetResult;
int generalPositionsCount;
int maxPointsCount;
};
CV_ModelEstimator2_Test::CV_ModelEstimator2_Test()
{
generalPositionsCount = get_test_case_count() / 2;
maxPointsCount = 100;
test_array[INPUT].push_back(NULL);
test_array[OUTPUT].push_back(NULL);
test_array[REF_OUTPUT].push_back(NULL);
}
void CV_ModelEstimator2_Test::get_test_array_types_and_sizes( int /*test_case_idx*/,
vector<vector<Size> > &sizes, vector<vector<int> > &types )
{
RNG &rng = ts->get_rng();
checkPartialSubsets = (cvtest::randInt(rng) % 2 == 0);
int pointsCount = cvtest::randInt(rng) % maxPointsCount;
usedPointsCount = pointsCount == 0 ? 0 : cvtest::randInt(rng) % pointsCount;
sizes[INPUT][0] = cvSize(1, pointsCount);
types[INPUT][0] = CV_64FC2;
sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(1, 1);
types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_8UC1;
}
void CV_ModelEstimator2_Test::fill_array( int test_case_idx, int i, int j, Mat& arr )
{
if( i != INPUT )
{
cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr );
return;
}
if (test_case_idx < generalPositionsCount)
{
//generate points in a general position (i.e. no three points can lie on the same line.)
bool isGeneralPosition;
do
{
ArrayTest::fill_array(test_case_idx, i, j, arr);
//a simple check that the position is general:
// for each line check that all other points don't belong to it
isGeneralPosition = true;
for (int startPointIndex = 0; startPointIndex < usedPointsCount && isGeneralPosition; startPointIndex++)
{
for (int endPointIndex = startPointIndex + 1; endPointIndex < usedPointsCount && isGeneralPosition; endPointIndex++)
{
for (int testPointIndex = 0; testPointIndex < usedPointsCount && isGeneralPosition; testPointIndex++)
{
if (testPointIndex == startPointIndex || testPointIndex == endPointIndex)
{
continue;
}
CV_Assert(arr.type() == CV_64FC2);
Point2d tangentVector_1 = arr.at<Point2d>(endPointIndex) - arr.at<Point2d>(startPointIndex);
Point2d tangentVector_2 = arr.at<Point2d>(testPointIndex) - arr.at<Point2d>(startPointIndex);
const float eps = 1e-4f;
//TODO: perhaps it is better to normalize the cross product by norms of the tangent vectors
if (fabs(tangentVector_1.cross(tangentVector_2)) < eps)
{
isGeneralPosition = false;
}
}
}
}
}
while(!isGeneralPosition);
}
else
{
//create points in a degenerate position (there are at least 3 points belonging to the same line)
ArrayTest::fill_array(test_case_idx, i, j, arr);
if (usedPointsCount <= 2)
{
return;
}
RNG &rng = ts->get_rng();
int startPointIndex, endPointIndex, modifiedPointIndex;
do
{
startPointIndex = cvtest::randInt(rng) % usedPointsCount;
endPointIndex = cvtest::randInt(rng) % usedPointsCount;
modifiedPointIndex = checkPartialSubsets ? usedPointsCount - 1 : cvtest::randInt(rng) % usedPointsCount;
}
while (startPointIndex == endPointIndex || startPointIndex == modifiedPointIndex || endPointIndex == modifiedPointIndex);
double startWeight = cvtest::randReal(rng);
CV_Assert(arr.type() == CV_64FC2);
arr.at<Point2d>(modifiedPointIndex) = startWeight * arr.at<Point2d>(startPointIndex) + (1.0 - startWeight) * arr.at<Point2d>(endPointIndex);
}
}
double CV_ModelEstimator2_Test::get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ )
{
return 0;
}
void CV_ModelEstimator2_Test::prepare_to_validation( int test_case_idx )
{
test_mat[OUTPUT][0].at<uchar>(0) = checkSubsetResult;
test_mat[REF_OUTPUT][0].at<uchar>(0) = test_case_idx < generalPositionsCount || usedPointsCount <= 2;
}
void CV_ModelEstimator2_Test::run_func()
{
//make the input continuous
Mat input = test_mat[INPUT][0].clone();
CvMat _input = input;
RNG &rng = ts->get_rng();
int modelPoints = cvtest::randInt(rng);
CvSize modelSize = cvSize(2, modelPoints);
int maxBasicSolutions = cvtest::randInt(rng);
BareModelEstimator modelEstimator(modelPoints, modelSize, maxBasicSolutions);
checkSubsetResult = modelEstimator.checkSubsetPublic(&_input, usedPointsCount, checkPartialSubsets);
}
TEST(Calib3d_ModelEstimator2, accuracy) { CV_ModelEstimator2_Test test; test.safe_run(); }
#endif

View File

@@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
using namespace cv;
using namespace std;
@@ -189,13 +190,13 @@ void CV_POSITTest::run( int start_from )
rotation->data.fl, translation->data.fl );
cvReleasePOSITObject( &object );
//Mat _rotation = cvarrToMat(rotation), _true_rotation = cvarrToMat(true_rotation);
//Mat _translation = cvarrToMat(translation), _true_translation = cvarrToMat(true_translation);
code = cvtest::cmpEps2( ts, rotation, true_rotation, flEpsilon, false, "rotation matrix" );
Mat _rotation = cvarrToMat(rotation), _true_rotation = cvarrToMat(true_rotation);
Mat _translation = cvarrToMat(translation), _true_translation = cvarrToMat(true_translation);
code = cvtest::cmpEps2( ts, _rotation, _true_rotation, flEpsilon, false, "rotation matrix" );
if( code < 0 )
break;
code = cvtest::cmpEps2( ts, translation, true_translation, flEpsilon, false, "translation vector" );
code = cvtest::cmpEps2( ts, _translation, _true_translation, flEpsilon, false, "translation vector" );
if( code < 0 )
break;
}

View File

@@ -1,17 +1,19 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# pragma GCC diagnostic ignored "-Wmissing-prototypes" //OSX
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts/ts.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"
namespace cvtest
{

View File

@@ -41,6 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <string>
#include <limits>

View File

@@ -41,7 +41,10 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/core/internal.hpp"
#ifdef HAVE_TBB
#include "tbb/task_scheduler_init.h"
#endif
using namespace cv;
using namespace std;
@@ -51,9 +54,9 @@ class CV_solvePnPRansac_Test : public cvtest::BaseTest
public:
CV_solvePnPRansac_Test()
{
eps[CV_ITERATIVE] = 1.0e-2;
eps[CV_EPNP] = 1.0e-2;
eps[CV_P3P] = 1.0e-2;
eps[ITERATIVE] = 1.0e-2;
eps[EPNP] = 1.0e-2;
eps[P3P] = 1.0e-2;
totalTestsCount = 10;
}
~CV_solvePnPRansac_Test() {}
@@ -190,9 +193,9 @@ class CV_solvePnP_Test : public CV_solvePnPRansac_Test
public:
CV_solvePnP_Test()
{
eps[CV_ITERATIVE] = 1.0e-6;
eps[CV_EPNP] = 1.0e-6;
eps[CV_P3P] = 1.0e-4;
eps[ITERATIVE] = 1.0e-6;
eps[EPNP] = 1.0e-6;
eps[P3P] = 1.0e-4;
totalTestsCount = 1000;
}
@@ -236,7 +239,7 @@ protected:
}
};
TEST(Calib3d_SolvePnPRansac, accuracy) { CV_solvePnPRansac_Test test; test.safe_run(); }
TEST(DISABLED_Calib3d_SolvePnPRansac, accuracy) { CV_solvePnPRansac_Test test; test.safe_run(); }
TEST(Calib3d_SolvePnP, accuracy) { CV_solvePnP_Test test; test.safe_run(); }

View File

@@ -48,6 +48,7 @@
#include "test_precomp.hpp"
#include <limits>
#include <cstdio>
#include <map>
using namespace std;
using namespace cv;
@@ -74,12 +75,12 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
if( !texturelessMask && !texturedMask )
return;
if( _img.empty() )
CV_Error( CV_StsBadArg, "img is empty" );
CV_Error( Error::StsBadArg, "img is empty" );
Mat img = _img;
if( _img.channels() > 1)
{
Mat tmp; cvtColor( _img, tmp, CV_BGR2GRAY ); img = tmp;
Mat tmp; cvtColor( _img, tmp, COLOR_BGR2GRAY ); img = tmp;
}
Mat dxI; Sobel( img, dxI, CV_32FC1, 1, 0, 3 );
Mat dxI2; pow( dxI / 8.f/*normalize*/, 2, dxI2 );
@@ -94,21 +95,21 @@ void computeTextureBasedMasks( const Mat& _img, Mat* texturelessMask, Mat* textu
void checkTypeAndSizeOfDisp( const Mat& dispMap, const Size* sz )
{
if( dispMap.empty() )
CV_Error( CV_StsBadArg, "dispMap is empty" );
CV_Error( Error::StsBadArg, "dispMap is empty" );
if( dispMap.type() != CV_32FC1 )
CV_Error( CV_StsBadArg, "dispMap must have CV_32FC1 type" );
CV_Error( Error::StsBadArg, "dispMap must have CV_32FC1 type" );
if( sz && (dispMap.rows != sz->height || dispMap.cols != sz->width) )
CV_Error( CV_StsBadArg, "dispMap has incorrect size" );
CV_Error( Error::StsBadArg, "dispMap has incorrect size" );
}
void checkTypeAndSizeOfMask( const Mat& mask, Size sz )
{
if( mask.empty() )
CV_Error( CV_StsBadArg, "mask is empty" );
CV_Error( Error::StsBadArg, "mask is empty" );
if( mask.type() != CV_8UC1 )
CV_Error( CV_StsBadArg, "mask must have CV_8UC1 type" );
CV_Error( Error::StsBadArg, "mask must have CV_8UC1 type" );
if( mask.rows != sz.height || mask.cols != sz.width )
CV_Error( CV_StsBadArg, "mask has incorrect size" );
CV_Error( Error::StsBadArg, "mask has incorrect size" );
}
void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDispMap,
@@ -142,7 +143,7 @@ void checkDispMapsAndUnknDispMasks( const Mat& leftDispMap, const Mat& rightDisp
minMaxLoc( rightDispMap, &rightMinVal, 0, 0, 0, ~rightUnknDispMask );
}
if( leftMinVal < 0 || rightMinVal < 0)
CV_Error( CV_StsBadArg, "known disparity values must be positive" );
CV_Error( Error::StsBadArg, "known disparity values must be positive" );
}
/*
@@ -162,7 +163,7 @@ void computeOcclusionBasedMasks( const Mat& leftDisp, const Mat& _rightDisp,
if( _rightDisp.empty() )
{
if( !rightUnknDispMask.empty() )
CV_Error( CV_StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
CV_Error( Error::StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
rightDisp.create(leftDisp.size(), CV_32FC1);
rightDisp.setTo(Scalar::all(0) );
for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
@@ -229,9 +230,9 @@ void computeDepthDiscontMask( const Mat& disp, Mat& depthDiscontMask, const Mat&
float dispGap = EVAL_DISP_GAP, int discontWidth = EVAL_DISCONT_WIDTH )
{
if( disp.empty() )
CV_Error( CV_StsBadArg, "disp is empty" );
CV_Error( Error::StsBadArg, "disp is empty" );
if( disp.type() != CV_32FC1 )
CV_Error( CV_StsBadArg, "disp must have CV_32FC1 type" );
CV_Error( Error::StsBadArg, "disp must have CV_32FC1 type" );
if( !unknDispMask.empty() )
checkTypeAndSizeOfMask( unknDispMask, disp.size() );
@@ -459,14 +460,29 @@ void CV_StereoMatchingTest::run(int)
continue;
}
int dispScaleFactor = datasetsParams[datasetName].dispScaleFactor;
Mat tmp; trueLeftDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor ); trueLeftDisp = tmp; tmp.release();
Mat tmp;
trueLeftDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor );
trueLeftDisp = tmp;
tmp.release();
if( !trueRightDisp.empty() )
trueRightDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor ); trueRightDisp = tmp; tmp.release();
{
trueRightDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor );
trueRightDisp = tmp;
tmp.release();
}
Mat leftDisp, rightDisp;
int ignBorder = max(runStereoMatchingAlgorithm(leftImg, rightImg, leftDisp, rightDisp, ci), EVAL_IGNORE_BORDER);
leftDisp.convertTo( tmp, CV_32FC1 ); leftDisp = tmp; tmp.release();
rightDisp.convertTo( tmp, CV_32FC1 ); rightDisp = tmp; tmp.release();
leftDisp.convertTo( tmp, CV_32FC1 );
leftDisp = tmp;
tmp.release();
rightDisp.convertTo( tmp, CV_32FC1 );
rightDisp = tmp;
tmp.release();
int tempCode = processStereoMatchingResults( resFS, ci, isWrite,
leftImg, rightImg, trueLeftDisp, trueRightDisp, leftDisp, rightDisp, QualityEvalParams(ignBorder));
@@ -530,7 +546,8 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
// rightDisp is not used in the current test version
int code = cvtest::TS::OK;
assert( fs.isOpened() );
assert( trueLeftDisp.type() == CV_32FC1 && trueRightDisp.type() == CV_32FC1 );
assert( trueLeftDisp.type() == CV_32FC1 );
assert( trueRightDisp.empty() || trueRightDisp.type() == CV_32FC1 );
assert( leftDisp.type() == CV_32FC1 && rightDisp.type() == CV_32FC1 );
// get masks for unknown ground truth disparity values
@@ -554,9 +571,9 @@ int CV_StereoMatchingTest::processStereoMatchingResults( FileStorage& fs, int ca
if( isWrite )
{
fs << caseNames[caseIdx] << "{";
cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
//cvWriteComment( fs.fs, RMS_STR.c_str(), 0 );
writeErrors( RMS_STR, rmss, &fs );
cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
//cvWriteComment( fs.fs, BAD_PXLS_FRACTION_STR.c_str(), 0 );
writeErrors( BAD_PXLS_FRACTION_STR, badPxlsFractions, &fs );
fs << "}"; // datasetName
}
@@ -593,10 +610,10 @@ int CV_StereoMatchingTest::readDatasetsParams( FileStorage& fs )
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=3 )
{
string _name = fn[i];
String _name = fn[i];
DatasetParams params;
string sf = fn[i+1]; params.dispScaleFactor = atoi(sf.c_str());
string uv = fn[i+2]; params.dispUnknVal = atoi(uv.c_str());
String sf = fn[i+1]; params.dispScaleFactor = atoi(sf.c_str());
String uv = fn[i+2]; params.dispUnknVal = atoi(uv.c_str());
datasetsParams[_name] = params;
}
return cvtest::TS::OK;
@@ -680,10 +697,10 @@ protected:
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=4 )
{
string caseName = fn[i], datasetName = fn[i+1];
String caseName = fn[i], datasetName = fn[i+1];
RunParams params;
string ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
string winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
String ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
String winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
caseNames.push_back( caseName );
caseDatasets.push_back( datasetName );
caseRunParams.push_back( params );
@@ -697,11 +714,13 @@ protected:
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
assert( _leftImg.type() == CV_8UC3 && _rightImg.type() == CV_8UC3 );
Mat leftImg; cvtColor( _leftImg, leftImg, CV_BGR2GRAY );
Mat rightImg; cvtColor( _rightImg, rightImg, CV_BGR2GRAY );
Mat leftImg; cvtColor( _leftImg, leftImg, COLOR_BGR2GRAY );
Mat rightImg; cvtColor( _rightImg, rightImg, COLOR_BGR2GRAY );
StereoBM bm( StereoBM::BASIC_PRESET, params.ndisp, params.winSize );
bm( leftImg, rightImg, leftDisp, CV_32F );
Ptr<StereoBM> bm = createStereoBM( params.ndisp, params.winSize );
Mat tempDisp;
bm->compute( leftImg, rightImg, tempDisp );
tempDisp.convertTo(leftDisp, CV_32F, 1./StereoMatcher::DISP_SCALE);
return params.winSize/2;
}
};
@@ -734,11 +753,11 @@ protected:
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=5 )
{
string caseName = fn[i], datasetName = fn[i+1];
String caseName = fn[i], datasetName = fn[i+1];
RunParams params;
string ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
string winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
string fullDP = fn[i+4]; params.fullDP = atoi(fullDP.c_str()) == 0 ? false : true;
String ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str());
String winSize = fn[i+3]; params.winSize = atoi(winSize.c_str());
String fullDP = fn[i+4]; params.fullDP = atoi(fullDP.c_str()) == 0 ? false : true;
caseNames.push_back( caseName );
caseDatasets.push_back( datasetName );
caseRunParams.push_back( params );
@@ -751,10 +770,13 @@ protected:
{
RunParams params = caseRunParams[caseIdx];
assert( params.ndisp%16 == 0 );
StereoSGBM sgbm( 0, params.ndisp, params.winSize, 10*params.winSize*params.winSize, 40*params.winSize*params.winSize,
1, 63, 10, 100, 32, params.fullDP );
sgbm( leftImg, rightImg, leftDisp );
assert( leftDisp.type() == CV_16SC1 );
Ptr<StereoSGBM> sgbm = createStereoSGBM( 0, params.ndisp, params.winSize,
10*params.winSize*params.winSize,
40*params.winSize*params.winSize,
1, 63, 10, 100, 32, params.fullDP ?
StereoSGBM::MODE_HH : StereoSGBM::MODE_SGBM );
sgbm->compute( leftImg, rightImg, leftDisp );
CV_Assert( leftDisp.type() == CV_16SC1 );
leftDisp/=16;
return 0;
}
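// Editor's hedged sketch (not part of the patch): the factory-based stereo API this hunk
// migrates to, shown end to end. File names are placeholders; the calls mirror the
// createStereoBM/createStereoSGBM usage above.
#include "opencv2/calib3d.hpp"
#include "opencv2/highgui.hpp"

static void stereoFactorySketch()
{
    cv::Mat left  = cv::imread("left.png", 0);             // 0 = load as grayscale
    cv::Mat right = cv::imread("right.png", 0);

    // Block matching: createStereoBM(ndisp, blockSize) replaces the old StereoBM functor.
    cv::Ptr<cv::StereoBM> bm = cv::createStereoBM(64, 21);
    cv::Mat dispBM;
    bm->compute(left, right, dispBM);                      // fixed-point CV_16S disparities
    dispBM.convertTo(dispBM, CV_32F, 1. / cv::StereoMatcher::DISP_SCALE);

    // Semi-global matching: the old fullDP flag is now expressed as MODE_HH vs. MODE_SGBM.
    cv::Ptr<cv::StereoSGBM> sgbm = cv::createStereoSGBM(0, 64, 5,
                                                        10 * 5 * 5, 40 * 5 * 5,
                                                        1, 63, 10, 100, 32,
                                                        cv::StereoSGBM::MODE_SGBM);
    cv::Mat dispSGBM;
    sgbm->compute(left, right, dispSGBM);
    dispSGBM /= 16;                                        // same scaling as the test above
}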

View File

@@ -41,6 +41,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
using namespace cv;
using namespace std;
@@ -491,7 +492,7 @@ void CV_UndistortPointsTest::distortPoints(const CvMat* _src, CvMat* _dst, const
__P = cvCreateMat(3,4,CV_64F);
if (matP)
{
cvTsConvert(matP,__P);
cvtest::convert(cvarrToMat(matP), cvarrToMat(__P), -1);
}
else
{
@@ -500,7 +501,7 @@ void CV_UndistortPointsTest::distortPoints(const CvMat* _src, CvMat* _dst, const
__P->data.db[4] = 1;
__P->data.db[8] = 1;
}
CvMat* __R = cvCreateMat(3,3,CV_64F);;
CvMat* __R = cvCreateMat(3,3,CV_64F);
if (matR)
{
cvCopy(matR,__R);
@@ -847,16 +848,16 @@ void CV_InitUndistortRectifyMapTest::prepare_to_validation(int/* test_case_idx*/
CvMat _input3 = test_mat[INPUT][3];
CvMat _input4 = test_mat[INPUT][4];
cvTsConvert(&_input1,&_camera);
cvTsConvert(&_input2,&_distort);
cvTsConvert(&_input3,&_rot);
cvTsConvert(&_input4,&_new_cam);
cvtest::convert(cvarrToMat(&_input1), cvarrToMat(&_camera), -1);
cvtest::convert(cvarrToMat(&_input2), cvarrToMat(&_distort), -1);
cvtest::convert(cvarrToMat(&_input3), cvarrToMat(&_rot), -1);
cvtest::convert(cvarrToMat(&_input4), cvarrToMat(&_new_cam), -1);
//Applying precalculated undistort rectify map
if (!useCPlus)
{
mapx = cv::Mat(_mapx);
mapy = cv::Mat(_mapy);
mapx = cv::cvarrToMat(_mapx);
mapy = cv::cvarrToMat(_mapy);
}
cv::Mat map1,map2;
cv::convertMaps(mapx,mapy,map1,map2,CV_32FC1);
@@ -876,7 +877,7 @@ void CV_InitUndistortRectifyMapTest::prepare_to_validation(int/* test_case_idx*/
zero_distortion ? 0 : &_distort, zero_R ? 0 : &_rot, zero_new_cam ? &_camera : &_new_cam);
//cvTsDistortPoints(&_points,&ref_points,&_camera,&_distort,&_rot,&_new_cam);
CvMat dst = test_mat[REF_OUTPUT][0];
cvTsConvert(&ref_points,&dst);
cvtest::convert(cvarrToMat(&ref_points), cvarrToMat(&dst), -1);
cvtest::copy(test_mat[INPUT][0],test_mat[OUTPUT][0]);

View File

@@ -40,6 +40,7 @@
//M*/
#include "test_precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
class CV_UndistortPointsBadArgTest : public cvtest::BadArgTest
{
@@ -243,27 +244,27 @@ void CV_UndistortPointsBadArgTest::run(int)
//C++ tests
useCPlus = true;
camera_mat = cv::Mat(&_camera_mat_orig);
distortion_coeffs = cv::Mat(&_distortion_coeffs_orig);
P = cv::Mat(&_P_orig);
R = cv::Mat(&_R_orig);
src_points = cv::Mat(&_src_points_orig);
camera_mat = cv::cvarrToMat(&_camera_mat_orig);
distortion_coeffs = cv::cvarrToMat(&_distortion_coeffs_orig);
P = cv::cvarrToMat(&_P_orig);
R = cv::cvarrToMat(&_R_orig);
src_points = cv::cvarrToMat(&_src_points_orig);
temp = cvCreateMat(2,2,CV_32FC2);
src_points = cv::Mat(temp);
src_points = cv::cvarrToMat(temp);
errcount += run_test_case( CV_StsAssert, "Invalid input data matrix size" );
src_points = cv::Mat(&_src_points_orig);
src_points = cv::cvarrToMat(&_src_points_orig);
cvReleaseMat(&temp);
temp = cvCreateMat(1,4,CV_64FC2);
src_points = cv::Mat(temp);
src_points = cv::cvarrToMat(temp);
errcount += run_test_case( CV_StsAssert, "Invalid input data matrix type" );
src_points = cv::Mat(&_src_points_orig);
src_points = cv::cvarrToMat(&_src_points_orig);
cvReleaseMat(&temp);
src_points = cv::Mat();
errcount += run_test_case( CV_StsAssert, "Input data matrix is not continuous" );
src_points = cv::Mat(&_src_points_orig);
src_points = cv::cvarrToMat(&_src_points_orig);
cvReleaseMat(&temp);
@@ -360,12 +361,12 @@ void CV_InitUndistortRectifyMapBadArgTest::run(int)
//C++ tests
useCPlus = true;
camera_mat = cv::Mat(&_camera_mat_orig);
distortion_coeffs = cv::Mat(&_distortion_coeffs_orig);
new_camera_mat = cv::Mat(&_new_camera_mat_orig);
R = cv::Mat(&_R_orig);
mapx = cv::Mat(&_mapx_orig);
mapy = cv::Mat(&_mapy_orig);
camera_mat = cv::cvarrToMat(&_camera_mat_orig);
distortion_coeffs = cv::cvarrToMat(&_distortion_coeffs_orig);
new_camera_mat = cv::cvarrToMat(&_new_camera_mat_orig);
R = cv::cvarrToMat(&_R_orig);
mapx = cv::cvarrToMat(&_mapx_orig);
mapy = cv::cvarrToMat(&_mapy_orig);
mat_type = CV_64F;
@@ -373,21 +374,21 @@ void CV_InitUndistortRectifyMapBadArgTest::run(int)
mat_type = mat_type_orig;
temp = cvCreateMat(3,2,CV_32FC1);
camera_mat = cv::Mat(temp);
camera_mat = cv::cvarrToMat(temp);
errcount += run_test_case( CV_StsAssert, "Invalid camera data matrix size" );
camera_mat = cv::Mat(&_camera_mat_orig);
camera_mat = cv::cvarrToMat(&_camera_mat_orig);
cvReleaseMat(&temp);
temp = cvCreateMat(4,3,CV_32FC1);
R = cv::Mat(temp);
R = cv::cvarrToMat(temp);
errcount += run_test_case( CV_StsAssert, "Invalid R data matrix size" );
R = cv::Mat(&_R_orig);
R = cv::cvarrToMat(&_R_orig);
cvReleaseMat(&temp);
temp = cvCreateMat(6,1,CV_32FC1);
distortion_coeffs = cv::Mat(temp);
distortion_coeffs = cv::cvarrToMat(temp);
errcount += run_test_case( CV_StsAssert, "Invalid distortion coefficients data matrix size" );
distortion_coeffs = cv::Mat(&_distortion_coeffs_orig);
distortion_coeffs = cv::cvarrToMat(&_distortion_coeffs_orig);
cvReleaseMat(&temp);
//------------
@@ -499,11 +500,11 @@ void CV_UndistortBadArgTest::run(int)
//C++ tests
useCPlus = true;
camera_mat = cv::Mat(&_camera_mat_orig);
distortion_coeffs = cv::Mat(&_distortion_coeffs_orig);
new_camera_mat = cv::Mat(&_new_camera_mat_orig);
src = cv::Mat(&_src_orig);
dst = cv::Mat(&_dst_orig);
camera_mat = cv::cvarrToMat(&_camera_mat_orig);
distortion_coeffs = cv::cvarrToMat(&_distortion_coeffs_orig);
new_camera_mat = cv::cvarrToMat(&_new_camera_mat_orig);
src = cv::cvarrToMat(&_src_orig);
dst = cv::cvarrToMat(&_dst_orig);
//------------
delete[] arr_src;
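// Editor's hedged sketch (not part of the patch): the cv::Mat(CvMat*) constructor used by
// the old code above was removed, so legacy CvMat headers are now wrapped with
// cv::cvarrToMat, which builds a Mat header over the same data without copying it.
#include "opencv2/core/core_c.h"
#include "opencv2/core.hpp"

static void cvarrToMatSketch()
{
    CvMat* legacy = cvCreateMat(3, 3, CV_32FC1);   // legacy C-API matrix
    cvSetIdentity(legacy);

    cv::Mat wrapped = cv::cvarrToMat(legacy);      // shares the CvMat data, no copy
    wrapped.at<float>(0, 0) = 2.f;                 // the change is visible through `legacy` too

    cvReleaseMat(&legacy);                         // the wrapper must not be used after this
}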

View File

@@ -1 +1 @@
ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_features2d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui)
ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui)

View File

@@ -10,3 +10,4 @@ The module contains some recently added functionality that has not been stabiliz
stereo
FaceRecognizer Documentation <facerec/index>
Retina Documentation <retina/index>
openfabmap

View File

@@ -42,18 +42,18 @@ In OpenCV 2.4 you only need :ocv:func:`applyColorMap` to apply a colormap on a g
.. code-block:: cpp
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
int main(int argc, const char *argv[]) {
// Get the path to the image from the command line,
// if an argument was given.
string filename;
String filename;
if (argc > 1) {
filename = string(argv[1]);
filename = String(argv[1]);
}
// The following lines show how to apply a colormap on a given image
// and show it with cv::imshow example with an image. An exception is
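// Editor's hedged sketch (not part of the original sample): the calls this snippet
// leads up to. `filename` is the argument parsed above; any COLORMAP_* constant works.
Mat img = imread(filename, 1);                    // load the input image in color
Mat cm_img;
applyColorMap(img, cm_img, COLORMAP_JET);         // apply the colormap
imshow("colormapped image", cm_img);
waitKey(0);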

View File

@@ -30,10 +30,10 @@ a unified access to all face recognition algorithms in OpenCV. ::
virtual void predict(InputArray src, int &label, double &confidence) const = 0;
// Serializes this object to a given filename.
virtual void save(const string& filename) const;
virtual void save(const String& filename) const;
// Deserializes this object from a given filename.
virtual void load(const string& filename);
virtual void load(const String& filename);
// Serializes this object to a given cv::FileStorage.
virtual void save(FileStorage& fs) const = 0;
@@ -52,7 +52,7 @@ I'll go a bit more into detail explaining :ocv:class:`FaceRecognizer`, because i
* So-called “virtual constructor”. That is, each Algorithm derivative is registered at program start and you can get the list of registered algorithms and create an instance of a particular algorithm by its name (see :ocv:func:`Algorithm::create`). If you plan to add your own algorithms, it is good practice to add a unique prefix to your algorithms to distinguish them from other algorithms.
* Setting/Retrieving algorithm parameters by name. If you used video capturing functionality from OpenCV highgui module, you are probably familiar with :ocv:cfunc:`cvSetCaptureProperty`, :ocv:cfunc:`cvGetCaptureProperty`, :ocv:func:`VideoCapture::set` and :ocv:func:`VideoCapture::get`. :ocv:class:`Algorithm` provides a similar method where instead of integer ids you specify the parameter names as text strings. See :ocv:func:`Algorithm::set` and :ocv:func:`Algorithm::get` for details.
* Setting/Retrieving algorithm parameters by name. If you used video capturing functionality from OpenCV highgui module, you are probably familiar with :ocv:cfunc:`cvSetCaptureProperty`, :ocv:cfunc:`cvGetCaptureProperty`, :ocv:func:`VideoCapture::set` and :ocv:func:`VideoCapture::get`. :ocv:class:`Algorithm` provides a similar method where instead of integer ids you specify the parameter names as text Strings. See :ocv:func:`Algorithm::set` and :ocv:func:`Algorithm::get` for details (a short sketch follows this list).
* Reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time.
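A hedged sketch of this name-based access (editor's illustration, not from the original page; it assumes the Eigenfaces recognizer exposes a ``threshold`` parameter, as its factory signature suggests):

.. code-block:: cpp

    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    // read a named parameter through the generic Algorithm interface
    double thresh = model->getDouble("threshold");
    // and set it back by name
    model->set("threshold", 0.0);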
@@ -113,7 +113,7 @@ Since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm`, you can use
// Create a FaceRecognizer:
Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
// And here's how to get its name:
std::string name = model->name();
String name = model->name();
FaceRecognizer::train
@@ -251,7 +251,7 @@ FaceRecognizer::save
Saves a :ocv:class:`FaceRecognizer` and its model state.
.. ocv:function:: void FaceRecognizer::save(const string& filename) const
.. ocv:function:: void FaceRecognizer::save(const String& filename) const
Saves this model to a given filename, either as XML or YAML.
@@ -265,7 +265,7 @@ Saves a :ocv:class:`FaceRecognizer` and its model state.
Every :ocv:class:`FaceRecognizer` overrides ``FaceRecognizer::save(FileStorage& fs)``
to save the internal model state. ``FaceRecognizer::save(const string& filename)`` saves
to save the internal model state. ``FaceRecognizer::save(const String& filename)`` saves
the state of a model to the given filename.
The suffix ``const`` means that prediction does not affect the internal model
@@ -276,13 +276,13 @@ FaceRecognizer::load
Loads a :ocv:class:`FaceRecognizer` and its model state.
.. ocv:function:: void FaceRecognizer::load( const string& filename )
.. ocv:function:: void FaceRecognizer::load( const String& filename )
.. ocv:function:: void FaceRecognizer::load( const FileStorage& fs ) = 0
Loads a persisted model and state from a given XML or YAML file. Every
:ocv:class:`FaceRecognizer` has to override ``FaceRecognizer::load(FileStorage& fs)``
to enable loading the model state. ``FaceRecognizer::load(FileStorage& fs)`` in
turn gets called by ``FaceRecognizer::load(const string& filename)``, to ease
turn gets called by ``FaceRecognizer::load(const String& filename)``, to ease
loading a model.
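A hedged round-trip sketch of the two calls above (editor's illustration; the file name, ``images`` and ``labels`` are placeholders prepared elsewhere):

.. code-block:: cpp

    // train a model, then persist its full state to YAML
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);        // vector<Mat> images; vector<int> labels
    model->save("eigenfaces.yml");

    // later, restore exactly the same state into a fresh instance
    Ptr<FaceRecognizer> restored = createEigenFaceRecognizer();
    restored->load("eigenfaces.yml");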
createEigenFaceRecognizer

View File

@@ -7,7 +7,7 @@ Face Recognition with OpenCV
Introduction
============
`OpenCV (Open Source Computer Vision) <http://opencv.willowgarage.com>`_ is a popular computer vision library started by `Intel <http://www.intel.com>`_ in 1999. The cross-platform library sets its focus on real-time image processing and includes patent-free implementations of the latest computer vision algorithms. In 2008 `Willow Garage <http://www.willowgarage.com>`_ took over support and OpenCV 2.3.1 now comes with a programming interface to C, C++, `Python <http://www.python.org>`_ and `Android <http://www.android.com>`_. OpenCV is released under a BSD license so it is used in academic projects and commercial products alike.
`OpenCV (Open Source Computer Vision) <http://opencv.org>`_ is a popular computer vision library started by `Intel <http://www.intel.com>`_ in 1999. The cross-platform library sets its focus on real-time image processing and includes patent-free implementations of the latest computer vision algorithms. In 2008 `Willow Garage <http://www.willowgarage.com>`_ took over support and OpenCV 2.3.1 now comes with a programming interface to C, C++, `Python <http://www.python.org>`_ and `Android <http://www.android.com>`_. OpenCV is released under a BSD license so it is used in academic projects and commercial products alike.
OpenCV 2.4 now comes with the very new :ocv:class:`FaceRecognizer` class for face recognition, so you can start experimenting with face recognition right away. This document is the guide I wished for when I was working my way into face recognition. It shows you how to perform face recognition with :ocv:class:`FaceRecognizer` in OpenCV (with full source code listings) and gives you an introduction into the algorithms behind it. I'll also show how to create the visualizations you can find in many publications, because a lot of people asked for them.

View File

@@ -6,7 +6,7 @@ project(facerec_cpp_samples)
#SET(OpenCV_DIR /path/to/your/opencv/installation)
# packages
find_package(OpenCV REQUIRED) # http://opencv.willowgarage.com
find_package(OpenCV REQUIRED) # http://opencv.org
# probably you should loop through the sample files here
add_executable(facerec_demo facerec_demo.cpp)

View File

@@ -1,3 +1,5 @@
#!/usr/bin/env python
import sys
import os.path

View File

@@ -16,9 +16,9 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>

View File

@@ -16,9 +16,9 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <fstream>

View File

@@ -16,9 +16,9 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <fstream>

View File

@@ -16,9 +16,9 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <fstream>

View File

@@ -16,9 +16,9 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <fstream>

View File

@@ -16,11 +16,11 @@
* See <http://www.opensource.org/licenses/bsd-license>
*/
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/core.hpp"
#include "opencv2/contrib.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <fstream>

View File

@@ -1,4 +1,4 @@
openFABMAP
OpenFABMAP
========================================
.. highlight:: cpp

View File

@@ -16,7 +16,7 @@ Class which provides the main controls to the Gipsa/Listic labs human retina mo
**NOTE : See the Retina tutorial in the tutorial/contrib section for complementary explanations.**
The retina can be settled up with various parameters, by default, the retina cancels mean luminance and enforces all details of the visual scene. In order to use your own parameters, you can use at least one time the *write(std::string fs)* method which will write a proper XML file with all default parameters. Then, tweak it on your own and reload them at any time using method *setup(std::string fs)*. These methods update a *Retina::RetinaParameters* member structure that is described hereafter. ::
The retina can be set up with various parameters; by default, the retina cancels mean luminance and enforces all details of the visual scene. In order to use your own parameters, you can call the *write(String fs)* method at least once, which will write a proper XML file with all default parameters. Then, tweak it on your own and reload it at any time using the *setup(String fs)* method. These methods update a *Retina::RetinaParameters* member structure that is described hereafter. ::
class Retina
{
@@ -49,12 +49,12 @@ The retina can be settled up with various parameters, by default, the retina can
Size getOutputSize ();
// setup methods with specific parameters specification of global xml config file loading/write
void setup (std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
void setup (String retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
void setup (FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
void setup (RetinaParameters newParameters);
struct Retina::RetinaParameters getParameters ();
const std::string printSetup ();
virtual void write (std::string fs) const;
const String printSetup ();
virtual void write (String fs) const;
virtual void write (FileStorage &fs) const;
void setupOPLandIPLParvoChannel (const bool colorMode=true, const bool normaliseOutput=true, const float photoreceptorsLocalAdaptationSensitivity=0.7, const float photoreceptorsTemporalConstant=0.5, const float photoreceptorsSpatialConstant=0.53, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7);
void setupIPLMagnoChannel (const bool normaliseOutput=true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2, const float V0CompressionParameter=0.95, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7);
@@ -208,7 +208,7 @@ Retina::getMagno
Retina::getParameters
+++++++++++++++++++++
.. ocv:function:: struct Retina::RetinaParameters Retina::getParameters()
.. ocv:function:: Retina::RetinaParameters Retina::getParameters()
Retrieve the current parameter values in a *Retina::RetinaParameters* structure
@@ -248,7 +248,7 @@ Retina::getOutputSize
Retina::printSetup
++++++++++++++++++
.. ocv:function:: const std::string Retina::printSetup()
.. ocv:function:: const String Retina::printSetup()
Outputs a string showing the used parameters setup
@@ -277,7 +277,7 @@ Retina::setColorSaturation
Retina::setup
+++++++++++++
.. ocv:function:: void Retina::setup(std::string retinaParameterFile = "", const bool applyDefaultSetupOnFailure = true )
.. ocv:function:: void Retina::setup(String retinaParameterFile = "", const bool applyDefaultSetupOnFailure = true )
.. ocv:function:: void Retina::setup(FileStorage & fs, const bool applyDefaultSetupOnFailure = true )
.. ocv:function:: void Retina::setup(RetinaParameters newParameters)
@@ -291,7 +291,7 @@ Retina::setup
Retina::write
+++++++++++++
.. ocv:function:: void Retina::write( std::string fs ) const
.. ocv:function:: void Retina::write( String fs ) const
.. ocv:function:: void Retina::write( FileStorage& fs ) const
Write xml/yml formatted parameters information
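A hedged sketch of the write/setup round trip (editor's illustration; the construction of the retina from the input frame size is assumed, and the file name is a placeholder):

.. code-block:: cpp

    cv::Retina retina(inputFrame.size());           // assumed: build the retina for the frame size
    retina.write("RetinaDefaultParameters.xml");    // dump the default setup to XML
    // ... hand-edit the file if needed, then reload it:
    retina.setup("RetinaDefaultParameters.xml");
    std::cout << retina.printSetup() << std::endl;  // show the parameters actually in use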
@@ -336,8 +336,9 @@ Retina::RetinaParameters
========================
.. ocv:struct:: Retina::RetinaParameters
This structure merges all the parameters that can be adjusted threw the **Retina::setup()**, **Retina::setupOPLandIPLParvoChannel** and **Retina::setupIPLMagnoChannel** setup methods
Parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel. ::
This structure merges all the parameters that can be adjusted through the **Retina::setup()**, **Retina::setupOPLandIPLParvoChannel** and **Retina::setupIPLMagnoChannel** setup methods
Parameters structure for better clarity, check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel. ::
class RetinaParameters{
struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters

View File

@@ -0,0 +1,639 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CONTRIB_HPP__
#define __OPENCV_CONTRIB_HPP__
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/objdetect.hpp"
namespace cv
{
class CV_EXPORTS Octree
{
public:
struct Node
{
Node() {}
int begin, end;
float x_min, x_max, y_min, y_max, z_min, z_max;
int maxLevels;
bool isLeaf;
int children[8];
};
Octree();
Octree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual ~Octree();
virtual void buildTree( const std::vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual void getPointsWithinSphere( const Point3f& center, float radius,
std::vector<Point3f>& points ) const;
const std::vector<Node>& getNodes() const { return nodes; }
private:
int minPoints;
std::vector<Point3f> points;
std::vector<Node> nodes;
virtual void buildNext(size_t node_ind);
};
class CV_EXPORTS Mesh3D
{
public:
struct EmptyMeshException {};
Mesh3D();
Mesh3D(const std::vector<Point3f>& vtx);
~Mesh3D();
void buildOctree();
void clearOctree();
float estimateResolution(float tryRatio = 0.1f);
void computeNormals(float normalRadius, int minNeighbors = 20);
void computeNormals(const std::vector<int>& subset, float normalRadius, int minNeighbors = 20);
void writeAsVrml(const String& file, const std::vector<Scalar>& colors = std::vector<Scalar>()) const;
std::vector<Point3f> vtx;
std::vector<Point3f> normals;
float resolution;
Octree octree;
const static Point3f allzero;
};
class CV_EXPORTS SpinImageModel
{
public:
/* model parameters, leave unset for default or auto estimate */
float normalRadius;
int minNeighbors;
float binSize;
int imageWidth;
float lambda;
float gamma;
float T_GeometriccConsistency;
float T_GroupingCorespondances;
/* public interface */
SpinImageModel();
explicit SpinImageModel(const Mesh3D& mesh);
~SpinImageModel();
void selectRandomSubset(float ratio);
void setSubset(const std::vector<int>& subset);
void compute();
void match(const SpinImageModel& scene, std::vector< std::vector<Vec2i> >& result);
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
size_t getSpinCount() const { return spinImages.rows; }
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
const Mesh3D& getMesh() const { return mesh; }
Mesh3D& getMesh() { return mesh; }
/* static utility functions */
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2);
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2,
float gamma);
protected:
void defaultParams();
void matchSpinToModel(const Mat& spin, std::vector<int>& indeces,
std::vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
void repackSpinImages(const std::vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
std::vector<int> subset;
Mesh3D mesh;
Mat spinImages;
};
class CV_EXPORTS TickMeter
{
public:
TickMeter();
void start();
void stop();
int64 getTimeTicks() const;
double getTimeMicro() const;
double getTimeMilli() const;
double getTimeSec() const;
int64 getCounter() const;
void reset();
private:
int64 counter;
int64 sumTime;
int64 startTime;
};
//CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
class CV_EXPORTS SelfSimDescriptor
{
public:
SelfSimDescriptor();
SelfSimDescriptor(int _ssize, int _lsize,
int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
int _nangles=DEFAULT_NUM_ANGLES);
SelfSimDescriptor(const SelfSimDescriptor& ss);
virtual ~SelfSimDescriptor();
SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
size_t getDescriptorSize() const;
Size getGridSize( Size imgsize, Size winStride ) const;
virtual void compute(const Mat& img, std::vector<float>& descriptors, Size winStride=Size(),
const std::vector<Point>& locations=std::vector<Point>()) const;
virtual void computeLogPolarMapping(Mat& mappingMask) const;
virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
int smallSize;
int largeSize;
int startDistanceBucket;
int numberOfDistanceBuckets;
int numberOfAngles;
enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
};
CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
CV_OUT std::vector<std::vector<Point> >& results, CV_OUT std::vector<float>& cost,
double templScale=1, int maxMatches = 20,
double minMatchDistance = 1.0, int padX = 3,
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
double orientationWeight = 0.5, double truncate = 20);
class CV_EXPORTS_W StereoVar
{
public:
// Flags
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
enum {CYCLE_O, CYCLE_V};
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
//! the default constructor
CV_WRAP StereoVar();
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
//! the destructor
virtual ~StereoVar();
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp);
CV_PROP_RW int levels;
CV_PROP_RW double pyrScale;
CV_PROP_RW int nIt;
CV_PROP_RW int minDisp;
CV_PROP_RW int maxDisp;
CV_PROP_RW int poly_n;
CV_PROP_RW double poly_sigma;
CV_PROP_RW float fi;
CV_PROP_RW float lambda;
CV_PROP_RW int penalization;
CV_PROP_RW int cycle;
CV_PROP_RW int flags;
private:
void autoParams();
void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
};
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
class CV_EXPORTS Directory
{
public:
static std::vector<String> GetListFiles ( const String& path, const String & exten = "*", bool addPath = true );
static std::vector<String> GetListFilesR ( const String& path, const String & exten = "*", bool addPath = true );
static std::vector<String> GetListFolders( const String& path, const String & exten = "*", bool addPath = true );
};
/*
* Generation of a set of different colors by the following way:
* 1) generate more colors than needed ("factor" times more) in RGB,
* 2) convert them to Lab,
* 3) choose the needed count of colors from the set that are the most different from
*    each other,
* 4) convert the colors back to RGB
*/
CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
/*
* Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
* "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
*/
enum { ROTATION = 1,
TRANSLATION = 2,
RIGID_BODY_MOTION = 4
};
CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
const Mat& image0, const Mat& depth0, const Mat& mask0,
const Mat& image1, const Mat& depth1, const Mat& mask1,
const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
const std::vector<int>& iterCounts=std::vector<int>(),
const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
int transformType=RIGID_BODY_MOTION );
/**
*Bilinear interpolation technique.
*
*The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
*of the four nearest neighbouring Cartesian pixels to the center of the RF.
*The same principle is applied to the inverse transformation.
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Interp
{
public:
LogPolar_Interp() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Interp();
protected:
Mat Rsri;
Mat Csri;
int S, R, M, N;
int top, bottom,left,right;
double ro0, romax, a, q;
int interp;
Mat ETAyx;
Mat CSIyx;
void create_map(int M, int N, int R, int S, double ro0);
};
/**
*Overlapping circular receptive fields technique
*
*The Cartesian plane is divided in two regions: the fovea and the periphery.
*The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
*the periphery we use the overlapping Gaussian circular RFs.
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Overlapping
{
public:
LogPolar_Overlapping() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
double ro0=3.0, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Overlapping();
protected:
Mat Rsri;
Mat Csri;
std::vector<int> Rsr;
std::vector<int> Csr;
std::vector<double> Wsr;
int S, R, M, N, ind1;
int top, bottom,left,right;
double ro0, romax, a, q;
struct kernel
{
kernel() { w = 0; }
std::vector<double> weights;
int w;
};
Mat ETAyx;
Mat CSIyx;
std::vector<kernel> w_ker_2D;
void create_map(int M, int N, int R, int S, double ro0);
};
/**
* Adjacent receptive fields technique
*
*All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF.
*The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
*This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
*to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Adjacent
{
public:
LogPolar_Adjacent() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param smin the size of the subpixel (default value 0.25 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Adjacent();
protected:
struct pixel
{
pixel() { u = v = 0; a = 0.; }
int u;
int v;
double a;
};
int S, R, M, N;
int top, bottom,left,right;
double ro0, romax, a, q;
std::vector<std::vector<pixel> > L;
std::vector<double> A;
void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
bool get_uv(double x, double y, int&u, int&v);
void create_map(int M, int N, int R, int S, double ro0, double smin);
};
CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
class CV_EXPORTS LDA
{
public:
// Initializes a LDA with num_components (default 0) and specifies how
// samples are aligned (default dataAsRow=true).
LDA(int num_components = 0) :
_num_components(num_components) {};
// Initializes and performs a Discriminant Analysis with Fisher's
// Optimization Criterion on given data in src and corresponding labels
// in labels. If 0 (or less) number of components are given, they are
// automatically determined for given data in computation.
LDA(InputArrayOfArrays src, InputArray labels,
int num_components = 0) :
_num_components(num_components)
{
this->compute(src, labels); //! compute eigenvectors and eigenvalues
}
// Serializes this object to a given filename.
void save(const String& filename) const;
// Deserializes this object from a given filename.
void load(const String& filename);
// Serializes this object to a given cv::FileStorage.
void save(FileStorage& fs) const;
// Deserializes this object from a given cv::FileStorage.
void load(const FileStorage& node);
// Destructor.
~LDA() {}
//! Compute the discriminants for data in src and labels.
void compute(InputArrayOfArrays src, InputArray labels);
// Projects samples into the LDA subspace.
Mat project(InputArray src);
// Reconstructs projections from the LDA subspace.
Mat reconstruct(InputArray src);
// Returns the eigenvectors of this LDA.
Mat eigenvectors() const { return _eigenvectors; };
// Returns the eigenvalues of this LDA.
Mat eigenvalues() const { return _eigenvalues; }
protected:
bool _dataAsRow;
int _num_components;
Mat _eigenvectors;
Mat _eigenvalues;
void lda(InputArrayOfArrays src, InputArray labels);
};
class CV_EXPORTS_W FaceRecognizer : public Algorithm
{
public:
//! virtual destructor
virtual ~FaceRecognizer() {}
// Trains a FaceRecognizer.
CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
// Updates a FaceRecognizer.
CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);
// Gets a prediction from a FaceRecognizer.
virtual int predict(InputArray src) const = 0;
// Predicts the label and confidence for a given sample.
CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
// Serializes this object to a given filename.
CV_WRAP virtual void save(const String& filename) const;
// Deserializes this object from a given filename.
CV_WRAP virtual void load(const String& filename);
// Serializes this object to a given cv::FileStorage.
virtual void save(FileStorage& fs) const = 0;
// Deserializes this object from a given cv::FileStorage.
virtual void load(const FileStorage& fs) = 0;
};
CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
enum
{
COLORMAP_AUTUMN = 0,
COLORMAP_BONE = 1,
COLORMAP_JET = 2,
COLORMAP_WINTER = 3,
COLORMAP_RAINBOW = 4,
COLORMAP_OCEAN = 5,
COLORMAP_SUMMER = 6,
COLORMAP_SPRING = 7,
COLORMAP_COOL = 8,
COLORMAP_HSV = 9,
COLORMAP_PINK = 10,
COLORMAP_HOT = 11
};
CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
CV_EXPORTS bool initModule_contrib();
}
#include "opencv2/contrib/retina.hpp"
#include "opencv2/contrib/openfabmap.hpp"
#endif

View File

@@ -0,0 +1,384 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CONTRIB_COMPAT_HPP__
#define __OPENCV_CONTRIB_COMPAT_HPP__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
/****************************************************************************************\
* Adaptive Skin Detector *
\****************************************************************************************/
class CV_EXPORTS CvAdaptiveSkinDetector
{
private:
enum {
GSD_HUE_LT = 3,
GSD_HUE_UT = 33,
GSD_INTENSITY_LT = 15,
GSD_INTENSITY_UT = 250
};
class CV_EXPORTS Histogram
{
private:
enum {
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
};
protected:
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
public:
CvHistogram *fHistogram;
Histogram();
virtual ~Histogram();
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
void mergeWith(Histogram *source, double weight);
};
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
double fHistogramMergeFactor, fHuePercentCovered;
Histogram histogramHueMotion, skinHueHistogram;
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
protected:
void initData(IplImage *src, int widthDivider, int heightDivider);
void adaptiveFilter();
public:
enum {
MORPHING_METHOD_NONE = 0,
MORPHING_METHOD_ERODE = 1,
MORPHING_METHOD_ERODE_ERODE = 2,
MORPHING_METHOD_ERODE_DILATE = 3
};
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
virtual ~CvAdaptiveSkinDetector();
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};
/****************************************************************************************\
* Fuzzy MeanShift Tracker *
\****************************************************************************************/
class CV_EXPORTS CvFuzzyPoint {
public:
double x, y, value;
CvFuzzyPoint(double _x, double _y);
};
class CV_EXPORTS CvFuzzyCurve {
private:
std::vector<CvFuzzyPoint> points;
double value, centre;
bool between(double x, double x1, double x2);
public:
CvFuzzyCurve();
~CvFuzzyCurve();
void setCentre(double _centre);
double getCentre();
void clear();
void addPoint(double x, double y);
double calcValue(double param);
double getValue();
void setValue(double _value);
};
class CV_EXPORTS CvFuzzyFunction {
public:
std::vector<CvFuzzyCurve> curves;
CvFuzzyFunction();
~CvFuzzyFunction();
void addCurve(CvFuzzyCurve *curve, double value = 0);
void resetValues();
double calcValue();
CvFuzzyCurve *newCurve();
};
class CV_EXPORTS CvFuzzyRule {
private:
CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
CvFuzzyCurve *fuzzyOutput;
public:
CvFuzzyRule();
~CvFuzzyRule();
void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcValue(double param1, double param2);
CvFuzzyCurve *getOutputCurve();
};
class CV_EXPORTS CvFuzzyController {
private:
std::vector<CvFuzzyRule*> rules;
public:
CvFuzzyController();
~CvFuzzyController();
void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcOutput(double param1, double param2);
};
class CV_EXPORTS CvFuzzyMeanShiftTracker
{
private:
class FuzzyResizer
{
private:
CvFuzzyFunction iInput, iOutput;
CvFuzzyController fuzzyController;
public:
FuzzyResizer();
int calcOutput(double edgeDensity, double density);
};
class SearchWindow
{
public:
FuzzyResizer *fuzzyResizer;
int x, y;
int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
int ldx, ldy, ldw, ldh, numShifts, numIters;
int xGc, yGc;
long m00, m01, m10, m11, m02, m20;
double ellipseAngle;
double density;
unsigned int depthLow, depthHigh;
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
SearchWindow();
~SearchWindow();
void setSize(int _x, int _y, int _width, int _height);
void initDepthValues(IplImage *maskImage, IplImage *depthMap);
bool shift();
void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
};
public:
enum TrackingState
{
tsNone = 0,
tsSearching = 1,
tsTracking = 2,
tsSetWindow = 3,
tsDisabled = 10
};
enum ResizeMethod {
rmEdgeDensityLinear = 0,
rmEdgeDensityFuzzy = 1,
rmInnerDensity = 2
};
enum {
MinKernelMass = 1000
};
SearchWindow kernel;
int searchMode;
private:
enum
{
MaxMeanShiftIteration = 5,
MaxSetSizeIteration = 5
};
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
public:
CvFuzzyMeanShiftTracker();
~CvFuzzyMeanShiftTracker();
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};
namespace cv
{
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
class CV_EXPORTS LevMarqSparse {
public:
LevMarqSparse();
LevMarqSparse(int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (*fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (*func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data, // user-specific data passed to the callbacks
BundleAdjustCallback cb, void* user_data
);
virtual ~LevMarqSparse();
virtual void run( int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data // user-specific data passed to the callbacks
);
virtual void clear();
// useful function to do simple bundle adjustment tasks
static void bundleAdjust(std::vector<Point3d>& points, // positions of points in global coordinate system (input and output)
const std::vector<std::vector<Point2d> >& imagePoints, // projections of 3d points for every camera
const std::vector<std::vector<int> >& visibility, // visibility of 3d points for every camera
std::vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
std::vector<Mat>& R, // rotation matrices of all cameras (input and output)
std::vector<Mat>& T, // translation vector of all cameras (input and output)
std::vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
const TermCriteria& criteria=
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
BundleAdjustCallback cb = 0, void* user_data = 0);
public:
virtual void optimize(CvMat &_vis); //main function that runs minimization
//iteratively asks for measurement for visible camera-point pairs
void ask_for_proj(CvMat &_vis,bool once=false);
//iteratively asks for Jacobians for every camera-point pair
void ask_for_projac(CvMat &_vis);
CvMat* err; //error X-hX
double prevErrNorm, errNorm;
double lambda;
CvTermCriteria criteria;
int iters;
CvMat** U; //size of array is equal to number of cameras
CvMat** V; //size of array is equal to number of points
CvMat** inv_V_star; //inverse of V*
CvMat** A;
CvMat** B;
CvMat** W;
CvMat* X; //measurement
CvMat* hX; //current measurement estimation given the new parameter vector
CvMat* prevP; //currently accepted parameter vector
CvMat* P; // parameters used to evaluate the function with new values;
// these parameters may be rejected
CvMat* deltaP; //computed increment of the parameters (result of the normal system solution)
CvMat** ea; // sum_i Aij^T * e_ij , used as the right-hand side of the normal equations
// length of array is j = number of cameras
CvMat** eb; // sum_j Bij^T * e_ij , used as the right-hand side of the normal equations
// length of array is i = number of points
CvMat** Yj; //length of array is i = num_points
CvMat* S; //big matrix of blocks Sjk , each block has size num_cam_params x num_cam_params
CvMat* JtJ_diag; //diagonal of JtJ, used to back up the diagonal elements before augmentation
CvMat* Vis_index; // matrix whose element (i, j) is the index of the measurement for point i and camera j
int num_cams;
int num_points;
int num_err_param;
int num_cam_param;
int num_point_param;
//target function and Jacobian pointers, which need to be initialized
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
void* data;
BundleAdjustCallback cb;
void* user_data;
};
} // cv
#endif /* __cplusplus */
#endif /* __OPENCV_CONTRIB_COMPAT_HPP__ */
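For orientation, a minimal sketch of calling the bundleAdjust entry point declared above; the containers are assumed to be filled by an existing reconstruction pipeline, and the wrapper name is hypothetical:

void refineReconstruction(std::vector<cv::Point3d>& points,
                          const std::vector<std::vector<cv::Point2d> >& imagePoints,
                          const std::vector<std::vector<int> >& visibility,
                          std::vector<cv::Mat>& cameraMatrix,
                          std::vector<cv::Mat>& R,
                          std::vector<cv::Mat>& T,
                          std::vector<cv::Mat>& distCoeffs)
{
    // refines points and camera parameters in place; needs <cfloat> for DBL_EPSILON
    cv::LevMarqSparse::bundleAdjust(points, imagePoints, visibility,
                                    cameraMatrix, R, T, distCoeffs,
                                    cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                                     30, DBL_EPSILON));
}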

View File

@@ -7,11 +7,12 @@
// copy or use the software.
//
//
// License Agreement
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -40,936 +41,8 @@
//
//M*/
#ifndef __OPENCV_CONTRIB_HPP__
#define __OPENCV_CONTRIB_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#ifdef __cplusplus
/****************************************************************************************\
* Adaptive Skin Detector *
\****************************************************************************************/
class CV_EXPORTS CvAdaptiveSkinDetector
{
private:
enum {
GSD_HUE_LT = 3,
GSD_HUE_UT = 33,
GSD_INTENSITY_LT = 15,
GSD_INTENSITY_UT = 250
};
class CV_EXPORTS Histogram
{
private:
enum {
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
};
protected:
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
public:
CvHistogram *fHistogram;
Histogram();
virtual ~Histogram();
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
void mergeWith(Histogram *source, double weight);
};
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
double fHistogramMergeFactor, fHuePercentCovered;
Histogram histogramHueMotion, skinHueHistogram;
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
protected:
void initData(IplImage *src, int widthDivider, int heightDivider);
void adaptiveFilter();
public:
enum {
MORPHING_METHOD_NONE = 0,
MORPHING_METHOD_ERODE = 1,
MORPHING_METHOD_ERODE_ERODE = 2,
MORPHING_METHOD_ERODE_DILATE = 3
};
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
virtual ~CvAdaptiveSkinDetector();
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};
/****************************************************************************************\
* Fuzzy MeanShift Tracker *
\****************************************************************************************/
class CV_EXPORTS CvFuzzyPoint {
public:
double x, y, value;
CvFuzzyPoint(double _x, double _y);
};
class CV_EXPORTS CvFuzzyCurve {
private:
std::vector<CvFuzzyPoint> points;
double value, centre;
bool between(double x, double x1, double x2);
public:
CvFuzzyCurve();
~CvFuzzyCurve();
void setCentre(double _centre);
double getCentre();
void clear();
void addPoint(double x, double y);
double calcValue(double param);
double getValue();
void setValue(double _value);
};
class CV_EXPORTS CvFuzzyFunction {
public:
std::vector<CvFuzzyCurve> curves;
CvFuzzyFunction();
~CvFuzzyFunction();
void addCurve(CvFuzzyCurve *curve, double value = 0);
void resetValues();
double calcValue();
CvFuzzyCurve *newCurve();
};
class CV_EXPORTS CvFuzzyRule {
private:
CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
CvFuzzyCurve *fuzzyOutput;
public:
CvFuzzyRule();
~CvFuzzyRule();
void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcValue(double param1, double param2);
CvFuzzyCurve *getOutputCurve();
};
class CV_EXPORTS CvFuzzyController {
private:
std::vector<CvFuzzyRule*> rules;
public:
CvFuzzyController();
~CvFuzzyController();
void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
double calcOutput(double param1, double param2);
};
class CV_EXPORTS CvFuzzyMeanShiftTracker
{
private:
class FuzzyResizer
{
private:
CvFuzzyFunction iInput, iOutput;
CvFuzzyController fuzzyController;
public:
FuzzyResizer();
int calcOutput(double edgeDensity, double density);
};
class SearchWindow
{
public:
FuzzyResizer *fuzzyResizer;
int x, y;
int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
int ldx, ldy, ldw, ldh, numShifts, numIters;
int xGc, yGc;
long m00, m01, m10, m11, m02, m20;
double ellipseAngle;
double density;
unsigned int depthLow, depthHigh;
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
SearchWindow();
~SearchWindow();
void setSize(int _x, int _y, int _width, int _height);
void initDepthValues(IplImage *maskImage, IplImage *depthMap);
bool shift();
void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
};
public:
enum TrackingState
{
tsNone = 0,
tsSearching = 1,
tsTracking = 2,
tsSetWindow = 3,
tsDisabled = 10
};
enum ResizeMethod {
rmEdgeDensityLinear = 0,
rmEdgeDensityFuzzy = 1,
rmInnerDensity = 2
};
enum {
MinKernelMass = 1000
};
SearchWindow kernel;
int searchMode;
private:
enum
{
MaxMeanShiftIteration = 5,
MaxSetSizeIteration = 5
};
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
public:
CvFuzzyMeanShiftTracker();
~CvFuzzyMeanShiftTracker();
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};
namespace cv
{
class CV_EXPORTS Octree
{
public:
struct Node
{
Node() {}
int begin, end;
float x_min, x_max, y_min, y_max, z_min, z_max;
int maxLevels;
bool isLeaf;
int children[8];
};
Octree();
Octree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual ~Octree();
virtual void buildTree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual void getPointsWithinSphere( const Point3f& center, float radius,
vector<Point3f>& points ) const;
const vector<Node>& getNodes() const { return nodes; }
private:
int minPoints;
vector<Point3f> points;
vector<Node> nodes;
virtual void buildNext(size_t node_ind);
};
class CV_EXPORTS Mesh3D
{
public:
struct EmptyMeshException {};
Mesh3D();
Mesh3D(const vector<Point3f>& vtx);
~Mesh3D();
void buildOctree();
void clearOctree();
float estimateResolution(float tryRatio = 0.1f);
void computeNormals(float normalRadius, int minNeighbors = 20);
void computeNormals(const vector<int>& subset, float normalRadius, int minNeighbors = 20);
void writeAsVrml(const String& file, const vector<Scalar>& colors = vector<Scalar>()) const;
vector<Point3f> vtx;
vector<Point3f> normals;
float resolution;
Octree octree;
const static Point3f allzero;
};
class CV_EXPORTS SpinImageModel
{
public:
/* model parameters, leave unset for default or auto estimate */
float normalRadius;
int minNeighbors;
float binSize;
int imageWidth;
float lambda;
float gamma;
float T_GeometriccConsistency;
float T_GroupingCorespondances;
/* public interface */
SpinImageModel();
explicit SpinImageModel(const Mesh3D& mesh);
~SpinImageModel();
void setLogger(std::ostream* log);
void selectRandomSubset(float ratio);
void setSubset(const vector<int>& subset);
void compute();
void match(const SpinImageModel& scene, vector< vector<Vec2i> >& result);
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
size_t getSpinCount() const { return spinImages.rows; }
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
const Mesh3D& getMesh() const { return mesh; }
Mesh3D& getMesh() { return mesh; }
/* static utility functions */
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2);
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2,
float gamma);
protected:
void defaultParams();
void matchSpinToModel(const Mat& spin, vector<int>& indeces,
vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
void repackSpinImages(const vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
vector<int> subset;
Mesh3D mesh;
Mat spinImages;
std::ostream* out;
};
class CV_EXPORTS TickMeter
{
public:
TickMeter();
void start();
void stop();
int64 getTimeTicks() const;
double getTimeMicro() const;
double getTimeMilli() const;
double getTimeSec() const;
int64 getCounter() const;
void reset();
private:
int64 counter;
int64 sumTime;
int64 startTime;
};
CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
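A short TickMeter usage sketch (illustrative only; the timed callable is a placeholder passed in as a parameter):

void timeIt(void (*someWorkload)())        // someWorkload: any function to be timed (placeholder)
{
    cv::TickMeter tm;
    tm.start();
    someWorkload();
    tm.stop();
    // getTimeMilli() reports the accumulated time between start()/stop() calls, in milliseconds;
    // the operator<< declared above prints the measured time (needs <iostream>)
    std::cout << tm << " = " << tm.getTimeMilli() << " ms" << std::endl;
}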
class CV_EXPORTS SelfSimDescriptor
{
public:
SelfSimDescriptor();
SelfSimDescriptor(int _ssize, int _lsize,
int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
int _nangles=DEFAULT_NUM_ANGLES);
SelfSimDescriptor(const SelfSimDescriptor& ss);
virtual ~SelfSimDescriptor();
SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
size_t getDescriptorSize() const;
Size getGridSize( Size imgsize, Size winStride ) const;
virtual void compute(const Mat& img, vector<float>& descriptors, Size winStride=Size(),
const vector<Point>& locations=vector<Point>()) const;
virtual void computeLogPolarMapping(Mat& mappingMask) const;
virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
int smallSize;
int largeSize;
int startDistanceBucket;
int numberOfDistanceBuckets;
int numberOfAngles;
enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
};
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
class LevMarqSparse {
public:
LevMarqSparse();
LevMarqSparse(int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data, // user-specific data passed to the callbacks
BundleAdjustCallback cb, void* user_data
);
virtual ~LevMarqSparse();
virtual void run( int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
int nCameraParams, // number of parameters per one camera
int nErrParams, // number of parameters in measurement vector
// for 1 point at one camera (2 in case of 2D projections)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
// callback for estimation of backprojection errors
void (CV_CDECL * func)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& estim, void* data),
void* data // user-specific data passed to the callbacks
);
virtual void clear();
// useful function to do simple bundle adjustment tasks
static void bundleAdjust(vector<Point3d>& points, // positions of points in global coordinate system (input and output)
const vector<vector<Point2d> >& imagePoints, // projections of 3d points for every camera
const vector<vector<int> >& visibility, // visibility of 3d points for every camera
vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
vector<Mat>& R, // rotation matrices of all cameras (input and output)
vector<Mat>& T, // translation vector of all cameras (input and output)
vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
const TermCriteria& criteria=
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
BundleAdjustCallback cb = 0, void* user_data = 0);
public:
virtual void optimize(CvMat &_vis); //main function that runs minimization
//iteratively asks for measurement for visible camera-point pairs
void ask_for_proj(CvMat &_vis,bool once=false);
//iteratively asks for Jacobians for every camera-point pair
void ask_for_projac(CvMat &_vis);
CvMat* err; //error X-hX
double prevErrNorm, errNorm;
double lambda;
CvTermCriteria criteria;
int iters;
CvMat** U; //size of array is equal to number of cameras
CvMat** V; //size of array is equal to number of points
CvMat** inv_V_star; //inverse of V*
CvMat** A;
CvMat** B;
CvMat** W;
CvMat* X; //measurement
CvMat* hX; //current measurement estimation given the new parameter vector
CvMat* prevP; //currently accepted parameter vector
CvMat* P; // parameters used to evaluate the function with new values;
// these parameters may be rejected
CvMat* deltaP; //computed increment of the parameters (result of the normal system solution)
CvMat** ea; // sum_i Aij^T * e_ij , used as the right-hand side of the normal equations
// length of array is j = number of cameras
CvMat** eb; // sum_j Bij^T * e_ij , used as the right-hand side of the normal equations
// length of array is i = number of points
CvMat** Yj; //length of array is i = num_points
CvMat* S; //big matrix of blocks Sjk , each block has size num_cam_params x num_cam_params
CvMat* JtJ_diag; //diagonal of JtJ, used to back up the diagonal elements before augmentation
CvMat* Vis_index; // matrix whose element (i, j) is the index of the measurement for point i and camera j
int num_cams;
int num_points;
int num_err_param;
int num_cam_param;
int num_point_param;
//target function and Jacobian pointers, which need to be initialized
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
void* data;
BundleAdjustCallback cb;
void* user_data;
};
CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
CV_OUT vector<vector<Point> >& results, CV_OUT vector<float>& cost,
double templScale=1, int maxMatches = 20,
double minMatchDistance = 1.0, int padX = 3,
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
double orientationWeight = 0.5, double truncate = 20);
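A hedged wrapper showing how chamerMatching above can be invoked; edgeImg and edgeTempl stand for binary edge maps (for example Canny output) and are assumptions, not part of the API:

int matchTemplateByChamfer(cv::Mat& edgeImg, cv::Mat& edgeTempl,               // binary edge maps (assumed inputs)
                           std::vector<std::vector<cv::Point> >& results,      // matched contours, filled by the call
                           std::vector<float>& costs)                          // matching cost per result
{
    // all remaining parameters (templScale, maxMatches, padding, scales, ...) keep the defaults shown above
    return cv::chamerMatching(edgeImg, edgeTempl, results, costs);
}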
class CV_EXPORTS_W StereoVar
{
public:
// Flags
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
enum {CYCLE_O, CYCLE_V};
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
//! the default constructor
CV_WRAP StereoVar();
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
//! the destructor
virtual ~StereoVar();
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, Mat& disp);
CV_PROP_RW int levels;
CV_PROP_RW double pyrScale;
CV_PROP_RW int nIt;
CV_PROP_RW int minDisp;
CV_PROP_RW int maxDisp;
CV_PROP_RW int poly_n;
CV_PROP_RW double poly_sigma;
CV_PROP_RW float fi;
CV_PROP_RW float lambda;
CV_PROP_RW int penalization;
CV_PROP_RW int cycle;
CV_PROP_RW int flags;
private:
void autoParams();
void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
};
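A minimal sketch of running StereoVar on a rectified pair with default parameters (the function name and inputs are hypothetical):

cv::Mat computeVariationalDisparity(const cv::Mat& leftRect, const cv::Mat& rightRect)
{
    cv::StereoVar stereo;                      // default constructor: default pyramid and penalization settings
    cv::Mat disparity;
    stereo(leftRect, rightRect, disparity);    // operator() is the correspondence operator declared above
    return disparity;
}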
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
class CV_EXPORTS Directory
{
public:
static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
};
/*
* Generation of a set of distinct colors in the following way:
* 1) generate more colors than needed ("factor" times as many) in RGB,
* 2) convert them to Lab,
* 3) choose the needed count of colors from the set so that they are maximally
* different from each other,
* 4) convert the chosen colors back to RGB
*/
CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
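As a hedged illustration of the routine described above (the wrapper name is hypothetical):

std::vector<cv::Scalar> makePalette(size_t count)
{
    std::vector<cv::Scalar> palette;
    cv::generateColors(palette, count);   // the oversampling factor keeps its default (100)
    return palette;                       // palette[k] can be used, e.g., as the color of the k-th tracked object
}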
/*
* Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
* "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
*/
enum { ROTATION = 1,
TRANSLATION = 2,
RIGID_BODY_MOTION = 4
};
CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
const Mat& image0, const Mat& depth0, const Mat& mask0,
const Mat& image1, const Mat& depth1, const Mat& mask1,
const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
const std::vector<int>& iterCounts=std::vector<int>(),
const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
int transformType=RIGID_BODY_MOTION );
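A hedged call sketch for RGBDOdometry as declared above; the grayscale/metric-depth inputs and the 3x3 intrinsics K are assumptions about the expected data, and the wrapper name is hypothetical:

bool estimateCameraMotion(const cv::Mat& gray0, const cv::Mat& depth0,     // frame 0 (assumed 8-bit gray + depth in meters)
                          const cv::Mat& gray1, const cv::Mat& depth1,     // frame 1
                          const cv::Mat& K, cv::Mat& Rt)                   // 3x3 intrinsics, estimated transform (output)
{
    // empty masks and no initial transform; depth/iteration parameters keep the defaults shown above
    return cv::RGBDOdometry(Rt, cv::Mat(),
                            gray0, depth0, cv::Mat(),
                            gray1, depth1, cv::Mat(),
                            K);
}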
/**
*Bilinear interpolation technique.
*
*The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
*of the four nearest neighbouring Cartesian pixels to the center of the RF.
*The same principle is applied to the inverse transformation.
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Interp
{
public:
LogPolar_Interp() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Interp();
protected:
Mat Rsri;
Mat Csri;
int S, R, M, N;
int top, bottom,left,right;
double ro0, romax, a, q;
int interp;
Mat ETAyx;
Mat CSIyx;
void create_map(int M, int N, int R, int S, double ro0);
};
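A short usage sketch of LogPolar_Interp (illustrative; img is a placeholder input image and the transform is centred on it):

cv::Mat toCorticalAndBack(const cv::Mat& img)
{
    cv::LogPolar_Interp lp(img.cols, img.rows,
                           cv::Point2i(img.cols / 2, img.rows / 2));   // R, ro0, S keep the defaults documented above
    cv::Mat cortical = lp.to_cortical(img);                            // Cartesian -> cortical (log-polar) image
    return lp.to_cartesian(cortical);                                  // inverse transform back to the retinal image
}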
/**
*Overlapping circular receptive fields technique
*
*The Cartesian plane is divided in two regions: the fovea and the periphery.
*The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
*the periphery we use the overlapping Gaussian circular RFs.
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Overlapping
{
public:
LogPolar_Overlapping() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
double ro0=3.0, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Overlapping();
protected:
Mat Rsri;
Mat Csri;
vector<int> Rsr;
vector<int> Csr;
vector<double> Wsr;
int S, R, M, N, ind1;
int top, bottom,left,right;
double ro0, romax, a, q;
struct kernel
{
kernel() { w = 0; }
vector<double> weights;
int w;
};
Mat ETAyx;
Mat CSIyx;
vector<kernel> w_ker_2D;
void create_map(int M, int N, int R, int S, double ro0);
};
/**
* Adjacent receptive fields technique
*
*All the Cartesian pixels whose coordinates in the cortical domain share the same integer part are assigned to the same RF.
*The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
*This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
*to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
*
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Adjacent
{
public:
LogPolar_Adjacent() {}
/**
*Constructor
*\param w the width of the input image
*\param h the height of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param smin the size of the subpixel (default value 0.25 pixel)
*\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
*\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
*\param source the Cartesian image
*\return the transformed image (cortical image)
*/
const Mat to_cortical(const Mat &source);
/**
*Transformation from cortical image to retinal (inverse log-polar) image.
*\param source the cortical image
*\return the transformed image (retinal image)
*/
const Mat to_cartesian(const Mat &source);
/**
*Destructor
*/
~LogPolar_Adjacent();
protected:
struct pixel
{
pixel() { u = v = 0; a = 0.; }
int u;
int v;
double a;
};
int S, R, M, N;
int top, bottom,left,right;
double ro0, romax, a, q;
vector<vector<pixel> > L;
vector<double> A;
void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
bool get_uv(double x, double y, int&u, int&v);
void create_map(int M, int N, int R, int S, double ro0, double smin);
};
CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
class CV_EXPORTS LDA
{
public:
// Initializes an LDA with num_components (default 0) and specifies how
// samples are aligned (default dataAsRow=true).
LDA(int num_components = 0) :
_num_components(num_components) {};
// Initializes and performs a Discriminant Analysis with Fisher's
// Optimization Criterion on given data in src and corresponding labels
// in labels. If 0 (or fewer) components are requested, their number is
// determined automatically from the given data.
LDA(InputArrayOfArrays src, InputArray labels,
int num_components = 0) :
_num_components(num_components)
{
this->compute(src, labels); //! compute eigenvectors and eigenvalues
}
// Serializes this object to a given filename.
void save(const string& filename) const;
// Deserializes this object from a given filename.
void load(const string& filename);
// Serializes this object to a given cv::FileStorage.
void save(FileStorage& fs) const;
// Deserializes this object from a given cv::FileStorage.
void load(const FileStorage& node);
// Destructor.
~LDA() {}
//! Compute the discriminants for data in src and labels.
void compute(InputArrayOfArrays src, InputArray labels);
// Projects samples into the LDA subspace.
Mat project(InputArray src);
// Reconstructs projections from the LDA subspace.
Mat reconstruct(InputArray src);
// Returns the eigenvectors of this LDA.
Mat eigenvectors() const { return _eigenvectors; };
// Returns the eigenvalues of this LDA.
Mat eigenvalues() const { return _eigenvalues; }
protected:
bool _dataAsRow;
int _num_components;
Mat _eigenvectors;
Mat _eigenvalues;
void lda(InputArrayOfArrays src, InputArray labels);
};
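A compact sketch of the LDA workflow above (samples, labels and query are assumed inputs; the wrapper name is hypothetical):

cv::Mat projectWithLDA(cv::InputArrayOfArrays samples, cv::InputArray labels, cv::InputArray query)
{
    cv::LDA lda;                    // 0 components: the number is determined automatically from the data
    lda.compute(samples, labels);   // estimate the discriminants
    return lda.project(query);      // projection into the discriminant subspace
}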
class CV_EXPORTS_W FaceRecognizer : public Algorithm
{
public:
//! virtual destructor
virtual ~FaceRecognizer() {}
// Trains a FaceRecognizer.
CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
// Updates a FaceRecognizer.
CV_WRAP virtual void update(InputArrayOfArrays src, InputArray labels);
// Gets a prediction from a FaceRecognizer.
virtual int predict(InputArray src) const = 0;
// Predicts the label and confidence for a given sample.
CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
// Serializes this object to a given filename.
CV_WRAP virtual void save(const string& filename) const;
// Deserializes this object from a given filename.
CV_WRAP virtual void load(const string& filename);
// Serializes this object to a given cv::FileStorage.
virtual void save(FileStorage& fs) const = 0;
// Deserializes this object from a given cv::FileStorage.
virtual void load(const FileStorage& fs) = 0;
};
CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
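The factory functions above can be used roughly as in this hedged sketch (the wrapper name and the training data are assumptions):

int recognize(const std::vector<cv::Mat>& faceImages, const std::vector<int>& faceLabels,
              const cv::Mat& probeFace)
{
    cv::Ptr<cv::FaceRecognizer> model = cv::createLBPHFaceRecognizer();   // radius/neighbors/grid keep their defaults
    model->train(faceImages, faceLabels);                                 // uses the InputArrayOfArrays/InputArray overloads above
    return model->predict(probeFace);                                     // predict(src, label, confidence) also reports a confidence
}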
enum
{
COLORMAP_AUTUMN = 0,
COLORMAP_BONE = 1,
COLORMAP_JET = 2,
COLORMAP_WINTER = 3,
COLORMAP_RAINBOW = 4,
COLORMAP_OCEAN = 5,
COLORMAP_SUMMER = 6,
COLORMAP_SPRING = 7,
COLORMAP_COOL = 8,
COLORMAP_HSV = 9,
COLORMAP_PINK = 10,
COLORMAP_HOT = 11
};
CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
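For example (a minimal sketch; the input is assumed to be a single-channel 8-bit image):

cv::Mat colorize(const cv::Mat& gray)
{
    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_JET);   // any of the COLORMAP_* constants above can be used
    return colored;
}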
CV_EXPORTS bool initModule_contrib();
}
#include "opencv2/contrib/retina.hpp"
#include "opencv2/contrib/openfabmap.hpp"
#endif
#ifdef __OPENCV_BUILD
#error this is a compatibility header which should not be used inside the OpenCV library
#endif
#include "opencv2/contrib.hpp"

View File

@@ -2,8 +2,8 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/core.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

View File

@@ -43,12 +43,11 @@
#ifndef __OPENCV_HYBRIDTRACKER_H_
#define __OPENCV_HYBRIDTRACKER_H_
#include "opencv2/core/core.hpp"
#include "opencv2/core/operations.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/ml.hpp"
#ifdef __cplusplus
@@ -76,9 +75,9 @@ struct CV_EXPORTS CvMeanShiftTrackerParams
CvTermCriteria term_crit = CvTermCriteria());
int tracking_type;
vector<float> h_range;
vector<float> s_range;
vector<float> v_range;
std::vector<float> h_range;
std::vector<float> s_range;
std::vector<float> v_range;
CvTermCriteria term_crit;
};
@@ -145,7 +144,7 @@ class CV_EXPORTS CvFeatureTracker
private:
Ptr<Feature2D> dd;
Ptr<DescriptorMatcher> matcher;
vector<DMatch> matches;
std::vector<DMatch> matches;
Mat prev_image;
Mat prev_image_bw;
@@ -153,7 +152,7 @@ private:
Point2d prev_center;
int ittr;
vector<Point2f> features[2];
std::vector<Point2f> features[2];
public:
Mat disp_matches;

View File

@@ -52,8 +52,8 @@
#ifndef __OPENCV_OPENFABMAP_H_
#define __OPENCV_OPENFABMAP_H_
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include <vector>
#include <list>
@@ -65,10 +65,6 @@ namespace cv {
namespace of2 {
using std::list;
using std::map;
using std::multiset;
/*
Return data format of a FABMAP compare call
*/
@@ -115,50 +111,50 @@ public:
//methods to add training data for sampling method
virtual void addTraining(const Mat& queryImgDescriptor);
virtual void addTraining(const vector<Mat>& queryImgDescriptors);
virtual void addTraining(const std::vector<Mat>& queryImgDescriptors);
//methods to add to the test data
virtual void add(const Mat& queryImgDescriptor);
virtual void add(const vector<Mat>& queryImgDescriptors);
virtual void add(const std::vector<Mat>& queryImgDescriptors);
//accessors
const vector<Mat>& getTrainingImgDescriptors() const;
const vector<Mat>& getTestImgDescriptors() const;
const std::vector<Mat>& getTrainingImgDescriptors() const;
const std::vector<Mat>& getTestImgDescriptors() const;
//Main FabMap image comparison
void compare(const Mat& queryImgDescriptor,
vector<IMatch>& matches, bool addQuery = false,
std::vector<IMatch>& matches, bool addQuery = false,
const Mat& mask = Mat());
void compare(const Mat& queryImgDescriptor,
const Mat& testImgDescriptors, vector<IMatch>& matches,
const Mat& testImgDescriptors, std::vector<IMatch>& matches,
const Mat& mask = Mat());
void compare(const Mat& queryImgDescriptor,
const vector<Mat>& testImgDescriptors,
vector<IMatch>& matches, const Mat& mask = Mat());
void compare(const vector<Mat>& queryImgDescriptors, vector<
const std::vector<Mat>& testImgDescriptors,
std::vector<IMatch>& matches, const Mat& mask = Mat());
void compare(const std::vector<Mat>& queryImgDescriptors, std::vector<
IMatch>& matches, bool addQuery = false, const Mat& mask =
Mat());
void compare(const vector<Mat>& queryImgDescriptors,
const vector<Mat>& testImgDescriptors,
vector<IMatch>& matches, const Mat& mask = Mat());
void compare(const std::vector<Mat>& queryImgDescriptors,
const std::vector<Mat>& testImgDescriptors,
std::vector<IMatch>& matches, const Mat& mask = Mat());
protected:
void compareImgDescriptor(const Mat& queryImgDescriptor,
int queryIndex, const vector<Mat>& testImgDescriptors,
vector<IMatch>& matches);
int queryIndex, const std::vector<Mat>& testImgDescriptors,
std::vector<IMatch>& matches);
void addImgDescriptor(const Mat& queryImgDescriptor);
//the getLikelihoods method is overwritten for each different FabMap
//method.
virtual void getLikelihoods(const Mat& queryImgDescriptor,
const vector<Mat>& testImgDescriptors,
vector<IMatch>& matches);
const std::vector<Mat>& testImgDescriptors,
std::vector<IMatch>& matches);
virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
//turn likelihoods into probabilities (also add in motion model if used)
void normaliseDistribution(vector<IMatch>& matches);
void normaliseDistribution(std::vector<IMatch>& matches);
//Chow-Liu Tree
int pq(int q);
@@ -174,9 +170,9 @@ protected:
//data
Mat clTree;
vector<Mat> trainingImgDescriptors;
vector<Mat> testImgDescriptors;
vector<IMatch> priorMatches;
std::vector<Mat> trainingImgDescriptors;
std::vector<Mat> testImgDescriptors;
std::vector<IMatch> priorMatches;
//parameters
double PzGe;
@@ -203,8 +199,8 @@ public:
protected:
//FabMap1 implementation of likelihood comparison
void getLikelihoods(const Mat& queryImgDescriptor, const vector<
Mat>& testImgDescriptors, vector<IMatch>& matches);
void getLikelihoods(const Mat& queryImgDescriptor, const std::vector<
Mat>& testImgDescriptors, std::vector<IMatch>& matches);
};
/*
@@ -219,8 +215,8 @@ public:
protected:
//FabMap look-up-table implementation of the likelihood comparison
void getLikelihoods(const Mat& queryImgDescriptor, const vector<
Mat>& testImgDescriptors, vector<IMatch>& matches);
void getLikelihoods(const Mat& queryImgDescriptor, const std::vector<
Mat>& testImgDescriptors, std::vector<IMatch>& matches);
//precomputed data
int (*table)[8];
@@ -243,8 +239,8 @@ public:
protected:
//FabMap Fast Bail-out implementation of the likelihood comparison
void getLikelihoods(const Mat& queryImgDescriptor, const vector<
Mat>& testImgDescriptors, vector<IMatch>& matches);
void getLikelihoods(const Mat& queryImgDescriptor, const std::vector<
Mat>& testImgDescriptors, std::vector<IMatch>& matches);
//structure used to determine word comparison order
struct WordStats {
@@ -268,7 +264,7 @@ protected:
};
//private fast bail-out necessary functions
void setWordStatistics(const Mat& queryImgDescriptor, multiset<WordStats>& wordData);
void setWordStatistics(const Mat& queryImgDescriptor, std::multiset<WordStats>& wordData);
double limitbisection(double v, double m);
double bennettInequality(double v, double m, double delta);
static bool compInfo(const WordStats& first, const WordStats& second);
@@ -295,39 +291,39 @@ public:
void addTraining(const Mat& queryImgDescriptors) {
FabMap::addTraining(queryImgDescriptors);
}
void addTraining(const vector<Mat>& queryImgDescriptors);
void addTraining(const std::vector<Mat>& queryImgDescriptors);
void add(const Mat& queryImgDescriptors) {
FabMap::add(queryImgDescriptors);
}
void add(const vector<Mat>& queryImgDescriptors);
void add(const std::vector<Mat>& queryImgDescriptors);
protected:
//FabMap2 implementation of the likelihood comparison
void getLikelihoods(const Mat& queryImgDescriptor, const vector<
Mat>& testImgDescriptors, vector<IMatch>& matches);
void getLikelihoods(const Mat& queryImgDescriptor, const std::vector<
Mat>& testImgDescriptors, std::vector<IMatch>& matches);
double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
//the likelihood function using the inverted index
void getIndexLikelihoods(const Mat& queryImgDescriptor, vector<
double>& defaults, map<int, vector<int> >& invertedMap,
vector<IMatch>& matches);
void getIndexLikelihoods(const Mat& queryImgDescriptor, std::vector<
double>& defaults, std::map<int, std::vector<int> >& invertedMap,
std::vector<IMatch>& matches);
void addToIndex(const Mat& queryImgDescriptor,
vector<double>& defaults,
map<int, vector<int> >& invertedMap);
std::vector<double>& defaults,
std::map<int, std::vector<int> >& invertedMap);
//data
vector<double> d1, d2, d3, d4;
vector<vector<int> > children;
std::vector<double> d1, d2, d3, d4;
std::vector<std::vector<int> > children;
// TODO: inverted map a vector?
vector<double> trainingDefaults;
map<int, vector<int> > trainingInvertedMap;
std::vector<double> trainingDefaults;
std::map<int, std::vector<int> > trainingInvertedMap;
vector<double> testDefaults;
map<int, vector<int> > testInvertedMap;
std::vector<double> testDefaults;
std::map<int, std::vector<int> > testInvertedMap;
};
/*
@@ -342,14 +338,14 @@ public:
//add data to the chow-liu tree before calling make
void add(const Mat& imgDescriptor);
void add(const vector<Mat>& imgDescriptors);
void add(const std::vector<Mat>& imgDescriptors);
const vector<Mat>& getImgDescriptors() const;
const std::vector<Mat>& getImgDescriptors() const;
Mat make(double infoThreshold = 0.0);
private:
vector<Mat> imgDescriptors;
std::vector<Mat> imgDescriptors;
Mat mergedImgDescriptors;
typedef struct info {
@@ -364,18 +360,18 @@ private:
double CP(int a, bool za, int b, bool zb); // a | b
//calculating mutual information of all edges
void createBaseEdges(list<info>& edges, double infoThreshold);
void createBaseEdges(std::list<info>& edges, double infoThreshold);
double calcMutInfo(int word1, int word2);
static bool sortInfoScores(const info& first, const info& second);
//selecting minimum spanning egdges with maximum information
bool reduceEdgesToMinSpan(list<info>& edges);
bool reduceEdgesToMinSpan(std::list<info>& edges);
//building the tree structure
Mat buildTree(int root_word, list<info> &edges);
Mat buildTree(int root_word, std::list<info> &edges);
void recAddToTree(Mat &cltree, int q, int pq,
list<info> &remaining_edges);
vector<int> extractChildren(list<info> &remaining_edges, int q);
std::list<info> &remaining_edges);
std::vector<int> extractChildren(std::list<info> &remaining_edges, int q);
};
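A hedged sketch of the training-side workflow for the openFABMAP classes touched by this hunk; it assumes the ChowLiuTree default constructor declared elsewhere in this header, and bowHistograms is a placeholder for bag-of-words image descriptors:

cv::Mat trainChowLiuTree(const std::vector<cv::Mat>& bowHistograms)
{
    cv::of2::ChowLiuTree treeBuilder;
    treeBuilder.add(bowHistograms);   // accumulate training image descriptors (std::vector<cv::Mat> overload above)
    return treeBuilder.make();        // build the tree with the default infoThreshold
}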

View File

@@ -72,7 +72,7 @@
* Author: Alexandre Benoit
*/
#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
#include <valarray>
namespace cv
@@ -85,10 +85,8 @@ enum RETINA_COLORSAMPLINGMETHOD
RETINA_COLOR_BAYER//!< standard bayer sampling
};
class RetinaFilter;
/**
* @class Retina a wrapper class which allows the Gipsa/Listic Labs model to be used.
* @class Retina a wrapper class which allows the Gipsa/Listic Labs model to be used with OpenCV.
* This retina model allows spatio-temporal image processing (applied to still images and video sequences).
* As a summary, these are the retina model properties:
* => It applies a spectral whitening (mid-frequency details enhancement)
@@ -115,7 +113,7 @@ class CV_EXPORTS Retina : public Algorithm {
public:
// parameters structure for better clarity; check the explanations in the comments of the methods setupOPLandIPLParvoChannel and setupIPLMagnoChannel
struct RetinaParameters{
struct RetinaParameters{
struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
OPLandIplParvoParameters():colorMode(true),
normaliseOutput(true),
@@ -163,15 +161,14 @@ public:
* @param retinaParameterFile : the parameters filename
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
*/
virtual void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true)=0;
virtual void setup(String retinaParameterFile="", const bool applyDefaultSetupOnFailure=true)=0;
/**
* try to open an XML retina parameters file to adjust current retina instance setup
* => if the xml file does not exist, then default setup is applied
* => warning, Exceptions are thrown if read XML file is not valid
* @param fs : the open Filestorage which contains retina parameters
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
*/
virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true)=0;
@@ -193,14 +190,13 @@ public:
* parameters setup display method
* @return a string which contains formatted parameters information
*/
virtual const std::string printSetup()=0;
virtual const String printSetup()=0;
/**
* write xml/yml formatted parameters information
* @param fs : the filename of the xml file that will be opened and written with formatted parameters information
*/
virtual void write( std::string fs ) const=0;
virtual void write( String fs ) const=0;
/**
* write xml/yml formatted parameters information
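A hedged sketch of driving the configuration methods shown in this hunk; creating the concrete Retina instance is outside this diff, so one is simply passed in by reference, and the file names are placeholders:

void configureRetina(cv::Retina& retina)
{
    retina.setup("retina_params.xml");                       // per the docs above, the default setup is applied if the file is missing
    std::cout << retina.printSetup().c_str() << std::endl;   // formatted parameter dump (needs <iostream>)
    retina.write("retina_params_saved.xml");                 // save the current parameters in xml/yml form
}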

View File

@@ -35,6 +35,8 @@
//M*/
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/contrib/compat.hpp"
#define ASD_INTENSITY_SET_PIXEL(pointer, qq) {(*pointer) = (unsigned char)qq;}

View File

@@ -40,7 +40,9 @@
//M*/
#include "precomp.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/compat.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
using namespace cv;
@@ -359,7 +361,7 @@ void LevMarqSparse::ask_for_proj(CvMat &/*_vis*/,bool once) {
cvGetSubRect( P, &cam_mat, cvRect( 0, j * num_cam_param, 1, num_cam_param ));
CvMat measur_mat;
cvGetSubRect( hX, &measur_mat, cvRect( 0, ind * num_err_param, 1, num_err_param ));
Mat _point_mat(&point_mat), _cam_mat(&cam_mat), _measur_mat(&measur_mat);
Mat _point_mat = cv::cvarrToMat(&point_mat), _cam_mat = cv::cvarrToMat(&cam_mat), _measur_mat = cv::cvarrToMat(&measur_mat);
func( i, j, _point_mat, _cam_mat, _measur_mat, data);
assert( ind*num_err_param == ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j]);
ind+=1;
@@ -398,7 +400,7 @@ void LevMarqSparse::ask_for_projac(CvMat &/*_vis*/) //should be evaluated at p
//CvMat* Bij = B_line[j];
//CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
Mat _point_mat(&point_mat), _cam_mat(&cam_mat), _Aij(Aij), _Bij(Bij);
Mat _point_mat = cv::cvarrToMat(&point_mat), _cam_mat = cv::cvarrToMat(&cam_mat), _Aij = cv::cvarrToMat(Aij), _Bij = cv::cvarrToMat(Bij);
(*fjac)(i, j, _point_mat, _cam_mat, _Aij, _Bij, data);
}
}
@@ -989,13 +991,13 @@ static void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& esti
func(i,j,&_point_params,&_cam_params,&_estim,data);
};
void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points in global coordinate system (input and output)
const vector<vector<Point2d> >& imagePoints, //projections of 3d points for every camera
const vector<vector<int> >& visibility, //visibility of 3d points for every camera
vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
vector<Mat>& R, //rotation matrices of all cameras (input and output)
vector<Mat>& T, //translation vector of all cameras (input and output)
vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
void LevMarqSparse::bundleAdjust( std::vector<Point3d>& points, //positions of points in global coordinate system (input and output)
const std::vector<std::vector<Point2d> >& imagePoints, //projections of 3d points for every camera
const std::vector<std::vector<int> >& visibility, //visibility of 3d points for every camera
std::vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
std::vector<Mat>& R, //rotation matrices of all cameras (input and output)
std::vector<Mat>& T, //translation vector of all cameras (input and output)
std::vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
const TermCriteria& criteria,
BundleAdjustCallback cb, void* user_data) {
//,enum{MOTION_AND_STRUCTURE,MOTION,STRUCTURE})
@@ -1100,16 +1102,17 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
}
//fill camera params
//R.clear();T.clear();cameraMatrix.clear();
Mat levmarP = cv::cvarrToMat(levmar.P);
for( int i = 0; i < num_cameras; i++ ) {
//rotation
Mat rot_vec = Mat(levmar.P).rowRange(i*num_cam_param, i*num_cam_param+3);
Mat rot_vec = levmarP.rowRange(i*num_cam_param, i*num_cam_param+3);
Rodrigues( rot_vec, R[i] );
//translation
T[i] = Mat(levmar.P).rowRange(i*num_cam_param + 3, i*num_cam_param+6);
levmarP.rowRange(i*num_cam_param + 3, i*num_cam_param+6).copyTo(T[i]);
//intrinsic camera matrix
double* intr_data = (double*)cameraMatrix[i].data;
double* intr = (double*)(Mat(levmar.P).data + Mat(levmar.P).step * (i*num_cam_param+6));
double* intr = (double*)(levmarP.data +levmarP.step * (i*num_cam_param+6));
//focals
intr_data[0] = intr[0]; //fx
intr_data[4] = intr[1]; //fy
@@ -1119,7 +1122,7 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
//add distortion if exists
if( distCoeffs.size() ) {
Mat(levmar.P).rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist).copyTo(distCoeffs[i]);
levmarP.rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist).copyTo(distCoeffs[i]);
}
}
}

View File

@@ -180,7 +180,7 @@ void BasicRetinaFilter::setLPfilterParameters(const float beta, const float tau,
}
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
float a = _filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
float a = _filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)std::sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
_filteringCoeficientsTable[1+tableOffset]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[2+tableOffset] =tau;
@@ -210,17 +210,17 @@ void BasicRetinaFilter::setProgressiveFilterConstants_CentredAccuracy(const floa
float _alpha=0.8f;
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)std::sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
_filteringCoeficientsTable[tableOffset+1]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[tableOffset+2] =tau;
float commonFactor=alpha0/(float)sqrt(_halfNBcolumns*_halfNBcolumns+_halfNBrows*_halfNBrows+1.0f);
float commonFactor=alpha0/(float)std::sqrt(_halfNBcolumns*_halfNBcolumns+_halfNBrows*_halfNBrows+1.0f);
//memset(_progressiveSpatialConstant, 255, _filterOutput.getNBpixels());
for (unsigned int idColumn=0;idColumn<_halfNBcolumns; ++idColumn)
for (unsigned int idRow=0;idRow<_halfNBrows; ++idRow)
{
// computing local spatial constant
float localSpatialConstantValue=commonFactor*sqrt((float)(idColumn*idColumn)+(float)(idRow*idRow));
float localSpatialConstantValue=commonFactor*std::sqrt((float)(idColumn*idColumn)+(float)(idRow*idRow));
if (localSpatialConstantValue>1.0f)
localSpatialConstantValue=1.0f;
@@ -236,7 +236,7 @@ void BasicRetinaFilter::setProgressiveFilterConstants_CentredAccuracy(const floa
_progressiveGain[_halfNBcolumns-1+idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localGain;
_progressiveGain[_halfNBcolumns-1-idColumn+_filterOutput.getNBcolumns()*(_halfNBrows-1-idRow)]=localGain;
//std::cout<<commonFactor<<", "<<sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
//std::cout<<commonFactor<<", "<<std::sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
}
}
@@ -266,7 +266,7 @@ void BasicRetinaFilter::setProgressiveFilterConstants_CustomAccuracy(const float
}
unsigned int tableOffset=filterIndex*3;
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)std::sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
_filteringCoeficientsTable[tableOffset+1]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[tableOffset+2] =tau;
@@ -286,7 +286,7 @@ void BasicRetinaFilter::setProgressiveFilterConstants_CustomAccuracy(const float
float localGain=(1.0f-localSpatialConstantValue)*(1.0f-localSpatialConstantValue)*(1.0f-localSpatialConstantValue)*(1.0f-localSpatialConstantValue)/(1.0f+_beta);
_progressiveGain[index]=localGain;
//std::cout<<commonFactor<<", "<<sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
//std::cout<<commonFactor<<", "<<std::sqrt((_halfNBcolumns-1-idColumn)+(_halfNBrows-idRow-1))<<", "<<(_halfNBcolumns-1-idColumn)<<", "<<(_halfNBrows-idRow-1)<<", "<<localSpatialConstantValue<<std::endl;
}
}

View File

@@ -112,7 +112,6 @@
//#define __BASIC_RETINA_ELEMENT_DEBUG
//using namespace std;
namespace cv
{
class BasicRetinaFilter

View File

@@ -90,7 +90,7 @@ Mat BOWMSCTrainer::cluster(const Mat& _descriptors) const {
Mat icovar = Mat::eye(_descriptors.cols,_descriptors.cols,_descriptors.type());
vector<Mat> initialCentres;
std::vector<Mat> initialCentres;
initialCentres.push_back(_descriptors.row(0));
for (int i = 1; i < _descriptors.rows; i++) {
double minDist = DBL_MAX;

View File

@@ -46,7 +46,7 @@
#include "precomp.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/highgui.hpp"
#endif
#include <iostream>
#include <queue>
@@ -54,8 +54,6 @@
namespace cv
{
using std::queue;
typedef std::pair<int,int> coordinate_t;
typedef float orientation_t;
typedef std::vector<coordinate_t> template_coords_t;
@@ -144,7 +142,7 @@ private:
LocationScaleImageRange(const std::vector<Point>& locations, const std::vector<float>& _scales) :
locations_(locations), scales_(_scales)
{
assert(locations.size()==_scales.size());
CV_Assert(locations.size()==_scales.size());
}
ImageIterator* iterator() const
@@ -395,7 +393,7 @@ private:
LocationScaleImageIterator(const std::vector<Point>& locations, const std::vector<float>& _scales) :
locations_(locations), scales_(_scales)
{
assert(locations.size()==_scales.size());
CV_Assert(locations.size()==_scales.size());
reset();
}
@@ -622,10 +620,9 @@ void ChamferMatcher::Matching::followContour(Mat& templ_img, template_coords_t&
{
const int dir[][2] = { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
coordinate_t next;
coordinate_t next_temp;
unsigned char ptr;
assert (direction==-1 || !coords.empty());
CV_Assert (direction==-1 || !coords.empty());
coordinate_t crt = coords.back();
@@ -767,8 +764,8 @@ void ChamferMatcher::Matching::findContourOrientations(const template_coords_t&
}
// get the middle two angles
nth_element(angles.begin(), angles.begin()+M-1, angles.end());
nth_element(angles.begin()+M-1, angles.begin()+M, angles.end());
std::nth_element(angles.begin(), angles.begin()+M-1, angles.end());
std::nth_element(angles.begin()+M-1, angles.begin()+M, angles.end());
// sort(angles.begin(), angles.end());
// average them to compute tangent
@@ -825,7 +822,7 @@ ChamferMatcher::Template::Template(Mat& edge_image, float scale_) : addr_width(-
}
vector<int>& ChamferMatcher::Template::getTemplateAddresses(int width)
std::vector<int>& ChamferMatcher::Template::getTemplateAddresses(int width)
{
if (addr_width!=width) {
addr.resize(coords.size());
@@ -906,19 +903,18 @@ void ChamferMatcher::Template::show() const
p2.x = x + pad*(int)(sin(orientations[i])*100)/100;
p2.y = y + pad*(int)(cos(orientations[i])*100)/100;
line(templ_color, p1,p2, CV_RGB(255,0,0));
line(templ_color, p1,p2, Scalar(255,0,0));
}
}
circle(templ_color,Point(center.x + pad, center.y + pad),1,CV_RGB(0,255,0));
circle(templ_color,Point(center.x + pad, center.y + pad),1,Scalar(0,255,0));
#ifdef HAVE_OPENCV_HIGHGUI
namedWindow("templ",1);
imshow("templ",templ_color);
cvWaitKey(0);
waitKey();
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support");
CV_Error(Error::StsNotImplemented, "OpenCV has been compiled without GUI support");
#endif
templ_color.release();
@@ -931,15 +927,13 @@ void ChamferMatcher::Template::show() const
void ChamferMatcher::Matching::addTemplateFromImage(Mat& templ, float scale)
{
Template* cmt = new Template(templ, scale);
if(templates.size() > 0)
templates.clear();
templates.clear();
templates.push_back(cmt);
cmt->show();
}
void ChamferMatcher::Matching::addTemplate(Template& template_){
if(templates.size() > 0)
templates.clear();
templates.clear();
templates.push_back(&template_);
}
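The recurring change in this file is the replacement of bare assert() with CV_Assert, as in the checks above and below. A minimal sketch of the practical difference, assuming nothing beyond opencv2/core.hpp: CV_Assert stays active in release builds and reports through cv::Exception rather than aborting.

#include "opencv2/core.hpp"
#include <iostream>

static void requireThreeChannels(const cv::Mat& img)
{
    // Not compiled out under NDEBUG; throws cv::Exception carrying the failed
    // expression, file and line instead of calling abort().
    CV_Assert(img.channels() == 3);
}

int main()
{
    try {
        requireThreeChannels(cv::Mat::zeros(4, 4, CV_8UC1)); // single channel -> throws
    } catch (const cv::Exception& e) {
        std::cerr << e.what() << std::endl;
    }
    return 0;
}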
/**
@@ -1065,7 +1059,7 @@ void ChamferMatcher::Matching::fillNonContourOrientations(Mat& annotated_img, Ma
int cols = annotated_img.cols;
int rows = annotated_img.rows;
assert(orientation_img.cols==cols && orientation_img.rows==rows);
CV_Assert(orientation_img.cols==cols && orientation_img.rows==rows);
for (int y=0;y<rows;++y) {
for (int x=0;x<cols;++x) {
@@ -1285,7 +1279,7 @@ void ChamferMatcher::showMatch(Mat& img, int index)
std::cout << "Index too big.\n" << std::endl;
}
assert(img.channels()==3);
CV_Assert(img.channels()==3);
Match match = matches[index];
@@ -1304,7 +1298,7 @@ void ChamferMatcher::showMatch(Mat& img, int index)
void ChamferMatcher::showMatch(Mat& img, Match match)
{
assert(img.channels()==3);
CV_Assert(img.channels()==3);
const template_coords_t& templ_coords = match.tpl->coords;
for (size_t i=0;i<templ_coords.size();++i) {

View File

@@ -73,7 +73,7 @@ void ChowLiuTree::add(const Mat& imgDescriptor) {
}
void ChowLiuTree::add(const vector<Mat>& _imgDescriptors) {
void ChowLiuTree::add(const std::vector<Mat>& _imgDescriptors) {
for (size_t i = 0; i < _imgDescriptors.size(); i++) {
add(_imgDescriptors[i]);
}
@@ -164,10 +164,10 @@ cv::Mat ChowLiuTree::buildTree(int root_word, std::list<info> &edges) {
//independence from a parent node.
//find all children and do the same
vector<int> nextqs = extractChildren(edges, q);
std::vector<int> nextqs = extractChildren(edges, q);
int pq = q;
vector<int>::iterator nextq;
std::vector<int>::iterator nextq;
for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
recAddToTree(cltree, *nextq, pq, edges);
}
@@ -186,16 +186,16 @@ void ChowLiuTree::recAddToTree(cv::Mat &cltree, int q, int pq,
cltree.at<double>(3, q) = CP(q, true, pq, false);
//find all children and do the same
vector<int> nextqs = extractChildren(remaining_edges, q);
std::vector<int> nextqs = extractChildren(remaining_edges, q);
pq = q;
vector<int>::iterator nextq;
std::vector<int>::iterator nextq;
for(nextq = nextqs.begin(); nextq != nextqs.end(); nextq++) {
recAddToTree(cltree, *nextq, pq, remaining_edges);
}
}
vector<int> ChowLiuTree::extractChildren(std::list<info> &remaining_edges, int q) {
std::vector<int> ChowLiuTree::extractChildren(std::list<info> &remaining_edges, int q) {
std::vector<int> children;
std::list<info>::iterator edge = remaining_edges.begin();
@@ -225,16 +225,16 @@ double ChowLiuTree::calcMutInfo(int word1, int word2) {
double accumulation = 0;
double P00 = JP(word1, false, word2, false);
if(P00) accumulation += P00 * log(P00 / (P(word1, false)*P(word2, false)));
if(P00) accumulation += P00 * std::log(P00 / (P(word1, false)*P(word2, false)));
double P01 = JP(word1, false, word2, true);
if(P01) accumulation += P01 * log(P01 / (P(word1, false)*P(word2, true)));
if(P01) accumulation += P01 * std::log(P01 / (P(word1, false)*P(word2, true)));
double P10 = JP(word1, true, word2, false);
if(P10) accumulation += P10 * log(P10 / (P(word1, true)*P(word2, false)));
if(P10) accumulation += P10 * std::log(P10 / (P(word1, true)*P(word2, false)));
double P11 = JP(word1, true, word2, true);
if(P11) accumulation += P11 * log(P11 / (P(word1, true)*P(word2, true)));
if(P11) accumulation += P11 * std::log(P11 / (P(word1, true)*P(word2, true)));
return accumulation;
}
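The four guarded terms above are the usual discrete mutual information between two binary word occurrences. A self-contained sketch of the same accumulation, with the joint probabilities passed in explicitly (the function name and parameters are illustrative, not part of the ChowLiuTree API):

#include <cmath>

// I(w1;w2) = sum over a,b in {0,1} of P(a,b) * log( P(a,b) / (P(a) * P(b)) ),
// skipping zero-probability cells exactly as calcMutInfo does above.
static double mutualInformation(double p00, double p01, double p10, double p11)
{
    const double p0_ = p00 + p01, p1_ = p10 + p11; // marginals of word1
    const double p_0 = p00 + p10, p_1 = p01 + p11; // marginals of word2
    double acc = 0.0;
    if (p00 > 0) acc += p00 * std::log(p00 / (p0_ * p_0));
    if (p01 > 0) acc += p01 * std::log(p01 / (p0_ * p_1));
    if (p10 > 0) acc += p10 * std::log(p10 / (p1_ * p_0));
    if (p11 > 0) acc += p11 * std::log(p11 / (p1_ * p_1));
    return acc;
}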

View File

@@ -40,9 +40,9 @@ static Mat linspace(float x0, float x1, int n)
static void sortMatrixRowsByIndices(InputArray _src, InputArray _indices, OutputArray _dst)
{
if(_indices.getMat().type() != CV_32SC1)
CV_Error(CV_StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
CV_Error(Error::StsUnsupportedFormat, "cv::sortRowsByIndices only works on integer indices!");
Mat src = _src.getMat();
vector<int> indices = _indices.getMat();
std::vector<int> indices = _indices.getMat();
_dst.create(src.rows, src.cols, src.type());
Mat dst = _dst.getMat();
for(size_t idx = 0; idx < indices.size(); idx++) {
@@ -64,8 +64,8 @@ static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1)
CV_Error(CV_StsBadArg, "cv::argsort only sorts 1D matrices.");
int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
CV_Error(Error::StsBadArg, "cv::argsort only sorts 1D matrices.");
int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
Mat sorted_indices;
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
@@ -76,7 +76,7 @@ Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
{
int n = XI.rows;
// sort input table
vector<int> sort_indices = argsort(X_);
std::vector<int> sort_indices = argsort(X_);
Mat X = sortMatrixRowsByIndices(X_,sort_indices);
Mat Y = sortMatrixRowsByIndices(Y_,sort_indices);
@@ -116,8 +116,8 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
Mat Y = _Y.getMat();
Mat xi = _xi.getMat();
// check types & alignment
assert((x.type() == Y.type()) && (Y.type() == xi.type()));
assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
CV_Assert((x.type() == Y.type()) && (Y.type() == xi.type()));
CV_Assert((x.cols == 1) && (x.rows == Y.rows) && (x.cols == Y.cols));
// call templated interp1
switch(x.type()) {
case CV_8SC1: return interp1_<char>(x,Y,xi); break;
@@ -127,7 +127,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
case CV_32SC1: return interp1_<int>(x,Y,xi); break;
case CV_32FC1: return interp1_<float>(x,Y,xi); break;
case CV_64FC1: return interp1_<double>(x,Y,xi); break;
default: CV_Error(CV_StsUnsupportedFormat, ""); break;
default: CV_Error(Error::StsUnsupportedFormat, ""); break;
}
return Mat();
}
@@ -473,7 +473,7 @@ namespace colormap
void ColorMap::operator()(InputArray _src, OutputArray _dst) const
{
if(_lut.total() != 256)
CV_Error(CV_StsAssert, "cv::LUT only supports tables of size 256.");
CV_Error(Error::StsAssert, "cv::LUT only supports tables of size 256.");
Mat src = _src.getMat();
// Return original matrix if wrong type is given (is fail loud better here?)
if(src.type() != CV_8UC1 && src.type() != CV_8UC3)
@@ -483,8 +483,8 @@ namespace colormap
}
// Turn into a BGR matrix into its grayscale representation.
if(src.type() == CV_8UC3)
cvtColor(src.clone(), src, CV_BGR2GRAY);
cvtColor(src.clone(), src, CV_GRAY2BGR);
cvtColor(src.clone(), src, COLOR_BGR2GRAY);
cvtColor(src.clone(), src, COLOR_GRAY2BGR);
// Apply the ColorMap.
LUT(src, _lut, _dst);
}
@@ -521,7 +521,7 @@ namespace colormap
colormap == COLORMAP_WINTER ? (colormap::ColorMap*)(new colormap::Winter) : 0;
if( !cm )
CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
CV_Error( Error::StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
(*cm)(src, dst);
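For context, the LUT-based operator in this hunk is what the COLORMAP_* constants resolve to; a minimal usage sketch, assuming the dispatch above sits behind the contrib module's public applyColorMap() entry point:

#include "opencv2/contrib.hpp"

int main()
{
    cv::Mat gray(256, 256, CV_8UC1);
    for (int y = 0; y < gray.rows; ++y)
        gray.row(y).setTo(cv::Scalar(y));                   // simple vertical gradient

    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_WINTER);  // accepts CV_8UC1 or CV_8UC3 input
    return 0;
}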

View File

@@ -43,7 +43,6 @@
#include "opencv2/contrib/hybridtracker.hpp"
using namespace cv;
using namespace std;
CvMeanShiftTracker::CvMeanShiftTracker(CvMeanShiftTrackerParams _params) : params(_params)
{
@@ -61,7 +60,7 @@ void CvMeanShiftTracker::newTrackingWindow(Mat image, Rect selection)
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};
cvtColor(image, hsv, CV_BGR2HSV);
cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);
hue.create(hsv.size(), CV_8UC2);
@@ -84,7 +83,7 @@ RotatedRect CvMeanShiftTracker::updateTrackingWindow(Mat image)
float srange[] = { 0, 1 };
const float* ranges[] = {hrange, srange};
cvtColor(image, hsv, CV_BGR2HSV);
cvtColor(image, hsv, COLOR_BGR2HSV);
inRange(hsv, Scalar(0, 30, MIN(10, 256)), Scalar(180, 256, MAX(10, 256)), mask);
hue.create(hsv.size(), CV_8UC2);
mixChannels(&hsv, 1, &hue, 1, channels, 2);
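The hue plane assembled above is normally consumed by histogram back-projection before the mean-shift step; a sketch of that downstream wiring, assuming the standard calcHist / calcBackProject / CamShift calls (the exact hookup is illustrative, not code from this file):

#include "opencv2/imgproc.hpp"
#include "opencv2/video/tracking.hpp"

// 'hue' is the CV_8UC2 plane produced by mixChannels above (hue in channel 0),
// 'mask' the inRange result, 'selection' the current tracking window.
static cv::RotatedRect trackOnce(const cv::Mat& hue, const cv::Mat& mask, cv::Rect& selection)
{
    int histSize = 30;
    float hrange[] = { 0.f, 180.f };
    const float* ranges[] = { hrange };
    int channels[] = { 0 };

    cv::Mat hist, backproj;
    cv::Mat roi(hue, selection), maskroi(mask, selection);
    cv::calcHist(&roi, 1, channels, maskroi, hist, 1, &histSize, ranges);
    cv::normalize(hist, hist, 0, 255, cv::NORM_MINMAX);

    cv::calcBackProject(&hue, 1, channels, hist, backproj, ranges);
    backproj &= mask;
    return cv::CamShift(backproj, selection,
        cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 10, 1));
}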

View File

@@ -1,5 +1,8 @@
#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
#include "opencv2/contrib/detection_based_tracker.hpp"
#include "opencv2/core/utility.hpp"
#include <pthread.h>
#if defined(DEBUG) || defined(_DEBUG)
#undef DEBUGLOGS
@@ -12,7 +15,6 @@
#ifdef ANDROID
#include <android/log.h>
#include <pthread.h>
#define LOG_TAG "OBJECT_DETECTOR"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI0(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
@@ -42,7 +44,6 @@
using namespace cv;
using namespace std;
static inline cv::Point2f centerRect(const cv::Rect& r)
{
@@ -70,7 +71,7 @@ class cv::DetectionBasedTracker::SeparateDetectionWork
public:
SeparateDetectionWork(cv::DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector);
virtual ~SeparateDetectionWork();
bool communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions);
bool communicateWithDetectingThread(const Mat& imageGray, std::vector<Rect>& rectsWhereRegions);
bool run();
void stop();
void resetTracking();
@@ -226,7 +227,7 @@ void cv::DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
{
static double freq = getTickFrequency();
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start");
vector<Rect> objects;
std::vector<Rect> objects;
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
pthread_mutex_lock(&mutex);
@@ -384,7 +385,7 @@ void cv::DetectionBasedTracker::SeparateDetectionWork::resetTracking()
}
bool cv::DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions)
bool cv::DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, std::vector<Rect>& rectsWhereRegions)
{
static double freq = getTickFrequency();
@@ -498,7 +499,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
Mat imageDetect=imageGray;
vector<Rect> rectsWhereRegions;
std::vector<Rect> rectsWhereRegions;
bool shouldHandleResult=false;
if (!separateDetectionWork.empty()) {
shouldHandleResult = separateDetectionWork->communicateWithDetectingThread(imageGray, rectsWhereRegions);
@@ -534,7 +535,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
}
LOGI("DetectionBasedTracker::process: tracked objects num==%d", (int)trackedObjects.size());
vector<Rect> detectedObjectsInRegions;
std::vector<Rect> detectedObjectsInRegions;
LOGD("DetectionBasedTracker::process: rectsWhereRegions.size()=%d", (int)rectsWhereRegions.size());
for(size_t i=0; i < rectsWhereRegions.size(); i++) {
@@ -609,7 +610,7 @@ void cv::DetectionBasedTracker::resetTracking()
trackedObjects.clear();
}
void cv::DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObjects)
void cv::DetectionBasedTracker::updateTrackedObjects(const std::vector<Rect>& detectedObjects)
{
enum {
NEW_RECTANGLE=-1,
@@ -624,7 +625,7 @@ void cv::DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detecte
trackedObjects[i].numDetectedFrames++;
}
vector<int> correspondence(detectedObjects.size(), NEW_RECTANGLE);
std::vector<int> correspondence(detectedObjects.size(), NEW_RECTANGLE);
correspondence.clear();
correspondence.resize(detectedObjects.size(), NEW_RECTANGLE);
@@ -830,7 +831,7 @@ Rect cv::DetectionBasedTracker::calcTrackedObjectPositionToShow(int i, ObjectSta
return res;
}
void cv::DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, vector<Rect>& detectedObjectsInRegions)
void cv::DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, std::vector<Rect>& detectedObjectsInRegions)
{
Rect r0(Point(), img.size());
Rect r1 = scale_rect(r, innerParameters.coeffTrackingWindowSize);
@@ -843,7 +844,7 @@ void cv::DetectionBasedTracker::detectInRegion(const Mat& img, const Rect& r, ve
int d = cvRound(std::min(r.width, r.height) * innerParameters.coeffObjectSizeToTrack);
vector<Rect> tmpobjects;
std::vector<Rect> tmpobjects;
Mat img1(img, r1);//subimage for rectangle -- without data copying
LOGD("DetectionBasedTracker::detectInRegion: img1.size()=%d x %d, d=%d",

View File

@@ -21,11 +21,9 @@
namespace cv
{
using std::set;
// Reads a sequence from a FileNode::SEQ with type _Tp into a result vector.
template<typename _Tp>
inline void readFileNodeList(const FileNode& fn, vector<_Tp>& result) {
inline void readFileNodeList(const FileNode& fn, std::vector<_Tp>& result) {
if (fn.type() == FileNode::SEQ) {
for (FileNodeIterator it = fn.begin(); it != fn.end();) {
_Tp item;
@@ -37,10 +35,10 @@ inline void readFileNodeList(const FileNode& fn, vector<_Tp>& result) {
// Writes the a list of given items to a cv::FileStorage.
template<typename _Tp>
inline void writeFileNodeList(FileStorage& fs, const string& name,
const vector<_Tp>& items) {
inline void writeFileNodeList(FileStorage& fs, const String& name,
const std::vector<_Tp>& items) {
// typedefs
typedef typename vector<_Tp>::const_iterator constVecIterator;
typedef typename std::vector<_Tp>::const_iterator constVecIterator;
// write the elements in item to fs
fs << name << "[";
for (constVecIterator it = items.begin(); it != items.end(); ++it) {
@@ -52,8 +50,8 @@ inline void writeFileNodeList(FileStorage& fs, const string& name,
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
string error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(Error::StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
@@ -68,8 +66,8 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
for(unsigned int i = 0; i < n; i++) {
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
string error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, d, src.getMat(i).total());
CV_Error(Error::StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
@@ -86,13 +84,13 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
typedef typename set<_Tp>::const_iterator constSetIterator;
typedef typename vector<_Tp>::const_iterator constVecIterator;
set<_Tp> set_elems;
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
typedef typename std::set<_Tp>::const_iterator constSetIterator;
typedef typename std::vector<_Tp>::const_iterator constVecIterator;
std::set<_Tp> set_elems;
for (constVecIterator it = src.begin(); it != src.end(); ++it)
set_elems.insert(*it);
vector<_Tp> elems;
std::vector<_Tp> elems;
for (constSetIterator it = set_elems.begin(); it != set_elems.end(); ++it)
elems.push_back(*it);
return elems;
@@ -106,7 +104,7 @@ class Eigenfaces : public FaceRecognizer
private:
int _num_components;
double _threshold;
vector<Mat> _projections;
std::vector<Mat> _projections;
Mat _labels;
Mat _eigenvectors;
Mat _eigenvalues;
@@ -162,7 +160,7 @@ private:
Mat _eigenvectors;
Mat _eigenvalues;
Mat _mean;
vector<Mat> _projections;
std::vector<Mat> _projections;
Mat _labels;
public:
@@ -220,7 +218,7 @@ private:
int _neighbors;
double _threshold;
vector<Mat> _histograms;
std::vector<Mat> _histograms;
Mat _labels;
// Computes a LBPH model with images in src and
@@ -300,23 +298,29 @@ public:
//------------------------------------------------------------------------------
// FaceRecognizer
//------------------------------------------------------------------------------
void FaceRecognizer::update(InputArrayOfArrays, InputArray) {
string error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
CV_Error(CV_StsNotImplemented, error_msg);
void FaceRecognizer::update(InputArrayOfArrays src, InputArray labels ) {
if( dynamic_cast<LBPH*>(this) != 0 )
{
dynamic_cast<LBPH*>(this)->update( src, labels );
return;
}
String error_msg = format("This FaceRecognizer (%s) does not support updating, you have to use FaceRecognizer::train to update it.", this->name().c_str());
CV_Error(Error::StsNotImplemented, error_msg);
}
void FaceRecognizer::save(const string& filename) const {
void FaceRecognizer::save(const String& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
this->save(fs);
fs.release();
}
void FaceRecognizer::load(const string& filename) {
void FaceRecognizer::load(const String& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for reading!");
this->load(fs);
fs.release();
}
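With update() now dispatching to LBPH at run time (above), the typical life cycle of an LBPH model looks roughly as follows; this sketch assumes the contrib module's public createLBPHFaceRecognizer() factory, while everything else appears in this file:

#include "opencv2/contrib.hpp"
#include <vector>

static void lbphRoundTrip(const std::vector<cv::Mat>& faces, const std::vector<int>& labels,
                          const std::vector<cv::Mat>& newFaces, const std::vector<int>& newLabels)
{
    cv::Ptr<cv::FaceRecognizer> model = cv::createLBPHFaceRecognizer();
    model->train(faces, labels);          // builds the initial histograms
    model->update(newFaces, newLabels);   // only the LBPH model supports incremental updates
    model->save("lbph_model.yml");        // FileStorage-backed, as in save() above

    int predictedLabel = -1;
    double confidence = 0.0;
    model->predict(faces[0], predictedLabel, confidence);
}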
@@ -326,18 +330,18 @@ void FaceRecognizer::load(const string& filename) {
//------------------------------------------------------------------------------
void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
if(_src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(Error::StsBadArg, error_message);
} else if(_local_labels.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _local_labels.type());
CV_Error(Error::StsBadArg, error_message);
}
// make sure data has correct size
if(_src.total() > 1) {
for(int i = 1; i < static_cast<int>(_src.total()); i++) {
if(_src.getMat(i-1).total() != _src.getMat(i).total()) {
string error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
String error_message = format("In the Eigenfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", _src.getMat(i-1).total(), _src.getMat(i).total());
CV_Error(Error::StsUnsupportedFormat, error_message);
}
}
}
@@ -350,8 +354,8 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
int n = data.rows;
// assert there are as much samples as labels
if(static_cast<int>(labels.total()) != n) {
string error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", n, labels.total());
CV_Error(Error::StsBadArg, error_message);
}
// clear existing model data
_labels.release();
@@ -361,7 +365,7 @@ void Eigenfaces::train(InputArrayOfArrays _src, InputArray _local_labels) {
_num_components = n;
// perform the PCA
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, _num_components);
PCA pca(data, Mat(), PCA::DATA_AS_ROW, _num_components);
// copy the PCA results
_mean = pca.mean.reshape(1,1); // store the mean vector
_eigenvalues = pca.eigenvalues.clone(); // eigenvalues by row
@@ -381,12 +385,12 @@ void Eigenfaces::predict(InputArray _src, int &minClass, double &minDist) const
// make sure the user is passing correct data
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
CV_Error(CV_StsError, error_message);
String error_message = "This Eigenfaces model is not computed yet. Did you call Eigenfaces::train?";
CV_Error(Error::StsError, error_message);
} else if(_eigenvectors.rows != static_cast<int>(src.total())) {
// check data alignment just for clearer exception messages
string error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(Error::StsBadArg, error_message);
}
// project into PCA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
@@ -435,18 +439,18 @@ void Eigenfaces::save(FileStorage& fs) const {
//------------------------------------------------------------------------------
void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
if(src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(Error::StsBadArg, error_message);
} else if(_lbls.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _lbls.type());
CV_Error(Error::StsBadArg, error_message);
}
// make sure data has correct size
if(src.total() > 1) {
for(int i = 1; i < static_cast<int>(src.total()); i++) {
if(src.getMat(i-1).total() != src.getMat(i).total()) {
string error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
CV_Error(CV_StsUnsupportedFormat, error_message);
String error_message = format("In the Fisherfaces method all input samples (training images) must be of equal size! Expected %d pixels, but was %d pixels.", src.getMat(i-1).total(), src.getMat(i).total());
CV_Error(Error::StsUnsupportedFormat, error_message);
}
}
}
@@ -457,17 +461,17 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
int N = data.rows;
// make sure labels are passed in correct shape
if(labels.total() != (size_t) N) {
string error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("The number of samples (src) must equal the number of labels (labels)! len(src)=%d, len(labels)=%d.", N, labels.total());
CV_Error(Error::StsBadArg, error_message);
} else if(labels.rows != 1 && labels.cols != 1) {
string error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols);
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Expected the labels in a matrix with one row or column! Given dimensions are rows=%s, cols=%d.", labels.rows, labels.cols);
CV_Error(Error::StsBadArg, error_message);
}
// clear existing model data
_labels.release();
_projections.clear();
// safely copy from cv::Mat to std::vector
vector<int> ll;
std::vector<int> ll;
for(unsigned int i = 0; i < labels.total(); i++) {
ll.push_back(labels.at<int>(i));
}
@@ -477,7 +481,7 @@ void Fisherfaces::train(InputArrayOfArrays src, InputArray _lbls) {
if((_num_components <= 0) || (_num_components > (C-1)))
_num_components = (C-1);
// perform a PCA and keep (N-C) components
PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, (N-C));
PCA pca(data, Mat(), PCA::DATA_AS_ROW, (N-C));
// project the data and perform a LDA on it
LDA lda(pca.project(data),labels, _num_components);
// store the total mean vector
@@ -501,11 +505,11 @@ void Fisherfaces::predict(InputArray _src, int &minClass, double &minDist) const
// check data alignment just for clearer exception messages
if(_projections.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
CV_Error(CV_StsBadArg, error_message);
String error_message = "This Fisherfaces model is not computed yet. Did you call Fisherfaces::train?";
CV_Error(Error::StsBadArg, error_message);
} else if(src.total() != (size_t) _eigenvectors.rows) {
string error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong input image size. Reason: Training and Test images must be of equal size! Expected an image with %d elements, but got %d.", _eigenvectors.rows, src.total());
CV_Error(Error::StsBadArg, error_message);
}
// project into LDA subspace
Mat q = subspaceProject(_eigenvectors, _mean, src.reshape(1,1));
@@ -636,8 +640,8 @@ static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
case CV_32FC1: elbp_<float>(src,dst, radius, neighbors); break;
case CV_64FC1: elbp_<double>(src,dst, radius, neighbors); break;
default:
string error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
CV_Error(CV_StsNotImplemented, error_msg);
String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
CV_Error(Error::StsNotImplemented, error_msg);
break;
}
}
@@ -683,7 +687,7 @@ static Mat histc(InputArray _src, int minVal, int maxVal, bool normed)
return histc_(src, minVal, maxVal, normed);
break;
default:
CV_Error(CV_StsUnmatchedFormats, "This type is not implemented yet."); break;
CV_Error(Error::StsUnmatchedFormats, "This type is not implemented yet."); break;
}
return Mat();
}
@@ -764,25 +768,25 @@ void LBPH::update(InputArrayOfArrays _in_src, InputArray _in_labels) {
void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserveData) {
if(_in_src.kind() != _InputArray::STD_VECTOR_MAT && _in_src.kind() != _InputArray::STD_VECTOR_VECTOR) {
string error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
String error_message = "The images are expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(Error::StsBadArg, error_message);
}
if(_in_src.total() == 0) {
string error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(CV_StsUnsupportedFormat, error_message);
String error_message = format("Empty training data was given. You'll need more than one sample to learn a model.");
CV_Error(Error::StsUnsupportedFormat, error_message);
} else if(_in_labels.getMat().type() != CV_32SC1) {
string error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
CV_Error(CV_StsUnsupportedFormat, error_message);
String error_message = format("Labels must be given as integer (CV_32SC1). Expected %d, but was %d.", CV_32SC1, _in_labels.type());
CV_Error(Error::StsUnsupportedFormat, error_message);
}
// get the vector of matrices
vector<Mat> src;
std::vector<Mat> src;
_in_src.getMatVector(src);
// get the label matrix
Mat labels = _in_labels.getMat();
// check if data is well-aligned
if(labels.total() != src.size()) {
string error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), _labels.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("The number of samples (src) must equal the number of labels (labels). Was len(samples)=%d, len(labels)=%d.", src.size(), _labels.total());
CV_Error(Error::StsBadArg, error_message);
}
// if this model should be trained without preserving old data, delete old model data
if(!preserveData) {
@@ -812,8 +816,8 @@ void LBPH::train(InputArrayOfArrays _in_src, InputArray _in_labels, bool preserv
void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
if(_histograms.empty()) {
// throw error if no data (or simply return -1?)
string error_message = "This LBPH model is not computed yet. Did you call the train method?";
CV_Error(CV_StsBadArg, error_message);
String error_message = "This LBPH model is not computed yet. Did you call the train method?";
CV_Error(Error::StsBadArg, error_message);
}
Mat src = _src.getMat();
// get the spatial histogram from input image
@@ -828,7 +832,7 @@ void LBPH::predict(InputArray _src, int &minClass, double &minDist) const {
minDist = DBL_MAX;
minClass = -1;
for(size_t sampleIdx = 0; sampleIdx < _histograms.size(); sampleIdx++) {
double dist = compareHist(_histograms[sampleIdx], query, CV_COMP_CHISQR);
double dist = compareHist(_histograms[sampleIdx], query, HISTCMP_CHISQR);
if((dist < minDist) && (dist < _threshold)) {
minDist = dist;
minClass = _labels.at<int>((int) sampleIdx);
@@ -889,7 +893,7 @@ CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
bool initModule_contrib()
{
Ptr<Algorithm> efaces = createEigenfaces(), ffaces = createFisherfaces(), lbph = createLBPH();
Ptr<Algorithm> efaces = createEigenfaces_hidden(), ffaces = createFisherfaces_hidden(), lbph = createLBPH_hidden();
return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
}

View File

@@ -42,7 +42,7 @@
#include "precomp.hpp"
#include <stdio.h>
#include <iostream>
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/hybridtracker.hpp"
using namespace cv;
@@ -80,7 +80,7 @@ CvFeatureTracker::~CvFeatureTracker()
void CvFeatureTracker::newTrackingWindow(Mat image, Rect selection)
{
image.copyTo(prev_image);
cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);
cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);
prev_trackwindow = selection;
prev_center.x = selection.x;
prev_center.y = selection.y;
@@ -98,8 +98,8 @@ Rect CvFeatureTracker::updateTrackingWindow(Mat image)
Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
{
ittr++;
vector<KeyPoint> prev_keypoints, curr_keypoints;
vector<Point2f> prev_keys, curr_keys;
std::vector<KeyPoint> prev_keypoints, curr_keypoints;
std::vector<Point2f> prev_keys, curr_keys;
Mat prev_desc, curr_desc;
Rect window = prev_trackwindow;
@@ -131,7 +131,7 @@ Rect CvFeatureTracker::updateTrackingWindowWithSIFT(Mat image)
curr_keys.push_back(curr_keypoints[matches[i].trainIdx].pt);
}
Mat T = findHomography(prev_keys, curr_keys, CV_LMEDS);
Mat T = findHomography(prev_keys, curr_keys, LMEDS);
prev_trackwindow.x += cvRound(T.at<double> (0, 2));
prev_trackwindow.y += cvRound(T.at<double> (1, 2));
@@ -148,12 +148,12 @@ Rect CvFeatureTracker::updateTrackingWindowWithFlow(Mat image)
ittr++;
Size subPixWinSize(10,10), winSize(31,31);
Mat image_bw;
TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
vector<uchar> status;
vector<float> err;
TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);
std::vector<uchar> status;
std::vector<float> err;
cvtColor(image, image_bw, CV_BGR2GRAY);
cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);
cvtColor(image, image_bw, COLOR_BGR2GRAY);
cvtColor(prev_image, prev_image_bw, COLOR_BGR2GRAY);
if (ittr == 1)
{

View File

@@ -35,6 +35,7 @@
//M*/
#include "precomp.hpp"
#include "opencv2/contrib/compat.hpp"
CvFuzzyPoint::CvFuzzyPoint(double _x, double _y)
{
@@ -380,6 +381,7 @@ void CvFuzzyMeanShiftTracker::SearchWindow::initDepthValues(IplImage *maskImage,
{
if (*depthData)
{
d = *depthData;
m1 += d;
if (d < mind)
mind = d;

View File

@@ -39,7 +39,6 @@
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/core.hpp"
#include "precomp.hpp"
#include <iostream>
@@ -86,7 +85,7 @@ static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
candidatePointsMask.at<uchar>(0, maxLoc.x) = 0;
Mat minDists;
reduce( activedDists, minDists, 0, CV_REDUCE_MIN );
reduce( activedDists, minDists, 0, REDUCE_MIN );
minMaxLoc( minDists, 0, &maxVal, 0, &maxLoc, candidatePointsMask );
dst.at<Point3_<uchar> >((int)i) = src.at<Point3_<uchar> >(maxLoc.x);
}
@@ -119,7 +118,7 @@ void cv::generateColors( std::vector<Scalar>& colors, size_t count, size_t facto
// Convert the colors set to Lab space.
// Distances between colors in this space correspond a human perception.
Mat lab;
cvtColor( bgr, lab, CV_BGR2Lab);
cvtColor( bgr, lab, COLOR_BGR2Lab);
// Subsample colors from the generated set so that
// to maximize the minimum distances between each other.
@@ -129,7 +128,7 @@ void cv::generateColors( std::vector<Scalar>& colors, size_t count, size_t facto
// Convert subsampled colors back to RGB
Mat bgr_subset;
cvtColor( lab_subset, bgr_subset, CV_Lab2BGR );
cvtColor( lab_subset, bgr_subset, COLOR_Lab2BGR );
CV_Assert( bgr_subset.total() == count );
for( size_t i = 0; i < count; i++ )

View File

@@ -43,7 +43,6 @@
#include "opencv2/contrib/hybridtracker.hpp"
using namespace cv;
using namespace std;
CvHybridTrackerParams::CvHybridTrackerParams(float _ft_tracker_weight, float _ms_tracker_weight,
CvFeatureTrackerParams _ft_params,
@@ -83,7 +82,7 @@ CvHybridTracker::~CvHybridTracker() {
inline float CvHybridTracker::getL2Norm(Point2f p1, Point2f p2) {
float distance = (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y
- p2.y);
return sqrt(distance);
return std::sqrt(distance);
}
Mat CvHybridTracker::getDistanceProjection(Mat image, Point2f center) {

View File

@@ -193,7 +193,7 @@ bool ImageLogPolProjection::_initLogRetinaSampling(const double reductionFactor,
//double rlim=1.0/reductionFactor*(minDimension/2.0+samplingStrenght);
// input frame dimensions INdependent log sampling:
_azero=(1.0+reductionFactor*sqrt(samplingStrenght))/(reductionFactor*reductionFactor*samplingStrenght-1.0);
_azero=(1.0+reductionFactor*std::sqrt(samplingStrenght))/(reductionFactor*reductionFactor*samplingStrenght-1.0);
_alim=(1.0+_azero)/reductionFactor;
#ifdef IMAGELOGPOLPROJECTION_DEBUG
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: rlim= "<<rlim<<std::endl;
@@ -223,10 +223,10 @@ bool ImageLogPolProjection::_initLogRetinaSampling(const double reductionFactor,
// get the pixel position in the original picture
// -> input frame dimensions dependent log sampling:
//double scale = samplingStrenght/(rlim-(double)sqrt(idRow*idRow+idColumn*idColumn));
//double scale = samplingStrenght/(rlim-(double)std::sqrt(idRow*idRow+idColumn*idColumn));
// -> input frame dimensions INdependent log sampling:
double scale=getOriginalRadiusLength((double)sqrt((double)(idRow*idRow+idColumn*idColumn)));
double scale=getOriginalRadiusLength((double)std::sqrt((double)(idRow*idRow+idColumn*idColumn)));
#ifdef IMAGELOGPOLPROJECTION_DEBUG
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: scale= "<<scale<<std::endl;
std::cout<<"ImageLogPolProjection::initLogRetinaSampling: scale2= "<<scale2<<std::endl;
@@ -243,7 +243,7 @@ bool ImageLogPolProjection::_initLogRetinaSampling(const double reductionFactor,
// manage border effects
double length=u*u+v*v;
double radiusRatio=sqrt(rMax/length);
double radiusRatio=std::sqrt(rMax/length);
#ifdef IMAGELOGPOLPROJECTION_DEBUG
std::cout<<"ImageLogPolProjection::(inputH, inputW)="<<halfInputRows<<", "<<halfInputColumns<<", Rmax2="<<rMax<<std::endl;
@@ -362,14 +362,14 @@ bool ImageLogPolProjection::_initLogPolarCortexSampling(const double reductionFa
//std::cout<<"ImageLogPolProjection::Starting cortex projection"<<std::endl;
// compute transformation, get theta and radius with regard to the output sampled pixel
double diagonalLenght=sqrt((double)(_outputNBcolumns*_outputNBcolumns+_outputNBrows*_outputNBrows));
double diagonalLenght=std::sqrt((double)(_outputNBcolumns*_outputNBcolumns+_outputNBrows*_outputNBrows));
for (unsigned int radiusIndex=0;radiusIndex<_outputNBcolumns;++radiusIndex)
for(unsigned int orientationIndex=0;orientationIndex<_outputNBrows;++orientationIndex)
{
double x=1.0+sinh(radiusAxis[radiusIndex])*cos(orientationAxis[orientationIndex]);
double y=sinh(radiusAxis[radiusIndex])*sin(orientationAxis[orientationIndex]);
// get the input picture coordinate
double R=diagonalLenght*sqrt(x*x+y*y)/(5.0+sqrt(x*x+y*y));
double R=diagonalLenght*std::sqrt(x*x+y*y)/(5.0+std::sqrt(x*x+y*y));
double theta=atan2(y,x);
// convert input polar coord into cartesian/C compatible coordinate
unsigned int columnIndex=(unsigned int)(cos(theta)*R)+halfInputColumns;

View File

@@ -1,5 +1,5 @@
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/contrib.hpp"
#ifdef WIN32
#include <windows.h>
@@ -10,11 +10,11 @@
namespace cv
{
std::vector<std::string> Directory::GetListFiles( const std::string& path, const std::string & exten, bool addPath )
std::vector<String> Directory::GetListFiles( const String& path, const String & exten, bool addPath )
{
std::vector<std::string> list;
std::vector<String> list;
list.clear();
std::string path_f = path + "/" + exten;
String path_f = path + "/" + exten;
#ifdef WIN32
WIN32_FIND_DATA FindFileData;
HANDLE hFind;
@@ -57,10 +57,10 @@ namespace cv
if (dirp->d_type == DT_REG)
{
if (exten.compare("*") == 0)
list.push_back(static_cast<std::string>(dirp->d_name));
list.push_back(static_cast<String>(dirp->d_name));
else
if (std::string(dirp->d_name).find(exten) != std::string::npos)
list.push_back(static_cast<std::string>(dirp->d_name));
if (String(dirp->d_name).find(exten) != String::npos)
list.push_back(static_cast<String>(dirp->d_name));
}
}
closedir(dp);
@@ -69,10 +69,10 @@ namespace cv
return list;
}
std::vector<std::string> Directory::GetListFolders( const std::string& path, const std::string & exten, bool addPath )
std::vector<String> Directory::GetListFolders( const String& path, const String & exten, bool addPath )
{
std::vector<std::string> list;
std::string path_f = path + "/" + exten;
std::vector<String> list;
String path_f = path + "/" + exten;
list.clear();
#ifdef WIN32
WIN32_FIND_DATA FindFileData;
@@ -117,10 +117,10 @@ namespace cv
strcmp(dirp->d_name, "..") != 0 )
{
if (exten.compare("*") == 0)
list.push_back(static_cast<std::string>(dirp->d_name));
list.push_back(static_cast<String>(dirp->d_name));
else
if (std::string(dirp->d_name).find(exten) != std::string::npos)
list.push_back(static_cast<std::string>(dirp->d_name));
if (String(dirp->d_name).find(exten) != String::npos)
list.push_back(static_cast<String>(dirp->d_name));
}
}
closedir(dp);
@@ -129,16 +129,16 @@ namespace cv
return list;
}
std::vector<std::string> Directory::GetListFilesR ( const std::string& path, const std::string & exten, bool addPath )
std::vector<String> Directory::GetListFilesR ( const String& path, const String & exten, bool addPath )
{
std::vector<std::string> list = Directory::GetListFiles(path, exten, addPath);
std::vector<String> list = Directory::GetListFiles(path, exten, addPath);
std::vector<std::string> dirs = Directory::GetListFolders(path, exten, addPath);
std::vector<String> dirs = Directory::GetListFolders(path, exten, addPath);
std::vector<std::string>::const_iterator it;
std::vector<String>::const_iterator it;
for (it = dirs.begin(); it != dirs.end(); ++it)
{
std::vector<std::string> cl = Directory::GetListFiles(*it, exten, addPath);
std::vector<String> cl = Directory::GetListFiles(*it, exten, addPath);
list.insert(list.end(), cl.begin(), cl.end());
}
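A minimal usage sketch of the listing API with its new cv::String signatures; the path is a placeholder, and note that on the POSIX branch shown above any pattern other than "*" is matched as a plain substring of the file name:

#include "opencv2/contrib.hpp"
#include <iostream>
#include <vector>

int main()
{
    // Non-recursive: files directly under the folder whose names contain ".jpg".
    std::vector<cv::String> files = cv::Directory::GetListFiles("/tmp/images", ".jpg", false);

    // Recursive: every file under the folder and its sub-folders ("*" matches everything).
    std::vector<cv::String> all = cv::Directory::GetListFilesR("/tmp/images", "*", false);

    for (size_t i = 0; i < files.size(); ++i)
        std::cout << files[i].c_str() << std::endl;
    std::cout << all.size() << " files found recursively" << std::endl;
    return 0;
}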

View File

@@ -24,20 +24,15 @@
namespace cv
{
using std::map;
using std::set;
using std::cout;
using std::endl;
// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
typedef typename set<_Tp>::const_iterator constSetIterator;
typedef typename vector<_Tp>::const_iterator constVecIterator;
set<_Tp> set_elems;
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
typedef typename std::set<_Tp>::const_iterator constSetIterator;
typedef typename std::vector<_Tp>::const_iterator constVecIterator;
std::set<_Tp> set_elems;
for (constVecIterator it = src.begin(); it != src.end(); ++it)
set_elems.insert(*it);
vector<_Tp> elems;
std::vector<_Tp> elems;
for (constSetIterator it = set_elems.begin(); it != set_elems.end(); ++it)
elems.push_back(*it);
return elems;
@@ -47,10 +42,10 @@ static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1) {
string error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
CV_Error(CV_StsBadArg, error_message);
String error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
CV_Error(Error::StsBadArg, error_message);
}
int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
int flags = SORT_EVERY_ROW | (ascending ? SORT_ASCENDING : SORT_DESCENDING);
Mat sorted_indices;
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
@@ -59,8 +54,8 @@ static Mat argsort(InputArray _src, bool ascending=true)
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
// make sure the input data is a vector of matrices or vector of vector
if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
string error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
CV_Error(CV_StsBadArg, error_message);
String error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).";
CV_Error(Error::StsBadArg, error_message);
}
// number of samples
size_t n = src.total();
@@ -75,8 +70,8 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
for(int i = 0; i < (int)n; i++) {
// make sure data can be reshaped, throw exception if not!
if(src.getMat(i).total() != d) {
string error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
CV_Error(Error::StsBadArg, error_message);
}
// get a hold of the current row
Mat xi = data.row(i);
@@ -92,10 +87,10 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
if(_indices.getMat().type() != CV_32SC1) {
CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
}
Mat src = _src.getMat();
vector<int> indices = _indices.getMat();
std::vector<int> indices = _indices.getMat();
_dst.create(src.rows, src.cols, src.type());
Mat dst = _dst.getMat();
for(size_t idx = 0; idx < indices.size(); idx++) {
@@ -183,13 +178,13 @@ Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
int d = src.cols;
// make sure the data has the correct shape
if(W.rows != d) {
string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(Error::StsBadArg, error_message);
}
// make sure mean is correct if not empty
if(!mean.empty() && (mean.total() != (size_t) d)) {
string error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
CV_Error(Error::StsBadArg, error_message);
}
// create temporary matrices
Mat X, Y;
@@ -221,13 +216,13 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
int d = src.cols;
// make sure the data has the correct shape
if(W.cols != d) {
string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
CV_Error(Error::StsBadArg, error_message);
}
// make sure mean is correct if not empty
if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
string error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
CV_Error(CV_StsBadArg, error_message);
String error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
CV_Error(Error::StsBadArg, error_message);
}
// initialize temporary matrices
Mat X, Y;
@@ -330,7 +325,7 @@ private:
int n1 = nn - 1;
int low = 0;
int high = nn - 1;
double eps = pow(2.0, -52.0);
double eps = std::pow(2.0, -52.0);
double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
@@ -342,7 +337,7 @@ private:
d[i] = H[i][i];
e[i] = 0.0;
}
for (int j = max(i - 1, 0); j < nn; j++) {
for (int j = std::max(i - 1, 0); j < nn; j++) {
norm = norm + std::abs(H[i][j]);
}
}
@@ -380,7 +375,7 @@ private:
w = H[n1][n1 - 1] * H[n1 - 1][n1];
p = (H[n1 - 1][n1 - 1] - H[n1][n1]) / 2.0;
q = p * p + w;
z = sqrt(std::abs(q));
z = std::sqrt(std::abs(q));
H[n1][n1] = H[n1][n1] + exshift;
H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
x = H[n1][n1];
@@ -404,7 +399,7 @@ private:
s = std::abs(x) + std::abs(z);
p = x / s;
q = z / s;
r = sqrt(p * p + q * q);
r = std::sqrt(p * p + q * q);
p = p / r;
q = q / r;
@@ -475,7 +470,7 @@ private:
s = (y - x) / 2.0;
s = s * s + w;
if (s > 0) {
s = sqrt(s);
s = std::sqrt(s);
if (y < x) {
s = -s;
}
@@ -539,7 +534,7 @@ private:
if (x == 0.0) {
break;
}
s = sqrt(p * p + q * q + r * r);
s = std::sqrt(p * p + q * q + r * r);
if (p < 0) {
s = -s;
}
@@ -570,7 +565,7 @@ private:
// Column modification
for (int i = 0; i <= min(n1, k + 3); i++) {
for (int i = 0; i <= std::min(n1, k + 3); i++) {
p = x * H[i][k] + y * H[i][k + 1];
if (notlast) {
p = p + z * H[i][k + 2];
@@ -721,7 +716,7 @@ private:
// Overflow control
t = max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
t = std::max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
if ((eps * t) * t > 1) {
for (int j = i; j <= n1; j++) {
H[j][n1 - 1] = H[j][n1 - 1] / t;
@@ -748,7 +743,7 @@ private:
for (int j = nn - 1; j >= low; j--) {
for (int i = low; i <= high; i++) {
z = 0.0;
for (int k = low; k <= min(j, high); k++) {
for (int k = low; k <= std::min(j, high); k++) {
z = z + V[i][k] * H[k][j];
}
V[i][j] = z;
@@ -782,7 +777,7 @@ private:
ort[i] = H[i][m - 1] / scale;
h += ort[i] * ort[i];
}
double g = sqrt(h);
double g = std::sqrt(h);
if (ort[m] > 0) {
g = -g;
}
@@ -941,20 +936,20 @@ public:
//------------------------------------------------------------------------------
// Linear Discriminant Analysis implementation
//------------------------------------------------------------------------------
void LDA::save(const string& filename) const {
void LDA::save(const String& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
if (!fs.isOpened()) {
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for writing!");
}
this->save(fs);
fs.release();
}
// Deserializes this object from a given filename.
void LDA::load(const string& filename) {
void LDA::load(const String& filename) {
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
CV_Error(CV_StsError, "File can't be opened for writing!");
CV_Error(Error::StsError, "File can't be opened for reading!");
this->load(fs);
fs.release();
}
@@ -978,7 +973,7 @@ void LDA::load(const FileStorage& fs) {
void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// get data
Mat src = _src.getMat();
vector<int> labels;
std::vector<int> labels;
// safely copy the labels
{
Mat tmp = _lbls.getMat();
@@ -991,9 +986,9 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// ensure working matrix is double precision
src.convertTo(data, CV_64FC1);
// maps the labels, so they're ascending: [0,1,...,C]
vector<int> mapped_labels(labels.size());
vector<int> num2label = remove_dups(labels);
map<int, int> label2num;
std::vector<int> mapped_labels(labels.size());
std::vector<int> num2label = remove_dups(labels);
std::map<int, int> label2num;
for (int i = 0; i < (int)num2label.size(); i++)
label2num[num2label[i]] = i;
for (size_t i = 0; i < labels.size(); i++)
@@ -1006,19 +1001,19 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// we can't do a LDA on one class, what do you
// want to separate from each other then?
if(C == 1) {
string error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
CV_Error(CV_StsBadArg, error_message);
String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
CV_Error(Error::StsBadArg, error_message);
}
// throw error if less labels, than samples
if (labels.size() != static_cast<size_t>(N)) {
string error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
CV_Error(CV_StsBadArg, error_message);
String error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
CV_Error(Error::StsBadArg, error_message);
}
// warn if within-classes scatter matrix becomes singular
if (N < D) {
cout << "Warning: Less observations than feature dimension given!"
<< "Computation will probably fail."
<< endl;
std::cout << "Warning: Less observations than feature dimension given!"
<< "Computation will probably fail."
<< std::endl;
}
// clip number of components to be a valid number
if ((_num_components <= 0) || (_num_components > (C - 1))) {
@@ -1027,8 +1022,8 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// holds the mean over all classes
Mat meanTotal = Mat::zeros(1, D, data.type());
// holds the mean for each class
vector<Mat> meanClass(C);
vector<int> numClass(C);
std::vector<Mat> meanClass(C);
std::vector<int> numClass(C);
// initialize
for (int i = 0; i < C; i++) {
numClass[i] = 0;
@@ -1076,7 +1071,7 @@ void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
// reshape eigenvalues, so they are stored by column
_eigenvalues = _eigenvalues.reshape(1, 1);
// get sorted indices descending by their eigenvalue
vector<int> sorted_indices = argsort(_eigenvalues, false);
std::vector<int> sorted_indices = argsort(_eigenvalues, false);
// now sort eigenvalues and eigenvectors accordingly
_eigenvalues = sortMatrixColumnsByIndices(_eigenvalues, sorted_indices);
_eigenvectors = sortMatrixColumnsByIndices(_eigenvectors, sorted_indices);
@@ -1094,8 +1089,8 @@ void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
lda(_src.getMat(), _lbls);
break;
default:
string error_message= format("InputArray Datatype %d is not supported.", _src.kind());
CV_Error(CV_StsBadArg, error_message);
String error_message= format("InputArray Datatype %d is not supported.", _src.kind());
CV_Error(Error::StsBadArg, error_message);
break;
}
}
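A short sketch tying the pieces of this hunk together: compute an LDA, then project new samples with subspaceProject(), whose signature appears above. The LDA(int) constructor and the eigenvectors() accessor are assumed from the class interface; the rest is shown in the diff:

#include "opencv2/contrib.hpp"

// data: one sample per row, labels: one integer class id per sample.
static cv::Mat projectWithLDA(const cv::Mat& data, const cv::Mat& labels, const cv::Mat& samples)
{
    cv::LDA lda(2);               // keep at most two discriminant components (assumed ctor)
    lda.compute(data, labels);    // LDA::compute as in this hunk

    // Project new samples into the discriminant subspace; an empty mean is allowed,
    // as the checks in subspaceProject above show.
    return cv::subspaceProject(lda.eigenvectors(), cv::Mat(), samples);
}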

View File

@@ -75,13 +75,13 @@ LogPolar_Interp::LogPolar_Interp(int w, int h, Point2i center, int _R, double _r
int rtmp;
if (center.x<=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
else if (center.x>=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
else if (center.x>=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
else //if (center.x<=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
M=2*rtmp; N=2*rtmp;
@@ -97,8 +97,8 @@ LogPolar_Interp::LogPolar_Interp(int w, int h, Point2i center, int _R, double _r
if (sp){
int jc=M/2-1, ic=N/2-1;
int _romax=min(ic, jc);
double _a=exp(log((double)(_romax/2-1)/(double)ro0)/(double)R);
int _romax=std::min(ic, jc);
double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
S=(int) floor(2*CV_PI/(_a-1)+0.5);
}
@@ -116,8 +116,8 @@ void LogPolar_Interp::create_map(int _M, int _n, int _R, int _s, double _ro0)
ro0=_ro0;
int jc=N/2-1, ic=M/2-1;
romax=min(ic, jc);
a=exp(log((double)romax/(double)ro0)/(double)R);
romax=std::min(ic, jc);
a=std::exp(std::log((double)romax/(double)ro0)/(double)R);
q=((double)S)/(2*CV_PI);
Rsri = Mat::zeros(S,R,CV_32FC1);
@@ -129,8 +129,8 @@ void LogPolar_Interp::create_map(int _M, int _n, int _R, int _s, double _ro0)
{
for(int u=0; u<R; u++)
{
Rsri.at<float>(v,u)=(float)(ro0*pow(a,u)*sin(v/q)+jc);
Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
Rsri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*sin(v/q)+jc);
Csri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*cos(v/q)+ic);
}
}
@@ -150,7 +150,7 @@ void LogPolar_Interp::create_map(int _M, int _n, int _R, int _s, double _ro0)
ETAyx.at<float>(j,i)=(float)(q*theta);
double ro2=(j-jc)*(j-jc)+(i-ic)*(i-ic);
CSIyx.at<float>(j,i)=(float)(0.5*log(ro2/(ro0*ro0))/log(a));
CSIyx.at<float>(j,i)=(float)(0.5*std::log(ro2/(ro0*ro0))/std::log(a));
}
}
}
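For reference, the grid filled in above realises the usual log-polar mapping; in the code's notation, with a and q = S/(2\pi) computed in create_map:

\[ \rho(u) = \rho_0\, a^{u}, \qquad \theta(v) = \frac{v}{q}, \qquad a = \exp\!\left(\frac{\ln(\rho_{\max}/\rho_0)}{R}\right), \]

and the inverse map stored in CSIyx / ETAyx:

\[ u = \frac{\ln\!\big(\rho^2/\rho_0^2\big)}{2\,\ln a}, \qquad v = q\,\theta. \]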
@@ -221,13 +221,13 @@ LogPolar_Overlapping::LogPolar_Overlapping(int w, int h, Point2i center, int _R,
int rtmp;
if (center.x<=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
else if (center.x>=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
else if (center.x>=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
else //if (center.x<=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
M=2*rtmp; N=2*rtmp;
@@ -244,8 +244,8 @@ LogPolar_Overlapping::LogPolar_Overlapping(int w, int h, Point2i center, int _R,
if (sp){
int jc=M/2-1, ic=N/2-1;
int _romax=min(ic, jc);
double _a=exp(log((double)(_romax/2-1)/(double)ro0)/(double)R);
int _romax=std::min(ic, jc);
double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
S=(int) floor(2*CV_PI/(_a-1)+0.5);
}
@@ -261,8 +261,8 @@ void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro
ro0=_ro0;
int jc=N/2-1, ic=M/2-1;
romax=min(ic, jc);
a=exp(log((double)romax/(double)ro0)/(double)R);
romax=std::min(ic, jc);
a=std::exp(std::log((double)romax/(double)ro0)/(double)R);
q=((double)S)/(2*CV_PI);
ind1=0;
@@ -279,8 +279,8 @@ void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro
{
for(int u=0; u<R; u++)
{
Rsri.at<float>(v,u)=(float)(ro0*pow(a,u)*sin(v/q)+jc);
Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
Rsri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*sin(v/q)+jc);
Csri.at<float>(v,u)=(float)(ro0*std::pow(a,u)*cos(v/q)+ic);
Rsr[v*R+u]=(int)floor(Rsri.at<float>(v,u));
Csr[v*R+u]=(int)floor(Csri.at<float>(v,u));
}
@@ -290,7 +290,7 @@ void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro
for(int i=0; i<R; i++)
{
Wsr[i]=ro0*(a-1)*pow(a,i-1);
Wsr[i]=ro0*(a-1)*std::pow(a,i-1);
if((Wsr[i]>1)&&(done==false))
{
ind1=i;
@@ -314,7 +314,7 @@ void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro
ETAyx.at<float>(j,i)=(float)(q*theta);
double ro2=(j-jc)*(j-jc)+(i-ic)*(i-ic);
CSIyx.at<float>(j,i)=(float)(0.5*log(ro2/(ro0*ro0))/log(a));
CSIyx.at<float>(j,i)=(float)(0.5*std::log(ro2/(ro0*ro0))/std::log(a));
}
}
@@ -332,7 +332,7 @@ void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro
for(int j=0; j<2*w+1; j++)
for(int i=0; i<2*w+1; i++)
{
(w_ker_2D[v*R+u].weights)[j*(2*w+1)+i]=exp(-(pow(i-w-dx, 2)+pow(j-w-dy, 2))/(2*sigma*sigma));
(w_ker_2D[v*R+u].weights)[j*(2*w+1)+i]=std::exp(-(std::pow(i-w-dx, 2)+std::pow(j-w-dy, 2))/(2*sigma*sigma));
tot+=(w_ker_2D[v*R+u].weights)[j*(2*w+1)+i];
}
for(int j=0; j<(2*w+1); j++)
@@ -351,7 +351,7 @@ const Mat LogPolar_Overlapping::to_cortical(const Mat &source)
remap(source_border,out,Csri,Rsri,INTER_LINEAR);
int wm=w_ker_2D[R-1].w;
vector<int> IMG((M+2*wm+1)*(N+2*wm+1), 0);
std::vector<int> IMG((M+2*wm+1)*(N+2*wm+1), 0);
for(int j=0; j<N; j++)
for(int i=0; i<M; i++)
@@ -388,8 +388,8 @@ const Mat LogPolar_Overlapping::to_cartesian(const Mat &source)
int wm=w_ker_2D[R-1].w;
vector<double> IMG((N+2*wm+1)*(M+2*wm+1), 0.);
vector<double> NOR((N+2*wm+1)*(M+2*wm+1), 0.);
std::vector<double> IMG((N+2*wm+1)*(M+2*wm+1), 0.);
std::vector<double> NOR((N+2*wm+1)*(M+2*wm+1), 0.);
for(int v=0; v<S; v++)
for(int u=ind1; u<R; u++)
@@ -416,7 +416,7 @@ const Mat LogPolar_Overlapping::to_cartesian(const Mat &source)
{
/*if(NOR[(M+2*wm+1)*j+i]>0)
ret[M*(j-wm)+i-wm]=(int) floor(IMG[(M+2*wm+1)*j+i]+0.5);*/
//int ro=(int)floor(sqrt((double)((j-wm-yc)*(j-wm-yc)+(i-wm-xc)*(i-wm-xc))));
//int ro=(int)floor(std::sqrt((double)((j-wm-yc)*(j-wm-yc)+(i-wm-xc)*(i-wm-xc))));
int csi=(int) floor(CSIyx.at<float>(j-wm,i-wm));
if((csi>=(ind1-(w_ker_2D[ind1]).w))&&(csi<R))
@@ -446,13 +446,13 @@ LogPolar_Adjacent::LogPolar_Adjacent(int w, int h, Point2i center, int _R, doubl
int rtmp;
if (center.x<=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)center.y*center.y + (float)(w-center.x)*(w-center.x));
else if (center.x>=w/2 && center.y>=h/2)
rtmp=(int)sqrt((float)center.y*center.y + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)center.y*center.y + (float)center.x*center.x);
else if (center.x>=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)center.x*center.x);
else //if (center.x<=w/2 && center.y<=h/2)
rtmp=(int)sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
rtmp=(int)std::sqrt((float)(h-center.y)*(h-center.y) + (float)(w-center.x)*(w-center.x));
M=2*rtmp; N=2*rtmp;
@@ -468,8 +468,8 @@ LogPolar_Adjacent::LogPolar_Adjacent(int w, int h, Point2i center, int _R, doubl
if (sp){
int jc=M/2-1, ic=N/2-1;
int _romax=min(ic, jc);
double _a=exp(log((double)(_romax/2-1)/(double)ro0)/(double)R);
int _romax=std::min(ic, jc);
double _a=std::exp(std::log((double)(_romax/2-1)/(double)ro0)/(double)R);
S=(int) floor(2*CV_PI/(_a-1)+0.5);
}
@@ -484,9 +484,9 @@ void LogPolar_Adjacent::create_map(int _M, int _n, int _R, int _s, double _ro0,
R=_R;
S=_s;
ro0=_ro0;
romax=min(M/2.0, N/2.0);
romax=std::min(M/2.0, N/2.0);
a=exp(log(romax/ro0)/(double)R);
a=std::exp(std::log(romax/ro0)/(double)R);
q=S/(2*CV_PI);
A.resize(R*S);
@@ -572,7 +572,7 @@ const Mat LogPolar_Adjacent::to_cortical(const Mat &source)
Mat source_border;
copyMakeBorder(source,source_border,top,bottom,left,right,BORDER_CONSTANT,Scalar(0));
vector<double> map(R*S, 0.);
std::vector<double> map(R*S, 0.);
for(int j=0; j<N; j++)
for(int i=0; i<M; i++)
@@ -597,7 +597,7 @@ const Mat LogPolar_Adjacent::to_cortical(const Mat &source)
const Mat LogPolar_Adjacent::to_cartesian(const Mat &source)
{
vector<double> map(M*N, 0.);
std::vector<double> map(M*N, 0.);
for(int j=0; j<N; j++)
for(int i=0; i<M; i++)
@@ -621,7 +621,7 @@ const Mat LogPolar_Adjacent::to_cartesian(const Mat &source)
bool LogPolar_Adjacent::get_uv(double x, double y, int&u, int&v)
{
double ro=sqrt(x*x+y*y), theta;
double ro=std::sqrt(x*x+y*y), theta;
if(x>0)
theta=atan(y/x);
else
@@ -635,7 +635,7 @@ bool LogPolar_Adjacent::get_uv(double x, double y, int&u, int&v)
}
else
{
u= (int) floor(log(ro/ro0)/log(a));
u= (int) floor(std::log(ro/ro0)/std::log(a));
if(theta>=0)
v= (int) floor(q*theta);
else
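
For reference, the calls these LogPolar hunks qualify with std:: implement the log-polar sampling grid: a ring growth factor a = exp(log(romax/ro0)/R), q = S/(2*pi) sectors per radian, and sample positions ro0*a^u*(cos, sin)(v/q) about the image centre. A minimal standalone sketch of the forward and inverse mapping follows; the parameter values, the variable names, and the use of std::atan2 in place of get_uv()'s explicit quadrant handling are illustrative assumptions, not code from this commit.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    // Illustrative parameters (assumed, not taken from the commit).
    const int R = 70, S = 110;                 // rings and sectors
    const int M = 256, N = 256;                // image size
    const int jc = N / 2 - 1, ic = M / 2 - 1;  // image centre
    const double ro0 = 3.0;                    // innermost ring radius
    const double pi = std::acos(-1.0);

    const double romax = std::min(ic, jc);                 // largest usable radius
    const double a = std::exp(std::log(romax / ro0) / R);  // ring growth factor
    const double q = S / (2.0 * pi);                       // sectors per radian

    // Forward map: ring u, sector v -> Cartesian sample (column, row),
    // the quantity stored per cell in Csri/Rsri.
    const int u = R / 2, v = S / 4;
    const double rho = ro0 * std::pow(a, u);
    const double col = rho * std::cos(v / q) + ic;
    const double row = rho * std::sin(v / q) + jc;

    // Inverse map back to (ring, sector); std::atan2 stands in for the
    // quadrant handling done in get_uv().
    const double x = col - ic, y = row - jc;
    const int u_back = (int)std::floor(std::log(std::sqrt(x * x + y * y) / ro0) / std::log(a));
    const int v_back = (int)std::floor(q * std::atan2(y, x));

    std::printf("ring %d sector %d -> (%.1f, %.1f) -> ring %d sector %d\n",
                u, v, col, row, u_back, v_back);
    return 0;
}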

View File

@@ -144,7 +144,7 @@ void MagnoRetinaFilter::resize(const unsigned int NBrows, const unsigned int NBc
void MagnoRetinaFilter::setCoefficientsTable(const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float localAdaptIntegration_tau, const float localAdaptIntegration_k )
{
_temporalCoefficient=(float)exp(-1.0f/amacrinCellsTemporalCutFrequency);
_temporalCoefficient=(float)std::exp(-1.0f/amacrinCellsTemporalCutFrequency);
// the first set of parameters is dedicated to the low pass filtering property of the ganglion cells
BasicRetinaFilter::setLPfilterParameters(parasolCells_beta, parasolCells_tau, parasolCells_k, 0);
// the second set of parameters is dedicated to the ganglion cells output integration for their local adaptation property
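
The single change in this file qualifies the exp call that builds _temporalCoefficient = std::exp(-1.0f/amacrinCellsTemporalCutFrequency). A coefficient of that form is the pole of a first-order recursive temporal filter; the sketch below shows the usual exponential-smoothing use of such a pole. It is an illustration only: the time-constant value, the names, and the low-pass form (the amacrine-cell stage may apply the coefficient in a different, high-pass arrangement) are assumptions, not code from this commit.

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    const float tau = 1.2f;                 // time constant (illustrative)
    const float a = std::exp(-1.0f / tau);  // same form as _temporalCoefficient

    // First-order recursive smoother driven by a toy step input:
    //   y[n] = a*y[n-1] + (1-a)*x[n]
    const std::vector<float> input = {0.f, 1.f, 1.f, 1.f, 1.f};
    float state = 0.f;
    for (float x : input)
    {
        state = a * state + (1.f - a) * x;
        std::printf("%.3f\n", state);
    }
    return 0;
}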

View File

@@ -101,7 +101,7 @@ namespace
return true;
}
void fillMinMax(const vector<Point3f>& points, Octree::Node& node)
void fillMinMax(const std::vector<Point3f>& points, Octree::Node& node)
{
node.x_max = node.y_max = node.z_max = std::numeric_limits<float>::min();
node.x_min = node.y_min = node.z_min = std::numeric_limits<float>::max();
@@ -171,7 +171,7 @@ namespace cv
{
}
Octree::Octree(const vector<Point3f>& points3d, int maxLevels, int _minPoints)
Octree::Octree(const std::vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
buildTree(points3d, maxLevels, _minPoints);
}
@@ -180,7 +180,7 @@ namespace cv
{
}
void Octree::getPointsWithinSphere(const Point3f& center, float radius, vector<Point3f>& out) const
void Octree::getPointsWithinSphere(const Point3f& center, float radius, std::vector<Point3f>& out) const
{
out.clear();
@@ -256,9 +256,9 @@ namespace cv
}
}
void Octree::buildTree(const vector<Point3f>& points3d, int maxLevels, int _minPoints)
void Octree::buildTree(const std::vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
CV_Assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
points.resize(points3d.size());
std::copy(points3d.begin(), points3d.end(), points.begin());
minPoints = _minPoints;
@@ -286,9 +286,9 @@ namespace cv
{
size_t size = nodes[nodeInd].end - nodes[nodeInd].begin;
vector<size_t> boxBorders(MAX_LEAFS+1, 0);
vector<size_t> boxIndices(size);
vector<Point3f> tempPoints(size);
std::vector<size_t> boxBorders(MAX_LEAFS+1, 0);
std::vector<size_t> boxIndices(size);
std::vector<Point3f> tempPoints(size);
for (int i = nodes[nodeInd].begin, j = 0; i < nodes[nodeInd].end; ++i, ++j)
{
@@ -304,7 +304,7 @@ namespace cv
for (size_t i = 1; i < boxBorders.size(); ++i)
boxBorders[i] += boxBorders[i-1];
vector<size_t> writeInds(boxBorders.begin(), boxBorders.end());
std::vector<size_t> writeInds(boxBorders.begin(), boxBorders.end());
for (size_t i = 0; i < size; ++i)
{

Some files were not shown because too many files have changed in this diff.
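
The Octree::buildTree hunk above is cut short by that truncation. The pattern it switches to std::vector for is a counting sort into child octants: count points per octant, turn the counts (boxBorders) into prefix sums, then scatter the points into a temporary array through per-octant write cursors (writeInds). A minimal sketch follows; the point type, octant rule, and sample data are illustrative assumptions, not code from this commit.

#include <cstdio>
#include <vector>

struct P { float x, y, z; };

int main()
{
    std::vector<P> pts = { {1,1,1}, {-1,2,0}, {3,-1,-2}, {-2,-2,5} };
    const P c = {0, 0, 0};                       // node centre (assumed)

    // 3-bit child index from the sign of each coordinate offset.
    auto octant = [&](const P& p) {
        return (p.x >= c.x ? 1 : 0) | (p.y >= c.y ? 2 : 0) | (p.z >= c.z ? 4 : 0);
    };

    std::vector<size_t> borders(8 + 1, 0);       // per-octant counts, then prefix sums
    std::vector<int> idx(pts.size());
    for (size_t i = 0; i < pts.size(); ++i)
    {
        idx[i] = octant(pts[i]);
        ++borders[idx[i] + 1];
    }
    for (size_t i = 1; i < borders.size(); ++i)
        borders[i] += borders[i - 1];            // borders[k] = first slot of octant k

    std::vector<size_t> writeInds(borders.begin(), borders.end());
    std::vector<P> tmp(pts.size());
    for (size_t i = 0; i < pts.size(); ++i)
        tmp[writeInds[idx[i]]++] = pts[i];       // scatter in octant order

    for (const P& p : tmp)
        std::printf("(%g, %g, %g)\n", p.x, p.y, p.z);
    return 0;
}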