Set stricter warning rules for gcc

Andrey Kamaev
2012-06-07 17:21:29 +00:00
parent 0395f7c63f
commit 49a1ba6038
241 changed files with 9054 additions and 8947 deletions
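The hunks below are the source-side cleanups; the change that actually turns on the stricter gcc warnings presumably lives in the build files, which are not part of this excerpt. The most frequent pattern is guarding every _MSC_VER test with defined(): on gcc/clang the macro does not exist, and evaluating an undefined macro inside #if is exactly what -Wundef reports (it would otherwise silently evaluate to 0). A minimal sketch, not taken from the commit:

/* warns under gcc -Wundef:
   #if _MSC_VER >= 1200                                      */

/* warning-clean form used throughout this commit: */
#if defined _MSC_VER && _MSC_VER >= 1200
#  pragma warning(disable: 4786)  /* MSVC-only pragma, as in the hunk below */
#endif

int main() { return 0; }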


@@ -41,7 +41,7 @@
#include "precomp.hpp"
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning(disable:4786) // Disable MSVC warnings in the standard library.
#pragma warning(disable:4100)
#pragma warning(disable:4512)
@@ -49,7 +49,7 @@
#include <stdio.h>
#include <map>
#include <algorithm>
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning(default:4100)
#pragma warning(default:4512)
#endif
@@ -148,7 +148,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
cvReleaseImage(&gray_img);
CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
}
CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));
img = gray_img;
@@ -172,7 +172,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
etalon_size, points, &count) != 0;
if (count == 0)
continue;
// If found is true, it means all the points were found (count = num_points).
// If found is false but count is non-zero, it means that not all points were found.
@@ -258,7 +258,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
{ 0.f, 1.f, 0.f, 0.f },
{ 0.f, 0.f, 1.f, 0.f },
{ transVect[0], transVect[1], transVect[2], 1.f } };
float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
{ rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
{ rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
@@ -267,7 +267,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
MultMatrix(camera_info[c].mat, tmat, rmat);
// change the transformation of the cameras to put them in the world coordinate
// change the transformation of the cameras to put them in the world coordinate
// system we want to work with.
// Start with an identity matrix; then fill in the values to accomplish


@@ -53,13 +53,13 @@
#include "assert.h"
#include "math.h"
#if _MSC_VER >= 1400
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning(disable: 4512) // suppress "assignment operator could not be generated"
#endif
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
#undef __deref
#undef __valuetype
@@ -72,23 +72,23 @@ public:
private:
struct node {
int dim; // split dimension; >=0 for nodes, -1 for leaves
__valuetype value; // if leaf, value of leaf
int left, right; // node indices of left and right branches
scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
int dim; // split dimension; >=0 for nodes, -1 for leaves
__valuetype value; // if leaf, value of leaf
int left, right; // node indices of left and right branches
scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
};
typedef std::vector < node > node_array;
__deref deref; // requires operator() (__valuetype lhs,int dim)
__deref deref; // requires operator() (__valuetype lhs,int dim)
node_array nodes; // node storage
int point_dim; // dimension of points (the k in kd-tree)
int root_node; // index of root node, -1 if empty tree
node_array nodes; // node storage
int point_dim; // dimension of points (the k in kd-tree)
int root_node; // index of root node, -1 if empty tree
// for given set of point indices, compute dimension of highest variance
template < class __instype, class __valuector >
int dimension_of_highest_variance(__instype * first, __instype * last,
__valuector ctor) {
__valuector ctor) {
assert(last - first > 0);
accum_type maxvar = -std::numeric_limits < accum_type >::max();
@@ -96,32 +96,32 @@ private:
for (int j = 0; j < point_dim; ++j) {
accum_type mean = 0;
for (__instype * k = first; k < last; ++k)
mean += deref(ctor(*k), j);
mean += deref(ctor(*k), j);
mean /= last - first;
accum_type var = 0;
for (__instype * k = first; k < last; ++k) {
accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
var += diff * diff;
accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
var += diff * diff;
}
var /= last - first;
assert(maxj != -1 || var >= maxvar);
if (var >= maxvar) {
maxvar = var;
maxj = j;
maxvar = var;
maxj = j;
}
}
return maxj;
}
// given point indices and dimension, find index of median; (almost) modifies [first,last)
// given point indices and dimension, find index of median; (almost) modifies [first,last)
// such that points_in[first,median]<=point[median], points_in(median,last)>point[median].
// implemented as partial quicksort; expected linear perf.
template < class __instype, class __valuector >
__instype * median_partition(__instype * first, __instype * last,
int dim, __valuector ctor) {
int dim, __valuector ctor) {
assert(last - first > 0);
__instype *k = first + (last - first) / 2;
median_partition(first, last, k, dim, ctor);
@@ -143,14 +143,14 @@ private:
};
template < class __instype, class __valuector >
void median_partition(__instype * first, __instype * last,
__instype * k, int dim, __valuector ctor) {
void median_partition(__instype * first, __instype * last,
__instype * k, int dim, __valuector ctor) {
int pivot = (int)((last - first) / 2);
std::swap(first[pivot], last[-1]);
__instype *middle = std::partition(first, last - 1,
median_pr < __instype, __valuector >
(last[-1], dim, deref, ctor));
median_pr < __instype, __valuector >
(last[-1], dim, deref, ctor));
std::swap(*middle, last[-1]);
if (middle < k)
@@ -170,36 +170,36 @@ private:
__instype *median = median_partition(first, last, dim, ctor);
__instype *split = median;
for (; split != last && deref(ctor(*split), dim) ==
deref(ctor(*median), dim); ++split);
for (; split != last && deref(ctor(*split), dim) ==
deref(ctor(*median), dim); ++split);
if (split == last) { // leaf
int nexti = -1;
for (--split; split >= first; --split) {
int i = (int)nodes.size();
node & n = *nodes.insert(nodes.end(), node());
n.dim = -1;
n.value = ctor(*split);
n.left = -1;
n.right = nexti;
nexti = i;
}
int nexti = -1;
for (--split; split >= first; --split) {
int i = (int)nodes.size();
node & n = *nodes.insert(nodes.end(), node());
n.dim = -1;
n.value = ctor(*split);
n.left = -1;
n.right = nexti;
nexti = i;
}
return nexti;
return nexti;
} else { // node
int i = (int)nodes.size();
// note that recursive insert may invalidate this ref
node & n = *nodes.insert(nodes.end(), node());
int i = (int)nodes.size();
// note that recursive insert may invalidate this ref
node & n = *nodes.insert(nodes.end(), node());
n.dim = dim;
n.boundary = deref(ctor(*median), dim);
n.dim = dim;
n.boundary = deref(ctor(*median), dim);
int left = insert(first, split, ctor);
nodes[i].left = left;
int right = insert(split, last, ctor);
nodes[i].right = right;
int left = insert(first, split, ctor);
nodes[i].left = left;
int right = insert(split, last, ctor);
nodes[i].right = right;
return i;
return i;
}
}
}
@@ -214,21 +214,21 @@ private:
if (n.dim >= 0) { // node
if (deref(p, n.dim) <= n.boundary) // left
r = remove(&n.left, p);
r = remove(&n.left, p);
else // right
r = remove(&n.right, p);
r = remove(&n.right, p);
// if terminal, remove this node
if (n.left == -1 && n.right == -1)
*i = -1;
*i = -1;
return r;
} else { // leaf
if (n.value == p) {
*i = n.right;
return true;
*i = n.right;
return true;
} else
return remove(&n.right, p);
return remove(&n.right, p);
}
}
@@ -245,14 +245,14 @@ public:
}
// given points, initialize a balanced tree
CvKDTree(__valuetype * first, __valuetype * last, int _point_dim,
__deref _deref = __deref())
__deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, identity_ctor());
}
// given points, initialize a balanced tree
template < class __instype, class __valuector >
CvKDTree(__instype * first, __instype * last, int _point_dim,
__valuector ctor, __deref _deref = __deref())
__valuector ctor, __deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, ctor);
}
@@ -266,7 +266,7 @@ public:
}
template < class __instype, class __valuector >
void set_data(__instype * first, __instype * last, int _point_dim,
__valuector ctor) {
__valuector ctor) {
point_dim = _point_dim;
nodes.clear();
nodes.reserve(last - first);
@@ -292,9 +292,9 @@ public:
std::cout << " ";
const node & n = nodes[i];
if (n.dim >= 0) {
std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
nodes[i].boundary << std::endl;
std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
nodes[i].boundary << std::endl;
print(n.left, indent + 3);
print(n.right, indent + 3);
} else
@@ -304,9 +304,9 @@ public:
////////////////////////////////////////////////////////////////////////////////////////
// bbf search
public:
struct bbf_nn { // info on found neighbors (approx k nearest)
const __valuetype *p; // nearest neighbor
accum_type dist; // distance from d to query point
struct bbf_nn { // info on found neighbors (approx k nearest)
const __valuetype *p; // nearest neighbor
accum_type dist; // distance from d to query point
bbf_nn(const __valuetype & _p, accum_type _dist)
: p(&_p), dist(_dist) {
}
@@ -316,9 +316,9 @@ public:
};
typedef std::vector < bbf_nn > bbf_nn_pqueue;
private:
struct bbf_node { // info on branches not taken
int node; // corresponding node
accum_type dist; // minimum distance from bounds to query point
struct bbf_node { // info on branches not taken
int node; // corresponding node
accum_type dist; // minimum distance from bounds to query point
bbf_node(int _node, accum_type _dist)
: node(_node), dist(_dist) {
}
@@ -346,10 +346,10 @@ private:
int bbf_branch(int i, const __desctype * d, bbf_pqueue & pq) const {
const node & n = nodes[i];
// push bbf_node with bounds of alternate branch, then branch
if (d[n.dim] <= n.boundary) { // left
if (d[n.dim] <= n.boundary) { // left
pq_alternate(n.right, pq, n.boundary - d[n.dim]);
return n.left;
} else { // right
} else { // right
pq_alternate(n.left, pq, d[n.dim] - n.boundary);
return n.right;
}
@@ -366,11 +366,11 @@ private:
}
// called per candidate nearest neighbor; constructs new bbf_nn for
// candidate and adds it to priority queue of all candidates; if
// candidate and adds it to priority queue of all candidates; if
// queue len exceeds k, drops the point furthest from query point d.
template < class __desctype >
void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
const __desctype * d, const __valuetype & p) const {
void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
const __desctype * d, const __valuetype & p) const {
bbf_nn nn(p, distance(d, p));
if ((int) nn_pq.size() < k) {
nn_pq.push_back(nn);
@@ -384,14 +384,14 @@ private:
}
public:
// finds (with high probability) the k nearest neighbors of d,
// finds (with high probability) the k nearest neighbors of d,
// searching at most emax leaves/bins.
// ret_nn_pq is an array containing the (at most) k nearest neighbors
// ret_nn_pq is an array containing the (at most) k nearest neighbors
// (see bbf_nn structure def above).
template < class __desctype >
int find_nn_bbf(const __desctype * d,
int k, int emax,
bbf_nn_pqueue & ret_nn_pq) const {
int find_nn_bbf(const __desctype * d,
int k, int emax,
bbf_nn_pqueue & ret_nn_pq) const {
assert(k > 0);
ret_nn_pq.clear();
@@ -411,17 +411,17 @@ public:
int i;
for (i = bbf.node;
i != -1 && nodes[i].dim >= 0;
i = bbf_branch(i, d, tmp_pq));
i != -1 && nodes[i].dim >= 0;
i = bbf_branch(i, d, tmp_pq));
if (i != -1) {
// add points in leaf/bin to ret_nn_pq
do {
bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
} while (-1 != (i = nodes[i].right));
// add points in leaf/bin to ret_nn_pq
do {
bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
} while (-1 != (i = nodes[i].right));
--emax;
--emax;
}
}
@@ -433,27 +433,27 @@ public:
// orthogonal range search
private:
void find_ortho_range(int i, scalar_type * bounds_min,
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
if (i == -1)
return;
const node & n = nodes[i];
if (n.dim >= 0) { // node
if (bounds_min[n.dim] <= n.boundary)
find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
if (bounds_max[n.dim] > n.boundary)
find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
} else { // leaf
do {
inbounds.push_back(nodes[i].value);
inbounds.push_back(nodes[i].value);
} while (-1 != (i = nodes[i].right));
}
}
public:
// return all points that lie within the given bounds; inbounds is cleared
int find_ortho_range(scalar_type * bounds_min,
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
scalar_type * bounds_max,
std::vector < __valuetype > &inbounds) const {
inbounds.clear();
find_ortho_range(root_node, bounds_min, bounds_max, inbounds);
return (int)inbounds.size();


@@ -237,9 +237,9 @@ public:
virtual float* GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
};/* CvBlobTrackFVGenN */
CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
inline CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
inline CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
inline CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
#undef MAX_FV_SIZE
#define MAX_FV_SIZE 4
@@ -408,7 +408,7 @@ public:
virtual float* GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
};/* CvBlobTrackFVGenSS */
CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
inline CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
/*======================= TRAJECTORY ANALYZER MODULES =====================*/
/* Trajectory Analyser module */
@@ -1510,7 +1510,7 @@ public:
}; /* CvBlobTrackAnalysisSVM. */
#if 0
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenP);}
@@ -1522,3 +1522,4 @@ CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS()
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenSS);}
#endif
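Most of the blob-tracking changes adjust linkage rather than behaviour: factory helpers such as cvCreateFVGenP are defined in this one translation unit with external linkage and no visible declaration, which the stricter settings flag. The usual gcc culprit is -Wmissing-declarations, which in C++ skips inline functions and functions with internal linkage; that is presumably why inline is used here and static in the files below (the actual flag list is in the build files, not in this excerpt). A standalone sketch with hypothetical names:

struct Widget { int id; };

// Before: external linkage, no prior declaration -> flagged.
//   Widget makeWidget() { return Widget{1}; }

// File-local helper: give it internal linkage.
static Widget makeWidget() { return Widget{1}; }

// The inline alternative used for the cvCreateFVGen* factories above:
// inline definitions are exempt from -Wmissing-declarations in C++.
inline Widget makeWidgetInline() { return Widget{2}; }

int main() { return makeWidget().id + makeWidgetInline().id; }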


@@ -162,12 +162,15 @@ public:
}
}; /* class CvBlobTrackerOneKalman */
#if 0
static CvBlobTrackerOne* cvCreateModuleBlobTrackerOneKalman()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneKalman;
}
CvBlobTracker* cvCreateBlobTrackerKalman()
{
return cvCreateBlobTrackerList(cvCreateModuleBlobTrackerOneKalman);
}
#endif
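Factories that are apparently no longer referenced (the SVM-based analysis constructors above and the Kalman tracker here) are compiled out with #if 0 ... #endif instead of being deleted, so they cannot trip unused-function or missing-declaration checks but stay in the source for reference. A standalone sketch with hypothetical names:

struct Tracker { virtual ~Tracker() {} };

#if 0   // retired factory: kept for reference, excluded from the build
static Tracker* createKalmanTracker() { return new Tracker; }
#endif

int main() { return 0; }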


@@ -716,7 +716,7 @@ void CvBlobTrackerOneMSFG::CollectHist(IplImage* pImg, IplImage* pMask, CvBlob*
}; /* CollectHist */
#endif
CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFG;
}
@@ -739,7 +739,7 @@ public:
};
};
CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMS;
}
@@ -1169,6 +1169,7 @@ public:
}; /* CvBlobTrackerOneMSPF */
CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF();
CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSPF;


@@ -47,7 +47,7 @@ typedef float DefHistType;
#define DefHistTypeMat CV_32F
#define HIST_INDEX(_pData) (((_pData)[0]>>m_ByteShift) + (((_pData)[1]>>(m_ByteShift))<<m_BinBit)+((pImgData[2]>>m_ByteShift)<<(m_BinBit*2)))
void calcKernelEpanechnikov(CvMat* pK)
static void calcKernelEpanechnikov(CvMat* pK)
{ /* Allocate kernel for histogramm creation: */
int x,y;
int w = pK->width;
@@ -445,7 +445,7 @@ public:
virtual void Release(){delete this;};
}; /*CvBlobTrackerOneMSFGS*/
CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFGS;
}


@@ -188,7 +188,7 @@ void CvBlobTrackPostProcKalman::Release()
delete this;
}
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcKalman;
}


@@ -106,12 +106,12 @@ public:
}
}; /* class CvBlobTrackPostProcTimeAver */
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(0);
}
CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(1);
}


@@ -44,7 +44,7 @@
#undef quad
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4701 )
#endif
@@ -99,18 +99,18 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
Stop();
if (latestPoints != NULL)
{
for( i = 0; i < MAX_CAMERAS; i++ )
cvFree( latestPoints + i );
}
if (latestPoints != NULL)
{
for( i = 0; i < MAX_CAMERAS; i++ )
cvFree( latestPoints + i );
}
if( type == CV_CALIB_ETALON_USER || type != etalonType )
{
if (etalonParams != NULL)
{
cvFree( &etalonParams );
}
if (etalonParams != NULL)
{
cvFree( &etalonParams );
}
}
etalonType = type;
@@ -154,10 +154,10 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
if( etalonPointCount != pointCount )
{
if (etalonPoints != NULL)
{
cvFree( &etalonPoints );
}
if (etalonPoints != NULL)
{
cvFree( &etalonPoints );
}
etalonPointCount = pointCount;
etalonPoints = (CvPoint2D32f*)cvAlloc( arrSize );
}
@@ -184,15 +184,15 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
break;
case CV_CALIB_ETALON_USER:
if (params != NULL)
{
memcpy( etalonParams, params, arrSize );
}
if (points != NULL)
{
memcpy( etalonPoints, points, arrSize );
}
break;
if (params != NULL)
{
memcpy( etalonParams, params, arrSize );
}
if (points != NULL)
{
memcpy( etalonPoints, points, arrSize );
}
break;
default:
assert(0);
@@ -226,7 +226,7 @@ CvCalibFilter::GetEtalon( int* paramCount, const double** params,
void CvCalibFilter::SetCameraCount( int count )
{
Stop();
if( count != cameraCount )
{
for( int i = 0; i < cameraCount; i++ )
@@ -245,7 +245,7 @@ void CvCalibFilter::SetCameraCount( int count )
}
}
bool CvCalibFilter::SetFrames( int frames )
{
if( frames < 5 )
@@ -253,7 +253,7 @@ bool CvCalibFilter::SetFrames( int frames )
assert(0);
return false;
}
framesTotal = frames;
return true;
}
@@ -304,7 +304,7 @@ void CvCalibFilter::Stop( bool calibrate )
cameraParams[i].imgSize[0] = (float)imgSize.width;
cameraParams[i].imgSize[1] = (float)imgSize.height;
// cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
// cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];
@@ -315,7 +315,7 @@ void CvCalibFilter::Stop( bool calibrate )
memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));
mat.data.ptr = (uchar*)(cameraParams + i);
/* check resultant camera parameters: if there are some INF's or NAN's,
stop and reset results */
if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 ))
@@ -342,7 +342,7 @@ void CvCalibFilter::Stop( bool calibrate )
{
stereo.fundMatr[i] = stereo.fundMatr[i];
}
}
}
@@ -499,16 +499,16 @@ bool CvCalibFilter::GetLatestPoints( int idx, CvPoint2D32f** pts,
int* count, bool* found )
{
int n;
if( (unsigned)idx >= (unsigned)cameraCount ||
!pts || !count || !found )
{
assert(0);
return false;
}
n = latestCounts[idx];
*found = n > 0;
*count = abs(n);
*pts = latestPoints[idx];
@@ -616,7 +616,7 @@ const CvCamera* CvCalibFilter::GetCameraParams( int idx ) const
assert(0);
return 0;
}
return isCalibrated ? cameraParams + idx : 0;
}
@@ -630,7 +630,7 @@ const CvStereoCamera* CvCalibFilter::GetStereoParams() const
assert(0);
return 0;
}
return &stereo;
}
@@ -640,9 +640,9 @@ bool CvCalibFilter::SetCameraParams( CvCamera* params )
{
CvMat mat;
int arrSize;
Stop();
if( !params )
{
assert(0);
@@ -667,7 +667,7 @@ bool CvCalibFilter::SaveCameraParams( const char* filename )
if( isCalibrated )
{
int i, j;
FILE* f = fopen( filename, "w" );
if( !f ) return false;
@@ -729,7 +729,7 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
return false;
SetCameraCount( d );
for( i = 0; i < cameraCount; i++ )
{
for( j = 0; j < (int)(sizeof(cameraParams[i])/sizeof(float)); j++ )
@@ -763,16 +763,16 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
CV_Assert(values_read == 1);
}
}
fclose(f);
stereo.warpSize = cvSize( cvRound(cameraParams[0].imgSize[0]), cvRound(cameraParams[0].imgSize[1]));
isCalibrated = true;
return true;
}
@@ -924,4 +924,4 @@ bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr )
return true;
}


@@ -45,7 +45,7 @@
//#include <limits.h>
//#include "cv.h"
//#include "highgui.h"
#if 0
#include <stdio.h>
/* Valery Mosyagin */
@@ -53,7 +53,7 @@
/* ===== Function for find corresponding between images ===== */
/* Create feature points on image and return number of them. Array points fills by found points */
int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
static int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
{
int foundFeaturePoints = 0;
IplImage *grayImage = 0;
@@ -175,9 +175,9 @@ int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
/* For given points1 (with pntStatus) on image1 finds corresponding points2 on image2 and set pntStatus2 for them */
/* Returns number of corresponding points */
int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
static int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
IplImage *image2,/* Image 2 */
CvMat *points1,
CvMat *points1,
CvMat *pntStatus1,
CvMat *points2,
CvMat *pntStatus2,
@@ -203,7 +203,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
/* Test input data for errors */
/* Test for null pointers */
if( image1 == 0 || image2 == 0 ||
if( image1 == 0 || image2 == 0 ||
points1 == 0 || points2 == 0 ||
pntStatus1 == 0 || pntStatus2 == 0)
{
@@ -226,7 +226,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
}
/* Test for matrices */
if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
!CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
@@ -333,11 +333,11 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
pyrImage1, pyrImage2,
cornerPoints1, cornerPoints2,
numVisPoints, cvSize(10,10), 3,
status, errors,
status, errors,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
0/*CV_LKFLOW_PYR_A_READY*/ );
memset(stat2,0,sizeof(uchar)*numPoints);
int currVis = 0;
@@ -393,7 +393,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
CvMat fundMatr;
double fundMatr_dat[9];
fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);
CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
@@ -435,8 +435,9 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
return resNumCorrPoints;
}
/*-------------------------------------------------------------------------------------*/
int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
static int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
{
/* Add to existing points and status arrays new points or just grow */
CvMat *newOldPoint = 0;
@@ -445,7 +446,7 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
CV_FUNCNAME( "icvGrowPointsAndStatus" );
__BEGIN__;
/* Test for errors */
if( oldPoints == 0 || oldStatus == 0 )
{
@@ -546,8 +547,9 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
return newTotalNumber;
}
/*-------------------------------------------------------------------------------------*/
int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
static int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
CvMat *newPoints,/* New points */
CvMat *oldStatus,/* Status for old points */
CvMat *newStatus,
@@ -560,7 +562,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
CvSeq* seq = 0;
int originalPoints = 0;
CV_FUNCNAME( "icvRemoveDoublePoins" );
__BEGIN__;
@@ -624,7 +626,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
{
CV_ERROR( CV_StsOutOfRange, "Statuses must have 1 row" );
}
/* we have points on image and wants add new points */
/* use subdivision for find nearest points */
@@ -731,7 +733,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
/* Point is double. Turn it off */
/* Set status */
//newStatus->data.ptr[i] = 0;
/* No this is a double point */
//originalPoints--;
flag = 0;
@@ -745,7 +747,7 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
__END__;
cvReleaseMemStorage( &storage );
return originalPoints;
@@ -755,11 +757,11 @@ int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
void icvComputeProjectMatrix(CvMat* objPoints,CvMat* projPoints,CvMat* projMatr);
/*-------------------------------------------------------------------------------------*/
void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
static void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
{
/* Compute number of good points */
int num = cvCountNonZero(status);
/* Create arrays */
CvMat *objPoints = 0;
objPoints = cvCreateMat(4,num,CV_64F);
@@ -802,7 +804,7 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
currVis++;
}
fprintf(file,"\n");
}
@@ -820,17 +822,16 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
/*-------------------------------------------------------------------------------------*/
/* For given N images
/* For given N images
we have corresponding points on N images
computed projection matrices
reconstructed 4D points
we must to compute
we must to compute
*/
void icvAddNewImageToPrevious____(
static void icvAddNewImageToPrevious____(
IplImage *newImage,//Image to add
IplImage *oldImage,//Previous image
CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible)
@@ -868,7 +869,7 @@ void icvAddNewImageToPrevious____(
int corrNum;
corrNum = icvFindCorrForGivenPoints( oldImage,/* Image 1 */
newImage,/* Image 2 */
oldPoints,
oldPoints,
oldPntStatus,
points2,
status,
@@ -887,10 +888,10 @@ void icvAddNewImageToPrevious____(
// icvComputeProjectMatrix(objPoints4D,points2,&projMatr);
icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr);
cvCopy(&projMatr,newProjMatr);
/* Create new points and find correspondence */
icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus);
/* Good if we test new points before find corr points */
/* Find correspondence for new found points */
@@ -947,7 +948,7 @@ void icvAddNewImageToPrevious____(
//CreateGood
/*-------------------------------------------------------------------------------------*/
int icvDeleteSparsInPoints( int numImages,
static int icvDeleteSparsInPoints( int numImages,
CvMat **points,
CvMat **status,
CvMat *wasStatus)/* status of previous configuration */
@@ -979,7 +980,7 @@ int icvDeleteSparsInPoints( int numImages,
int numCoord;
numCoord = points[0]->rows;// !!! may be number of coordinates is not correct !!!
int i;
int currExistPoint;
currExistPoint = 0;
@@ -1041,7 +1042,7 @@ int icvDeleteSparsInPoints( int numImages,
return comNumber;
}
#if 0
/*-------------------------------------------------------------------------------------*/
void icvGrowPointsArray(CvMat **points)
{
@@ -1089,7 +1090,7 @@ int AddImageToStruct( IplImage *newImage,//Image to add
cvConvert(pntStatus,status);
int corrNum = FindCorrForGivenPoints(oldImage,newImage,oldPoints,newPoints,status);
/* Status has new status of points */
CvMat projMatr;


@@ -48,7 +48,7 @@
Stan Birchfield and Carlo Tomasi
International Journal of Computer Vision,
35(3): 269-293, December 1999.
This implementation uses different cost function that results in
O(pixPerRow*maxDisparity) complexity of dynamic programming stage versus
O(pixPerRow*log(pixPerRow)*maxDisparity) in the above paper.
@@ -68,7 +68,7 @@
typedef struct _CvDPCell
{
uchar step; //local-optimal step
int sum; //current sum
int sum; //current sum
}_CvDPCell;
typedef struct _CvRightImData
@@ -79,17 +79,17 @@ typedef struct _CvRightImData
#define CV_IMAX3(a,b,c) ((temp3 = (a) >= (b) ? (a) : (b)),(temp3 >= (c) ? temp3 : (c)))
#define CV_IMIN3(a,b,c) ((temp3 = (a) <= (b) ? (a) : (b)),(temp3 <= (c) ? temp3 : (c)))
void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
static void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
uchar* disparities,
CvSize size, int widthStep,
int maxDisparity,
float _param1, float _param2,
int maxDisparity,
float _param1, float _param2,
float _param3, float _param4,
float _param5 )
{
int x, y, i, j, temp3;
int d, s;
int dispH = maxDisparity + 3;
int dispH = maxDisparity + 3;
uchar *dispdata;
int imgW = size.width;
int imgH = size.height;
@@ -103,22 +103,22 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
int param5 = cvRound(_param5);
#define CELL(d,x) cells[(d)+(x)*dispH]
uchar* dsi = (uchar*)cvAlloc(sizeof(uchar)*imgW*dispH);
uchar* edges = (uchar*)cvAlloc(sizeof(uchar)*imgW*imgH);
_CvDPCell* cells = (_CvDPCell*)cvAlloc(sizeof(_CvDPCell)*imgW*MAX(dispH,(imgH+1)/2));
_CvRightImData* rData = (_CvRightImData*)cvAlloc(sizeof(_CvRightImData)*imgW);
int* reliabilities = (int*)cells;
for( y = 0; y < imgH; y++ )
{
for( y = 0; y < imgH; y++ )
{
uchar* srcdata1 = src1 + widthStep * y;
uchar* srcdata2 = src2 + widthStep * y;
uchar* srcdata2 = src2 + widthStep * y;
//init rData
prevval = prev = srcdata2[0];
for( j = 1; j < imgW; j++ )
{
{
curr = srcdata2[j];
val = (uchar)((curr + prev)>>1);
rData[j-1].max_val = (uchar)CV_IMAX3( val, prevval, prev );
@@ -130,12 +130,12 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// fill dissimularity space image
for( i = 1; i <= maxDisparity + 1; i++ )
{
{
dsi += imgW;
rData--;
for( j = i - 1; j < imgW - 1; j++ )
{
int t;
{
int t;
if( (t = srcdata1[j] - rData[j+1].max_val) >= 0 )
{
dsi[j] = (uchar)t;
@@ -160,36 +160,36 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
for( j = 3; j < imgW-4; j++ )
{
edges[y*imgW+j] = 0;
if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
CV_IMIN3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 1;
}
if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
CV_IMIN3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 2;
}
}
}
}
//find correspondence using dynamical programming
//init DP table
for( x = 0; x < imgW; x++ )
for( x = 0; x < imgW; x++ )
{
CELL(0,x).sum = CELL(dispH-1,x).sum = ICV_MAX_DP_SUM_VAL;
CELL(0,x).step = CELL(dispH-1,x).step = ICV_DP_STEP_LEFT;
}
for( d = 2; d < dispH; d++ )
for( d = 2; d < dispH; d++ )
{
CELL(d,d-2).sum = ICV_MAX_DP_SUM_VAL;
CELL(d,d-2).step = ICV_DP_STEP_UP;
}
}
CELL(1,0).sum = 0;
CELL(1,0).step = ICV_DP_STEP_LEFT;
for( x = 1; x < imgW; x++ )
{
{
int d = MIN( x + 1, maxDisparity + 1);
uchar* _edges = edges + y*imgW + x;
int e0 = _edges[0] & 1;
@@ -201,17 +201,17 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
int sum[3];
//check left step
sum[0] = _cell[d-dispH].sum - param2;
sum[0] = _cell[d-dispH].sum - param2;
//check up step
if( _cell[d+1].step != ICV_DP_STEP_DIAG && e0 )
{
sum[1] = _cell[d+1].sum + param1;
if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
{
int t;
sum[2] = _cell[d-1-dispH].sum + param1;
t = sum[1] < sum[0];
@@ -223,7 +223,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
_cell[d].sum = sum[t] + s;
}
else
{
{
_cell[d].step = ICV_DP_STEP_DIAG;
_cell[d].sum = sum[2] + s;
}
@@ -242,7 +242,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
}
}
else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
{
sum[2] = _cell[d-1-dispH].sum + param1;
if( sum[0] <= sum[2] )
@@ -278,25 +278,25 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
min_val = CELL(i,imgW-1).sum;
}
}
//track optimal pass
for( x = imgW - 1; x > 0; x-- )
{
{
dispdata[x] = (uchar)(d - 1);
while( CELL(d,x).step == ICV_DP_STEP_UP ) d++;
if ( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
s = x;
while( CELL(d,x).step == ICV_DP_STEP_DIAG )
while( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
d--;
x--;
d--;
x--;
}
for( i = x; i < s; i++ )
{
dispdata[i] = (uchar)(d-1);
}
}
}
}
}//for x
}// for y
@@ -319,9 +319,9 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
{
for( y = 1; y < imgH - 1; y++ )
{
if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) -
CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) -
CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+x] |= 4;
@@ -332,14 +332,14 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
}
//remove along any particular row, every gradient
//remove along any particular row, every gradient
//for which two adjacent columns do not agree.
for( y = 0; y < imgH; y++ )
{
prev = edges[y*imgW];
for( x = 1; x < imgW - 1; x++ )
{
curr = edges[y*imgW+x];
curr = edges[y*imgW+x];
if( (curr & 4) &&
( !( prev & 4 ) ||
!( edges[y*imgW+x+1] & 4 ) ) )
@@ -360,41 +360,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
;
s = y - i;
for( ; i < y; i++ )
{
{
reliabilities[i*imgW+x] = s;
}
}
}
}
//Y - propagate reliable regions
}
//Y - propagate reliable regions
for( x = 0; x < imgW; x++ )
{
{
for( y = 0; y < imgH; y++ )
{
{
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 4) &&
d > 0 )//highly || moderately
{
{
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = y - 1; i >= 0; i-- )
{
if( ( edges[i*imgW+x] & 4 ) ||
( dest[i*widthStep+x] < d &&
( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
disparities[i*widthStep+x] = (uchar)d;
}
disparities[i*widthStep+x] = (uchar)d;
}
//down propagation
for( i = y + 1; i < imgH; i++ )
{
if( ( edges[i*imgW+x] & 4 ) ||
( dest[i*widthStep+x] < d &&
( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
disparities[i*widthStep+x] = (uchar)d;
@@ -417,41 +417,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
for( ; x < imgW && dest[y*widthStep+x] == dest[y*widthStep+x-1]; x++ );
s = x - i;
for( ; i < x; i++ )
{
{
reliabilities[y*imgW+i] = s;
}
}
}
}
//X - propagate reliable regions
for( y = 0; y < imgH; y++ )
{
}
//X - propagate reliable regions
for( y = 0; y < imgH; y++ )
{
for( x = 0; x < imgW; x++ )
{
{
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 1) &&
d > 0 )//highly || moderately
{
{
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = x - 1; i >= 0; i-- )
{
if( (edges[y*imgW+i] & 1) ||
( dest[y*widthStep+i] < d &&
( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
}
}
//down propagation
for( i = x + 1; i < imgW; i++ )
{
if( (edges[y*imgW+i] & 1) ||
( dest[y*widthStep+i] < d &&
( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
( reliabilities[y*imgW+x] < param5 &&
( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
@@ -466,10 +466,10 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
}
//release resources
cvFree( &dsi );
cvFree( &edges );
cvFree( &cells );
cvFree( &rData );
cvFree( &dsi );
cvFree( &edges );
cvFree( &cells );
cvFree( &rData );
}
@@ -483,7 +483,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// rightImage - right image of stereo-pair (format 8uC1).
// mode -mode of correspondance retrieval (now CV_RETR_DP_BIRCHFIELD only)
// dispImage - destination disparity image
// maxDisparity - maximal disparity
// maxDisparity - maximal disparity
// param1, param2, param3, param4, param5 - parameters of algorithm
// Returns:
// Notes:
@@ -491,43 +491,43 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
// All images must have format 8uC1.
//F*/
CV_IMPL void
cvFindStereoCorrespondence(
cvFindStereoCorrespondence(
const CvArr* leftImage, const CvArr* rightImage,
int mode,
CvArr* depthImage,
int maxDisparity,
double param1, double param2, double param3,
int maxDisparity,
double param1, double param2, double param3,
double param4, double param5 )
{
{
CV_FUNCNAME( "cvFindStereoCorrespondence" );
__BEGIN__;
CvMat *src1, *src2;
CvMat *src1, *src2;
CvMat *dst;
CvMat src1_stub, src2_stub, dst_stub;
int coi;
int coi;
CV_CALL( src1 = cvGetMat( leftImage, &src1_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( src2 = cvGetMat( rightImage, &src2_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( dst = cvGetMat( depthImage, &dst_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
// check args
if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
// check args
if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
CV_MAT_TYPE( dst->type ) != CV_8UC1) CV_ERROR(CV_StsUnsupportedFormat,
"All images must be single-channel and have 8u" );
"All images must be single-channel and have 8u" );
if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ) )
CV_ERROR( CV_StsUnmatchedSizes, "" );
if( maxDisparity <= 0 || maxDisparity >= src1->width || maxDisparity > 255 )
CV_ERROR(CV_StsOutOfRange,
CV_ERROR(CV_StsOutOfRange,
"parameter /maxDisparity/ is out of range");
if( mode == CV_DISPARITY_BIRCHFIELD )
{
if( param1 == CV_UNDEF_SC_PARAM ) param1 = CV_IDP_BIRCHFIELD_PARAM1;
@@ -536,10 +536,10 @@ cvFindStereoCorrespondence(
if( param4 == CV_UNDEF_SC_PARAM ) param4 = CV_IDP_BIRCHFIELD_PARAM4;
if( param5 == CV_UNDEF_SC_PARAM ) param5 = CV_IDP_BIRCHFIELD_PARAM5;
CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
src2->data.ptr, dst->data.ptr,
CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
src2->data.ptr, dst->data.ptr,
cvGetMatSize( src1 ), src1->step,
maxDisparity, (float)param1, (float)param2, (float)param3,
maxDisparity, (float)param1, (float)param2, (float)param3,
(float)param4, (float)param5 ) );
}
else
@@ -547,7 +547,7 @@ cvFindStereoCorrespondence(
CV_ERROR( CV_StsBadArg, "Unsupported mode of function" );
}
__END__;
__END__;
}
/* End of file. */


@@ -41,7 +41,7 @@
#include "precomp.hpp"
CvStatus CV_STDCALL
static CvStatus
icvJacobiEigens_32f(float *A, float *V, float *E, int n, float eps)
{
int i, j, k, ind;
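When icvJacobiEigens_32f becomes file-local, the CV_STDCALL calling-convention macro is dropped along with the linkage change: a static helper is only ever called inside its own translation unit, so there is no exported interface to pin a convention to. A standalone sketch with hypothetical names:

typedef int Status;

// Before (exported style):
//   Status MYLIB_STDCALL jacobiEigens(float* a, int n);
// After: internal linkage, no calling-convention macro needed.
static Status jacobiEigens(float* a, int n)
{
    (void)a; (void)n;   // the actual Jacobi iteration is elided
    return 0;
}

int main()
{
    float m[4] = { 1.f, 0.f, 0.f, 1.f };
    return jacobiEigens(m, 2);
}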


@@ -83,7 +83,7 @@ static int CompareContour(const void* a, const void* b, void* )
return (dx < wt && dy < ht);
}
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
static void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{ /* Create contours: */
IplImage* pIB = NULL;
CvSeq* cnt = NULL;


@@ -160,9 +160,5 @@ public:
};
/* Blob detector constructor: */
CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
//CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}

File diff suppressed because it is too large.


@@ -48,7 +48,7 @@
#include "_kdtree.hpp"
#include "_featuretree.h"
#if _MSC_VER >= 1400
#if defined _MSC_VER && _MSC_VER >= 1400
#pragma warning(disable:4996) // suppress "function call with parameters may be unsafe" in std::copy
#endif
@@ -95,7 +95,7 @@ class CvKDTreeWrap : public CvFeatureTree {
for (int j = 0; j < d->rows; ++j) {
const typename __treetype::scalar_type* dj =
(const typename __treetype::scalar_type*) dptr;
(const typename __treetype::scalar_type*) dptr;
int* resultsj = (int*) resultsptr;
double* distj = (double*) distptr;
@@ -103,8 +103,8 @@ class CvKDTreeWrap : public CvFeatureTree {
assert((int)nn.size() <= k);
for (unsigned int j = 0; j < nn.size(); ++j) {
*resultsj++ = *nn[j].p;
*distj++ = nn[j].dist;
*resultsj++ = *nn[j].p;
*distj++ = nn[j].dist;
}
std::fill(resultsj, resultsj + k - nn.size(), -1);
std::fill(distj, distj + k - nn.size(), 0);
@@ -117,16 +117,16 @@ class CvKDTreeWrap : public CvFeatureTree {
template <class __treetype>
int find_ortho_range(CvMat* bounds_min, CvMat* bounds_max,
CvMat* results) {
CvMat* results) {
int rn = results->rows * results->cols;
std::vector<int> inbounds;
dispatch_cvtype(mat, ((__treetype*)data)->
find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
(typename __treetype::scalar_type*)bounds_max->data.ptr,
inbounds));
find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
(typename __treetype::scalar_type*)bounds_max->data.ptr,
inbounds));
std::copy(inbounds.begin(),
inbounds.begin() + std::min((int)inbounds.size(), rn),
(int*) results->data.ptr);
inbounds.begin() + std::min((int)inbounds.size(), rn),
(int*) results->data.ptr);
return (int)inbounds.size();
}
@@ -135,7 +135,7 @@ class CvKDTreeWrap : public CvFeatureTree {
public:
CvKDTreeWrap(CvMat* _mat) : mat(_mat) {
// * a flag parameter should tell us whether
// * (a) user ensures *mat outlives *this and is unchanged,
// * (a) user ensures *mat outlives *this and is unchanged,
// * (b) we take reference and user ensures mat is unchanged,
// * (c) we copy data, (d) we own and release data.
@@ -144,8 +144,8 @@ public:
tmp[j] = j;
dispatch_cvtype(mat, data = new tree_type
(&tmp[0], &tmp[0] + tmp.size(), mat->cols,
tree_type::deref_type(mat)));
(&tmp[0], &tmp[0] + tmp.size(), mat->cols,
tree_type::deref_type(mat)));
}
~CvKDTreeWrap() {
dispatch_cvtype(mat, delete (tree_type*) data);
@@ -185,15 +185,15 @@ public:
assert(CV_MAT_TYPE(results->type) == CV_32SC1);
dispatch_cvtype(mat, find_nn<tree_type>
(desc, k, emax, results, dist));
(desc, k, emax, results, dist));
}
int FindOrthoRange(CvMat* bounds_min, CvMat* bounds_max,
CvMat* results) {
CvMat* results) {
bool free_bounds = false;
int count = -1;
if (bounds_min->cols * bounds_min->rows != dims() ||
bounds_max->cols * bounds_max->rows != dims())
bounds_max->cols * bounds_max->rows != dims())
CV_Error(CV_StsUnmatchedSizes, "bounds_{min,max} must 1 x dims or dims x 1");
if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(bounds_max->type))
CV_Error(CV_StsUnmatchedFormats, "bounds_{min,max} must have same type");
@@ -218,7 +218,7 @@ public:
assert(bounds_max->rows * bounds_max->cols == dims());
dispatch_cvtype(mat, count = find_ortho_range<tree_type>
(bounds_min, bounds_max,results));
(bounds_min, bounds_max,results));
if (free_bounds) {
cvReleaseMat(&bounds_min);


@@ -1247,7 +1247,7 @@ int _cvSolveEqu1th(T c1, T c0, T* X);
vertices_number: in, number of vertices in polygon
Return :
--------------------------------------------------------------------------*/
void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
static void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
{
int N = 2*vertices_number;
cvSetSeqBlockSize(pVoronoiDiagramInt->SiteSeq,N*pVoronoiDiagramInt->SiteSeq->elem_size);


@@ -50,6 +50,7 @@
typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
#if 0
/* Optimization using Levenberg-Marquardt */
void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
pointer_LMFunc function,
@@ -75,7 +76,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
CvMat *matrJtJN = 0;
CvMat *matrJt = 0;
CvMat *vectB = 0;
CV_FUNCNAME( "cvLevenbegrMarquardtOptimization" );
__BEGIN__;
@@ -104,7 +105,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector X0 must be 1" );
}
if( observRes->cols != 1 )
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector observed rusult must be 1" );
@@ -157,8 +158,8 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
/* Print result of function to file */
/* Compute error */
cvSub(observRes,resFunc,error);
cvSub(observRes,resFunc,error);
//valError = error_function(observRes,resFunc);
/* Need to use new version of computing error (norm) */
valError = cvNorm(observRes,resFunc);
@@ -169,7 +170,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
/* Define optimal delta for J'*J*delta=J'*error */
/* compute J'J */
cvMulTransposed(Jac,matrJtJ,1);
cvCopy(matrJtJ,matrJtJN);
/* compute J'*error */
@@ -244,6 +245,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
return;
}
#endif
/*------------------------------------------------------------------------------*/
#if 0


@@ -65,9 +65,13 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
*/
#define TRACK_BUNDLE_FILE "d:\\test\\bundle.txt"
void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPoints,
CvMat** pointsPres, int numImages,
CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon );
/* ============== Bundle adjustment optimization ================= */
void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
static void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
{
/* Compute derivate for given projection matrix points and status of points */
@@ -201,7 +205,7 @@ void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMa
}
/*======================================================================================*/
void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
static void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
{
CV_FUNCNAME( "icvComputeDerivateProjAll" );
__BEGIN__;
@@ -228,7 +232,7 @@ void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **point
}
/*======================================================================================*/
void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
static void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
{
CV_FUNCNAME( "icvComputeDerivatePoints" );
@@ -267,7 +271,7 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
{
CV_ERROR( CV_StsOutOfRange, "Size of projection matrix (projMatr) must be 3x4" );
}
if( !CV_IS_MAT(presPoints) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Status must be a matrix 1xN" );
@@ -282,10 +286,10 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
{
CV_ERROR( CV_StsUnsupportedFormat, "derivPoint must be a matrix 2 x 4VisNum" );
}
/* ----- End test ----- */
/* ----- End test ----- */
/* Compute derivates by points */
double p[12];
int i;
for( i = 0; i < 12; i++ )
@@ -311,16 +315,16 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
piX[0] = X[0]*p[0] + X[1]*p[1] + X[2]*p[2] + X[3]*p[3];
piX[1] = X[0]*p[4] + X[1]*p[5] + X[2]*p[6] + X[3]*p[7];
piX[2] = X[0]*p[8] + X[1]*p[9] + X[2]*p[10] + X[3]*p[11];
int i,j;
double tmp3 = 1/(piX[2]*piX[2]);
for( j = 0; j < 2; j++ )//for x and y
{
for( i = 0; i < 4; i++ )// for X,Y,Z,W
{
cvmSet( derivPoint,
cvmSet( derivPoint,
j, currVisPoint*4+i,
(p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3 );
}
@@ -337,8 +341,9 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
__END__;
return;
}
/*======================================================================================*/
void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
static void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
{
CV_FUNCNAME( "icvComputeDerivatePointsAll" );
__BEGIN__;
@@ -364,7 +369,7 @@ void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **poi
return;
}
/*======================================================================================*/
void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
static void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
{
int *shifts = 0;
@@ -404,10 +409,10 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
{
if( cvmGet(presPoints[currImage],0,currPoint) > 0 )
{
sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+j);
sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+j);
}
}
@@ -429,11 +434,11 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
__END__;
cvFree( &shifts);
return;
}
/*======================================================================================*/
void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
static void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
{
CV_FUNCNAME( "icvComputeMatrixVAll" );
__BEGIN__;
@@ -460,7 +465,7 @@ void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
return;
}
/*======================================================================================*/
void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
static void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
{
CV_FUNCNAME( "icvComputeMatrixW" );
__BEGIN__;
@@ -509,10 +514,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
for( int currCol = 0; currCol < 4; currCol++ )
{
double sum;
sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
cvmGet(pointDeriv[currImage],0,currVis*4+currCol);
sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
cvmGet(pointDeriv[currImage],1,currVis*4+currCol);
cvmSet(matrW,currImage*12+currLine,currPoint*4+currCol,sum);
@@ -529,7 +534,7 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
}
}
}
#ifdef TRACK_BUNDLE
{
FILE *file;
@@ -560,9 +565,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
__END__;
return;
}
/*======================================================================================*/
/* Compute jacobian mult projection matrices error */
void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
static void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
{
CV_FUNCNAME( "icvComputeJacErrorProj" );
__BEGIN__;
@@ -596,7 +602,7 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
double sum = 0;
for( int i = 0; i < num; i++ )
{
sum += cvmGet(projDeriv[currImage],i,currCol) *
sum += cvmGet(projDeriv[currImage],i,currCol) *
cvmGet(projErrors[currImage],i%2,i/2);
}
cvmSet(jacProjErr,currImage*12+currCol,0,sum);
@@ -627,9 +633,10 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
__END__;
return;
}
/*======================================================================================*/
/* Compute jacobian mult points error */
void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
static void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
{
int *shifts = 0;
@@ -734,6 +741,7 @@ void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors
}
/*======================================================================================*/
/* Reconstruct 4D points using status */
void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat** presPoints,
CvMat *points4D,int numImages,CvMat **projError)
@@ -797,7 +805,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
numVisProj++;
}
}
if( numVisProj < 2 )
{
/* This point can't be reconstructed */
@@ -821,7 +829,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
y = cvmGet(projPoints[currImage],1,currPoint);
for( int k = 0; k < 4; k++ )
{
matrA_dat[currVisProj*12 + k] =
matrA_dat[currVisProj*12 + k] =
x * cvmGet(projMatrs[currImage],2,k) - cvmGet(projMatrs[currImage],0,k);
matrA_dat[currVisProj*12+4 + k] =
@@ -854,7 +862,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
CvMat point3D;
double point3D_dat[3];
point3D = cvMat(3,1,CV_64F,point3D_dat);
int currPoint;
int numVis = 0;
double totalError = 0;
@@ -897,7 +905,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
/*======================================================================================*/
void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
static void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
{
CV_FUNCNAME( "icvProjPointsStatusFunc" );
__BEGIN__;
@@ -943,7 +951,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
fclose(file);
}
#endif
int currImage;
for( currImage = 0; currImage < numImages; currImage++ )
{
@@ -969,7 +977,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
fclose(file);
}
#endif
cvmMul(projMatrs[currImage],&point4D,&point3D);
double w = point3D_dat[2];
cvmSet(projPoints[currImage],0,currVisPoint,point3D_dat[0]/w);
@@ -998,11 +1006,11 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
}
/*======================================================================================*/
void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
static void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
{
/* Free each matrix */
int currMatr;
if( *matrArray != 0 )
{/* Need delete */
for( currMatr = 0; currMatr < numMatr; currMatr++ )
@@ -1015,7 +1023,7 @@ void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
}
/*======================================================================================*/
void *icvClearAlloc(int size)
static void *icvClearAlloc(int size)
{
void *ptr = 0;
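(The change repeated throughout these hunks, giving file-local helpers internal linkage with static, is presumably what keeps the stricter gcc flags of this commit quiet, e.g. -Wmissing-declarations, which fires on external definitions that have no prior prototype. A minimal, self-contained illustration with hypothetical names, not code from the patch:

    #include <stdlib.h>
    #include <string.h>

    /* Internal linkage: the definition doubles as its declaration, so
       -Wmissing-declarations has nothing to report, and the symbol cannot
       clash with another translation unit. */
    static void *clear_alloc(size_t size)
    {
        void *ptr = malloc(size);
        if (ptr != NULL)
            memset(ptr, 0, size);
        return ptr;
    }

    int main(void) { free(clear_alloc(16)); return 0; }
)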
@@ -1047,6 +1055,7 @@ int icvDeleteSparsInPoints( int numImages,
}
#endif
/*======================================================================================*/
/* !!! it may be useful to return the norm of the error */
/* !!! may not work correctly when not all 4D points are visible */
@@ -1054,15 +1063,15 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
CvMat** pointsPres, int numImages,
CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon )
{
CvMat *vectorX_points4D = 0;
CvMat **vectorX_projMatrs = 0;
CvMat **vectorX_projMatrs = 0;
CvMat *newVectorX_points4D = 0;
CvMat **newVectorX_projMatrs = 0;
CvMat *changeVectorX_points4D = 0;
CvMat *changeVectorX_projMatrs = 0;
CvMat *changeVectorX_projMatrs = 0;
CvMat **observVisPoints = 0;
CvMat **projVisPoints = 0;
@@ -1097,17 +1106,17 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
{
CV_ERROR( CV_StsOutOfRange, "Number of images must be more than zero" );
}
if( maxIter < 1 || maxIter > 2000 )
{
CV_ERROR( CV_StsOutOfRange, "Maximum number of iteration must be in [1..1000]" );
}
if( epsilon < 0 )
{
CV_ERROR( CV_StsOutOfRange, "Epsilon parameter must be >= 0" );
}
if( !CV_IS_MAT(resultPoints4D) )
{
CV_ERROR( CV_StsUnsupportedFormat, "resultPoints4D must be a matrix 4 x NumPnt" );
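(Taken together, the checks above pin down the expected call: at least one image, maxIter in a small positive range, a non-negative epsilon, and a 4 x NumPnt output matrix. A rough usage sketch under those assumptions; matrix layouts for the observation and presence arrays are inferred, not guaranteed by this hunk, and the data filling is elided:

    /* numImages initial 3x4 cameras plus per-view observations and presence
       masks; refined cameras and 4xN homogeneous points come back in
       resultProj / points4D. */
    CvMat *projMatrs[2], *observ[2], *present[2], *resultProj[2];
    /* ... allocate the per-view matrices and fill them with real data ... */
    CvMat *points4D = cvCreateMat(4, numPoints, CV_64F);
    cvOptimizeLevenbergMarquardtBundle(projMatrs, observ, present,
                                       2,      /* numImages      */
                                       resultProj, points4D,
                                       100,    /* maxIter, >= 1  */
                                       1e-6);  /* epsilon, >= 0  */
)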
@@ -1139,7 +1148,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
CV_CALL( changeVectorX_projMatrs = cvCreateMat(3,4,CV_64F));
int currImage;
/* ----- Test input params ----- */
for( currImage = 0; currImage < numImages; currImage++ )
{
@@ -1355,7 +1364,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(vectorX_projMatrs[i]);
fprintf(file," test 6.01 prev normProj=%lf\n",norm);
}
fclose(file);
}
#endif
@@ -1384,7 +1393,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 prev matrsUk=%lf\n",norm);
}
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
@@ -1427,7 +1436,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 post1 matrsUk=%lf\n",norm);
}
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
@@ -1612,7 +1621,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
newError += currNorm * currNorm;
}
newError = sqrt(newError);
currIter++;
@@ -1732,7 +1741,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
} while( change > epsilon && currIter < maxIter );
/*--------------------------------------------*/
/* Optimization complete; copy computed params */
/* Copy projection matrices */

View File

@@ -46,6 +46,8 @@
/* Valery Mosyagin */
#if 0
typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
@@ -61,7 +63,7 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
/* Jacobian computation for trifocal case */
void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
static void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
{
CV_FUNCNAME( "icvJacobianFunction_ProjTrifocal" );
__BEGIN__;
@@ -101,7 +103,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
/* Fill Jacobian matrix */
int currProjPoint;
int currMatr;
cvZero(Jacobian);
for( currMatr = 0; currMatr < 3; currMatr++ )
{
@@ -137,7 +139,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
{
for( i = 0; i < 4; i++ )// for X,Y,Z,W
{
cvmSet( Jacobian,
cvmSet( Jacobian,
currMatr*numPoints*2+currProjPoint*2+j, 36+currProjPoint*4+i,
(p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3 );
}
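(The value stored here is the usual derivative of a pinhole projection with respect to the homogeneous point: writing P for the 3x4 matrix of the current view and u_j = (PX)_j / (PX)_3 for j = 1, 2,

    \frac{\partial u_j}{\partial X_i} = \frac{P_{ji}\,(PX)_3 - P_{3i}\,(PX)_j}{(PX)_3^{\,2}},

which is what p[j*4+i]*piX[2] - p[8+i]*piX[j] computes in 0-based indexing, assuming tmp3 holds 1/(PX)_3^2; that assignment lies outside this hunk.)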
@@ -161,7 +163,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
return;
}
void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
static void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
{
/* Computes the function at a given point */
/* Computes projected points using 3 projection matrices and 3D points */
@@ -264,7 +266,7 @@ void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
/*----------------------------------------------------------------------------------------*/
void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
static void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
CvMat **resultProjMatrs, CvMat *resultPoints4D)
{
@@ -312,7 +314,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
{
CV_ERROR( CV_StsNullPtr, "Some of projPoints is a NULL pointer" );
}
if( resultProjMatrs[i] == 0 )
{
CV_ERROR( CV_StsNullPtr, "Some of resultProjMatrs is a NULL pointer" );
@@ -402,7 +404,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
cvmSet(vectorX0,36 + currPoint*4 + 3,0,cvmGet(points4D,3,currPoint));
}
/* Allocate memory for result */
cvLevenbergMarquardtOptimization( icvJacobianFunction_ProjTrifocal, icvFunc_ProjTrifocal,
vectorX0,observRes,optimX,100,1e-6);
@@ -441,7 +443,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
/*------------------------------------------------------------------------------*/
/* Create good points using status information */
void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
static void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
{
*goodPoints = 0;
@@ -493,3 +495,4 @@ void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
return;
}
#endif
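(The #if 0 added near the top of this file, matched by this #endif, compiles the whole block out instead of deleting it; with the stricter gcc settings that avoids warnings such as -Wunused-function for static reference code that nothing calls. This reading of the intent is an assumption based on the commit title. Minimal illustration with a hypothetical function:

    #if 0   /* reference-only code, excluded from the build */
    static void unused_reference_helper(void)
    {
        /* kept for documentation; if compiled, an uncalled static function
           would trip -Wunused-function */
    }
    #endif
)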

View File

@@ -87,7 +87,7 @@ double _cvStretchingWork(CvPoint2D32f* P1,
L1 = sqrt( (double)P1->x*P1->x + P1->y*P1->y);
L2 = sqrt( (double)P2->x*P2->x + P2->y*P2->y);
L_min = MIN(L1, L2);
dL = fabs( L1 - L2 );
@@ -96,15 +96,15 @@ double _cvStretchingWork(CvPoint2D32f* P1,
////////////////////////////////////////////////////////////////////////////////////
CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
double angle( CvPoint2D32f A, CvPoint2D32f B );
double _cvBendingWork( CvPoint2D32f* B0,
CvPoint2D32f* F0,
CvPoint2D32f* B1,
CvPoint2D32f* F1/*,
CvPoint* K*/)
{
CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
double angle( CvPoint2D32f A, CvPoint2D32f B );
CvPoint2D32f Q0, Q1, Q2;
CvPoint2D32f Q1_nm = { 0, 0 }, Q2_nm = { 0, 0 };
double d0, d1, d2, des, t_zero;
@@ -140,7 +140,7 @@ double _cvBendingWork( CvPoint2D32f* B0,
d_angle = d_angle - CV_PI*0.5;
d_angle = fabs(d_angle);
K->x = -K->x;
K->y = -K->y;
B1->x = -B1->x;
@@ -427,7 +427,7 @@ void _cvWorkSouthEast(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D
small_edge.y = NULL_EDGE*edges1[i-2].y;
w1 = W[i-1][j-1].w_east + _cvBendingWork(&edges1[i-2],
&edges1[i-1],
&edges1[i-1],
/*&null_edge*/&small_edge,
&edges2[j-1]/*,
&edges2[j-2]*/);
@@ -442,7 +442,7 @@ void _cvWorkSouthEast(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D
small_edge.y = NULL_EDGE*edges2[j-2].y;
w3 = W[i-1][j-1].w_south + _cvBendingWork( /*&null_edge*/&small_edge,
&edges1[i-1],
&edges1[i-1],
&edges2[j-2],
&edges2[j-1]/*,
&edges1[i-2]*/);
@@ -511,6 +511,7 @@ void _cvWorkSouth(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D32f*
}
}
//===================================================
CvPoint2D32f Q(CvPoint2D32f q0,CvPoint2D32f q1,CvPoint2D32f q2,double t)
{
@@ -519,14 +520,14 @@ CvPoint2D32f Q(CvPoint2D32f q0,CvPoint2D32f q1,CvPoint2D32f q2,double t)
q.x = (float)(q0.x*(1-t)*(1-t) + 2*q1.x*t*(1-t) + q2.x*t*t);
q.y = (float)(q0.y*(1-t)*(1-t) + 2*q1.y*t*(1-t) + q2.y*t*t);
return q;
return q;
}
double angle(CvPoint2D32f A, CvPoint2D32f B)
{
return acos( (A.x*B.x + A.y*B.y)/sqrt( (double)(A.x*A.x + A.y*A.y)*(B.x*B.x + B.y*B.y) ) );
}
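(For orientation, Q() evaluates a quadratic Bezier segment and angle() the unsigned angle between two edge vectors; both formulas restate the code immediately above:

    Q(t) = (1-t)^2 q_0 + 2t(1-t)\,q_1 + t^2 q_2, \qquad t \in [0,1],
    \angle(A,B) = \arccos\!\left(\frac{A \cdot B}{\lVert A\rVert\,\lVert B\rVert}\right).
)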
#if 0
/***************************************************************************************\
*
* This function computes an intermediate polygon between contour1 and contour2
@@ -536,14 +537,14 @@ double angle(CvPoint2D32f A, CvPoint2D32f B)
* param = [0,1]; 0 corresponds to contour1, 1 to contour2
*
\***************************************************************************************/
CvSeq* icvBlendContours(CvSeq* contour1,
static CvSeq* icvBlendContours(CvSeq* contour1,
CvSeq* contour2,
CvSeq* corr,
double param,
CvMemStorage* storage)
{
int j;
CvSeqWriter writer01;
CvSeqReader reader01;
@@ -558,7 +559,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
int corr_point;
// Create output sequence.
CvSeq* output = cvCreateSeq(0,
CvSeq* output = cvCreateSeq(0,
sizeof(CvSeq),
sizeof(CvPoint),
storage );
@@ -570,7 +571,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
point1 = (CvPoint* )malloc( Ni*sizeof(CvPoint) );
point2 = (CvPoint* )malloc( Nj*sizeof(CvPoint) );
// Initialize arrays of point
// Initialize arrays of point
cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
@@ -583,7 +584,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
i = Ni-1; //correspondence to points of contour1
for( ; corr; corr = corr->h_next )
{
{
//Initializes process of sequential reading from sequence
cvStartReadSeq( corr, &reader01, 0 );
@@ -595,7 +596,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
// Compute point of intermediate polygon.
point_output.x = cvRound(point1[i].x + param*( point2[corr_point].x - point1[i].x ));
point_output.y = cvRound(point1[i].y + param*( point2[corr_point].y - point1[i].y ));
// Write element to sequence.
CV_WRITE_SEQ_ELEM( point_output, writer01 );
}
@@ -603,7 +604,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
}
// Updates sequence header.
cvFlushSeqWriter( &writer01 );
return output;
}
@@ -621,9 +622,9 @@ CvSeq* icvBlendContours(CvSeq* contour1,
**************************************************************************************************/
void icvCalcContoursCorrespondence(CvSeq* contour1,
CvSeq* contour2,
CvSeq** corr,
static void icvCalcContoursCorrespondence(CvSeq* contour1,
CvSeq* contour2,
CvSeq** corr,
CvMemStorage* storage)
{
int i,j; // counter of cycles
@@ -660,7 +661,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
edges1 = (CvPoint2D32f* )malloc( (Ni-1)*sizeof(CvPoint2D32f) );
edges2 = (CvPoint2D32f* )malloc( (Nj-1)*sizeof(CvPoint2D32f) );
// Initialize arrays of point
// Initialize arrays of point
cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
@@ -679,7 +680,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
edges2[i].y = (float)( point2[i+1].y - point2[i].y );
};
// Find infinity constant
// Find infinity constant
//inf=1;
/////////////
@@ -716,11 +717,11 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
{
j=0;/////////
W[i][j].w_east = W[i-1][j].w_east;
W[i][j].w_east = W[i][j].w_east /*+
W[i][j].w_east = W[i][j].w_east /*+
_cvBendingWork( &edges1[i-2], &edges1[i-1], &null_edge, &null_edge, NULL )*/;
W[i][j].w_east = W[i][j].w_east + _cvStretchingWork( &edges2[i-1], &null_edge );
W[i][j].path_e = PATH_TO_E;
j=1;//////////
W[i][j].w_south = inf;
@@ -732,18 +733,18 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
small_edge.x = NULL_EDGE*edges1[i-2].x;
small_edge.y = NULL_EDGE*edges1[i-2].y;
W[i][j].w_southeast = W[i][j].w_southeast +
W[i][j].w_southeast = W[i][j].w_southeast +
_cvBendingWork( &edges1[i-2], &edges1[i-1], /*&null_edge*/&small_edge, &edges2[j-1]/*, &edges2[Nj-2]*/);
W[i][j].path_se = PATH_TO_E;
}
for(j=2; j<Nj; j++)
{
{
i=0;//////////
W[i][j].w_south = W[i][j-1].w_south;
W[i][j].w_south = W[i][j].w_south + _cvStretchingWork( &null_edge, &edges2[j-1] );
W[i][j].w_south = W[i][j].w_south /*+
W[i][j].w_south = W[i][j].w_south /*+
_cvBendingWork( &null_edge, &null_edge, &edges2[j-2], &edges2[j-1], NULL )*/;
W[i][j].path_s = 3;
@@ -758,7 +759,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
small_edge.x = NULL_EDGE*edges2[j-2].x;
small_edge.y = NULL_EDGE*edges2[j-2].y;
W[i][j].w_southeast = W[i][j].w_southeast +
W[i][j].w_southeast = W[i][j].w_southeast +
_cvBendingWork( /*&null_edge*/&small_edge, &edges1[i-1], &edges2[j-2], &edges2[j-1]/*, &edges1[Ni-2]*/);
W[i][j].path_se = 3;
}
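(Stepping back, the W[i][j] cells implement a dynamic program over the edges of the two contours: a cell is reached from the west, advancing along contour1 only, from the south, advancing along contour2 only, or diagonally, matching edge i of contour1 with edge j of contour2; each move pays stretching and bending work, and path_e / path_s / path_se record the cheapest predecessor for the backtracking loop further down. Informally, with notation that is mine rather than the code's,

    W(i,j) = \min\bigl( W(i{-}1,j) + E(e^{1}_{i}, \varnothing),\;
                        W(i,j{-}1) + E(\varnothing, e^{2}_{j}),\;
                        W(i{-}1,j{-}1) + E(e^{1}_{i}, e^{2}_{j}) \bigr),

where E combines _cvStretchingWork and _cvBendingWork for the edges involved.)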
@@ -773,8 +774,8 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
i=Ni-1;j=Nj-1;
*corr = cvCreateSeq(0,
sizeof(CvSeq),
*corr = cvCreateSeq(0,
sizeof(CvSeq),
sizeof(int),
storage );
@@ -806,26 +807,26 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
{
CV_WRITE_SEQ_ELEM( j, writer );
switch( path )
switch( path )
{
case PATH_TO_E:
path = W[i][j].path_e;
i--;
cvFlushSeqWriter( &writer );
corr01->h_next = cvCreateSeq( 0,
sizeof(CvSeq),
corr01->h_next = cvCreateSeq( 0,
sizeof(CvSeq),
sizeof(int),
storage );
corr01 = corr01->h_next;
cvStartAppendToSeq( corr01, &writer );
break;
case PATH_TO_SE:
path = W[i][j].path_se;
j--; i--;
cvFlushSeqWriter( &writer );
corr01->h_next = cvCreateSeq( 0,
sizeof(CvSeq),
corr01->h_next = cvCreateSeq( 0,
sizeof(CvSeq),
sizeof(int),
storage );
corr01 = corr01->h_next;
@@ -852,4 +853,4 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
free(edges1);
free(edges2);
}
#endif

File diff suppressed because it is too large

View File

@@ -41,11 +41,11 @@
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#if _MSC_VER >= 1200
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4251 4710 4711 4514 4996 )
#endif
#ifdef HAVE_CVCONFIG_H
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
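(The defined guard added here mirrors the other precomp headers touched by this commit. The plain form #if _MSC_VER >= 1200 is legal, since an undefined macro evaluates to 0 in #if, but it warns once gcc's -Wundef is in effect; short-circuiting on defined _MSC_VER suppresses that. The -Wundef motivation is inferred from the commit title, not stated in the patch:

    /* gcc -Wundef example.c
         warns : #if _MSC_VER >= 1200
         silent: #if defined _MSC_VER && _MSC_VER >= 1200 */
    #if defined _MSC_VER && _MSC_VER >= 1200
    #pragma warning( disable: 4251 )
    #endif
)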

View File

@@ -65,7 +65,7 @@ typedef struct CvTSTrans
float angle;
} CvTSTrans;
void SET_TRANS_0(CvTSTrans *pT)
static void SET_TRANS_0(CvTSTrans *pT)
{
memset(pT,0,sizeof(CvTSTrans));
pT->C = 1;

View File

@@ -99,8 +99,9 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
/*==========================================================================================*/
/* Functions for calculating the tensor */
/*==========================================================================================*/
#if 0
#if 1
void fprintMatrix(FILE* file,CvMat* matrix)
static void fprintMatrix(FILE* file,CvMat* matrix)
{
int i,j;
fprintf(file,"\n");
@@ -116,7 +117,7 @@ void fprintMatrix(FILE* file,CvMat* matrix)
#endif
/*==========================================================================================*/
void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
static void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
{
/* Normalize image points using camera matrix */
@@ -169,7 +170,7 @@ void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
return;
}
#endif
/*=====================================================================================*/
/*
@@ -405,7 +406,7 @@ int icvComputeProjectMatrices6Points( CvMat* points1,CvMat* points2,CvMat* point
}
/*==========================================================================================*/
int icvGetRandNumbers(int range,int count,int* arr)
static int icvGetRandNumbers(int range,int count,int* arr)
{
/* Generate random numbers [0,range-1] */
@@ -454,7 +455,7 @@ int icvGetRandNumbers(int range,int count,int* arr)
return 1;
}
/*==========================================================================================*/
void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
static void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
{
CV_FUNCNAME( "icvSelectColsByNumbers" );
@@ -501,7 +502,7 @@ void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int num
}
/*==========================================================================================*/
void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
static void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
{
CvMat* tmpProjPoints = 0;
@@ -584,7 +585,8 @@ void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
return;
}
/*==========================================================================================*/
int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image */
#if 0
static int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image */
CvMat** projMatrs,/* array of 3 projection matrices */
CvMat** statuses,/* 3 arrays of status of points */
double threshold,/* Threshold for good point */
@@ -783,6 +785,7 @@ int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of point
return numProjMatrs;
}
#endif
/*==========================================================================================*/
int icvComputeProjectMatricesNPoints( CvMat* points1,CvMat* points2,CvMat* points3,
@@ -2350,8 +2353,8 @@ void ReconstructPointsFor3View_bySolve( CvMat* projMatr1,CvMat* projMatr2,CvMat*
#endif
/*==========================================================================================*/
void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
#if 0
static void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
{
/* We know the camera position; we must compute the rotation matrix and translation vector */
@@ -2468,7 +2471,7 @@ void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* t
/*==========================================================================================*/
void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
static void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
{
/* Computes the homography that brings the projection matrix to "canonical" form */
CV_FUNCNAME( "computeProjMatrHomography" );
@@ -2586,7 +2589,7 @@ void icvComputeQknowPrincipalPoint(int numImages, CvMat **projMatrs,CvMat *matrQ
/* Part with metric reconstruction */
#if 1
void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
static void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
{
/* K*K' = P*Q*P' */
/* try to solve Q by linear method */
@@ -2731,7 +2734,7 @@ void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ
#endif
/*-----------------------------------------------------------------------------------------------------*/
void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
static void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
{
#if 0
/* Use SVD to decompose matrix Q=H*I*H' */
@@ -2789,3 +2792,5 @@ void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
#endif
}
#endif

View File

@@ -48,7 +48,7 @@
#include "_vectrack.h"
#define NUM_FACE_ELEMENTS 3
enum
enum
{
MOUTH = 0,
LEYE = 1,
@@ -69,7 +69,7 @@ int ChoiceTrackingFace2(CvFaceTracker* pTF, const int nElements, const CvFaceEle
inline int GetEnergy(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl);
inline int GetEnergy2(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl, int* element);
inline double CalculateTransformationLMS3_0( CvPoint* pTemplPoints, CvPoint* pSrcPoints);
inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
CvPoint* pSrcPoints,
double* pdbAverageScale,
double* pdbAverageRotate,
@@ -91,13 +91,13 @@ struct CvTrackingRect
int Energy(const CvTrackingRect& prev)
{
int prev_color = 0 == prev.iColor ? iColor : prev.iColor;
iEnergy = 1 * pow2(r.width - prev.r.width) +
1 * pow2(r.height - prev.r.height) +
1 * pow2(iColor - prev_color) / 4 +
- 1 * nRectsInThis +
- 0 * nRectsOnTop +
+ 0 * nRectsOnLeft +
+ 0 * nRectsOnRight +
iEnergy = 1 * pow2(r.width - prev.r.width) +
1 * pow2(r.height - prev.r.height) +
1 * pow2(iColor - prev_color) / 4 +
- 1 * nRectsInThis +
- 0 * nRectsOnTop +
+ 0 * nRectsOnLeft +
+ 0 * nRectsOnRight +
+ 0 * nRectsOnBottom;
return iEnergy;
}
@@ -110,10 +110,10 @@ struct CvFaceTracker
double dbRotateDelta;
double dbRotateAngle;
CvPoint ptRotate;
CvPoint ptTempl[NUM_FACE_ELEMENTS];
CvRect rTempl[NUM_FACE_ELEMENTS];
IplImage* imgGray;
IplImage* imgThresh;
CvMemStorage* mstgContours;
@@ -149,8 +149,8 @@ struct CvFaceTracker
imgGray = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
imgThresh = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
mstgContours = cvCreateMemStorage();
if ((NULL == imgGray) ||
(NULL == imgThresh) ||
if ((NULL == imgGray) ||
(NULL == imgThresh) ||
(NULL == mstgContours))
return FALSE;
return TRUE;
@@ -162,11 +162,11 @@ struct CvFaceTracker
ReallocImage(&imgThresh, sz, 1);
ptRotate = face[MOUTH].ptCenter;
float m[6];
CvMat mat = cvMat( 2, 3, CV_32FC1, m );
CvMat mat = cvMat( 2, 3, CV_32FC1, m );
if (NULL == imgGray || NULL == imgThresh)
return FALSE;
/*m[0] = (float)cos(-dbRotateAngle*CV_PI/180.);
m[1] = (float)sin(-dbRotateAngle*CV_PI/180.);
m[2] = (float)ptRotate.x;
@@ -175,7 +175,7 @@ struct CvFaceTracker
m[5] = (float)ptRotate.y;*/
cv2DRotationMatrix( cvPointTo32f(ptRotate), -dbRotateAngle, 1., &mat );
cvWarpAffine( img, imgGray, &mat );
if (NULL == mstgContours)
mstgContours = cvCreateMemStorage();
else
@@ -225,7 +225,7 @@ protected:
void Energy();
}; //class CvFaceElement
int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
inline int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
{
return ((CvTrackingRect*)el1)->iEnergy - ((CvTrackingRect*)el2)->iEnergy;
}// int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
@@ -322,7 +322,7 @@ void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, i
}
for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
{
cr.r = cvContourBoundingRect(internal);
cr.r = cvContourBoundingRect(internal);
Move(cr.r, roi.x, roi.y);
if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize && cr.r.height > dMinSize)
{
@@ -353,7 +353,7 @@ void CvFaceElement::MergeRects(int d)
for (j = i + 1; j < nRects; j++)
{
CvTrackingRect* pRect2 = (CvTrackingRect*)(reader2.ptr);
if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d &&
if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d &&
abs(pRect1->r.height - pRect2->r.height) < d)
{
CvTrackingRect rNew;
@@ -432,7 +432,7 @@ cvInitFaceTracker(CvFaceTracker* pFaceTracker, const IplImage* imgGray, CvRect*
(NULL == pRects) ||
(nRects < NUM_FACE_ELEMENTS))
return NULL;
//int new_face = FALSE;
CvFaceTracker* pFace = pFaceTracker;
if (NULL == pFace)
@@ -468,7 +468,7 @@ cvTrackFace(CvFaceTracker* pFaceTracker, IplImage* imgGray, CvRect* pRects, int
pFaceTracker->InitNextImage(imgGray);
*ptRotate = pFaceTracker->ptRotate;
*dbAngleRotate = pFaceTracker->dbRotateAngle;
int nElements = 16;
double dx = pFaceTracker->face[LEYE].ptCenter.x - pFaceTracker->face[REYE].ptCenter.x;
double dy = pFaceTracker->face[LEYE].ptCenter.y - pFaceTracker->face[REYE].ptCenter.y;
@@ -476,9 +476,9 @@ cvTrackFace(CvFaceTracker* pFaceTracker, IplImage* imgGray, CvRect* pRects, int
int d = cvRound(0.25 * d_eyes);
int dMinSize = d;
int nRestarts = 0;
int elem;
CvFaceElement big_face[NUM_FACE_ELEMENTS];
START:
// init
@@ -533,7 +533,7 @@ START:
}
if (2 == elements)
find2 = TRUE;
else
else
restart = TRUE;
}
}
@@ -563,13 +563,13 @@ RESTART:
pFaceTracker->iTrackingFaceType = noel;
found = TRUE;
}
else
else
{
restart = TRUE;
goto RESTART;
}
}
if (found)
{
// angle by mouth & eyes
@@ -613,7 +613,7 @@ void ThresholdingParam(IplImage *imgGray, int iNumLayers, int &iMinLevel, int &i
{
assert(imgGray != NULL);
assert(imgGray->nChannels == 1);
int i, j;
int i, j;
// create histogram
int histImg[256] = {0};
uchar* buffImg = (uchar*)imgGray->imageData;
@@ -760,7 +760,7 @@ int ChoiceTrackingFace2(CvFaceTracker* pTF, const int nElements, const CvFaceEle
double prev_d02 = sqrt((double)prev_v02.x*prev_v02.x + prev_v02.y*prev_v02.y);
double new_d01 = sqrt((double)new_v01.x*new_v01.x + new_v01.y*new_v01.y);
double scale = templ_d01 / new_d01;
double new_d02 = templ_d02 / scale;
double new_d02 = templ_d02 / scale;
double sin_a = double(prev_v01.x * prev_v02.y - prev_v01.y * prev_v02.x) / (prev_d01 * prev_d02);
double cos_a = cos(asin(sin_a));
double x = double(new_v01.x) * cos_a - double(new_v01.y) * sin_a;
@@ -806,12 +806,12 @@ inline int GetEnergy(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoin
double h_mouth = double(ppNew[MOUTH]->r.height) * scale;
energy +=
int(512.0 * (e_prev + 16.0 * e_templ)) +
4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) +
4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) +
4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) +
2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) +
1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) +
1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) +
4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) +
4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) +
4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) +
2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) +
1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) +
1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) +
0;
return energy;
}
@@ -832,20 +832,20 @@ inline int GetEnergy2(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoi
double h0 = (double)ppNew[element[0]]->r.height * scale_templ;
double w1 = (double)ppNew[element[1]]->r.width * scale_templ;
double h1 = (double)ppNew[element[1]]->r.height * scale_templ;
int energy = ppNew[element[0]]->iEnergy + ppNew[element[1]]->iEnergy +
- 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) +
- 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) +
(int)pow(w0 - (double)rTempl[element[0]].width, 2) +
(int)pow(h0 - (double)rTempl[element[0]].height, 2) +
(int)pow(w1 - (double)rTempl[element[1]].width, 2) +
(int)pow(h1 - (double)rTempl[element[1]].height, 2) +
(int)pow(new_d - prev_d, 2) +
0;
return energy;
}
inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
CvPoint* pSrcPoints,
double* pdbAverageScale,
double* pdbAverageRotate,
@@ -866,41 +866,41 @@ inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
pTemplPoints[1].x * pSrcPoints[1].x +
double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
pTemplPoints[1].x * pSrcPoints[1].x +
pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
pTemplPoints[1].y * pSrcPoints[1].y +
double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
pTemplPoints[1].y * pSrcPoints[1].y +
pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
pTemplPoints[1].x * pSrcPoints[1].y +
double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
pTemplPoints[1].x * pSrcPoints[1].y +
pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
pTemplPoints[1].y * pSrcPoints[1].x +
double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
pTemplPoints[1].y * pSrcPoints[1].x +
pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
dbXtXt -= dbXt * dbXt;
dbYtYt -= dbYt * dbYt;
dbXsXs -= dbXs * dbXs;
dbYsYs -= dbYs * dbYs;
dbXtXs -= dbXt * dbXs;
dbYtYs -= dbYt * dbYs;
dbXtYs -= dbXt * dbYs;
dbYtXs -= dbYt * dbXs;
dbAverageRotate = atan2( dbXtYs - dbYtXs, dbXtXs + dbYtYs );
double cosR = cos(dbAverageRotate);
double sinR = sin(dbAverageRotate);
double del = dbXsXs + dbYsYs;
@@ -909,15 +909,15 @@ inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
dbAverageScale = (double(dbXtXs + dbYtYs) * cosR + double(dbXtYs - dbYtXs) * sinR) / del;
dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;
}
dbAverageShiftX = double(dbXt) - dbAverageScale * (double(dbXs) * cosR + double(dbYs) * sinR);
dbAverageShiftY = double(dbYt) - dbAverageScale * (double(dbYs) * cosR - double(dbXs) * sinR);
if( pdbAverageScale != NULL ) *pdbAverageScale = dbAverageScale;
if( pdbAverageRotate != NULL ) *pdbAverageRotate = dbAverageRotate;
if( pdbAverageShiftX != NULL ) *pdbAverageShiftX = dbAverageShiftX;
if( pdbAverageShiftY != NULL ) *pdbAverageShiftY = dbAverageShiftY;
assert(dbLMS >= 0);
return dbLMS;
}
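(The closed form evaluated here is the least-squares similarity transform, i.e. rotation, uniform scale and translation, mapping the three source points onto the template points. With S_xx = dbXtXs + dbYtYs, S_xy = dbXtYs - dbYtXs, S_ss = dbXsXs + dbYsYs and S_tt = dbXtXt + dbYtYt, all taken after the centring step (the dbXtXt -= dbXt*dbXt block),

    \theta = \operatorname{atan2}(S_{xy},\, S_{xx}), \qquad
    s = \frac{S_{xx}\cos\theta + S_{xy}\sin\theta}{S_{ss}}, \qquad
    t = \bar{p}_t - s\,R(\theta)\,\bar{p}_s,

    \text{dbLMS} = S_{tt} - \frac{S_{xx}^{2} + S_{xy}^{2}}{S_{ss}},

where R(\theta) follows the sign convention of the shift computation above. The symbols are mine; the relations restate the code.)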
@@ -933,39 +933,39 @@ inline double CalculateTransformationLMS3_0( CvPoint* pTemplPoints, CvPoint* pSr
double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
pTemplPoints[1].x * pSrcPoints[1].x +
double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
pTemplPoints[1].x * pSrcPoints[1].x +
pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
pTemplPoints[1].y * pSrcPoints[1].y +
double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
pTemplPoints[1].y * pSrcPoints[1].y +
pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
pTemplPoints[1].x * pSrcPoints[1].y +
double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
pTemplPoints[1].x * pSrcPoints[1].y +
pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
pTemplPoints[1].y * pSrcPoints[1].x +
double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
pTemplPoints[1].y * pSrcPoints[1].x +
pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
dbXtXt -= dbXt * dbXt;
dbYtYt -= dbYt * dbYt;
dbXsXs -= dbXs * dbXs;
dbYsYs -= dbYs * dbYs;
dbXtXs -= dbXt * dbXs;
dbYtYs -= dbYt * dbYs;
dbXtYs -= dbXt * dbYs;
dbYtXs -= dbYt * dbXs;
double del = dbXsXs + dbYsYs;
if( del != 0 )
dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;