fixed warnings
parent b03c19a8ac
commit 35d9ce0c0c
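Nearly every hunk below follows one of two patterns: an explicit (int) or (unsigned int) cast where a size_t result (typically from std::vector::size() or cv::Mat::total()) is narrowed to an integer, and an f suffix where a double literal initializes a float; a few hunks instead hoist the size into a local variable or drop an unused temporary. A minimal sketch of the two warning classes involved follows; the function and variable names are illustrative, not taken from the commit.

#include <vector>

// Illustrative only (not part of the commit): the two warning classes that
// the casts and 'f' suffixes in this commit are silencing.
void sketch(const std::vector<float>& samples)
{
    int n   = samples.size();          // size_t -> int narrowing (e.g. MSVC C4267, GCC -Wconversion)
    float w = 0.3;                     // double literal truncated to float (e.g. MSVC C4305)

    int n_fixed   = (int)samples.size();  // explicit cast states the narrowing intentionally
    float w_fixed = 0.3f;                 // float literal, no truncation
    (void)n; (void)w; (void)n_fixed; (void)w_fixed;
}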
@@ -599,7 +599,7 @@ void RetinaImpl::getParvoRAW(OutputArray parvoOutputBufferCopy){
 // original API level data accessors : get buffers addresses...
 const Mat RetinaImpl::getMagnoRAW() const {
 // create a cv::Mat header for the valarray
-return Mat(_retinaFilter->getMovingContours().size(),1, CV_32F, (void*)get_data(_retinaFilter->getMovingContours()));
+return Mat((int)_retinaFilter->getMovingContours().size(),1, CV_32F, (void*)get_data(_retinaFilter->getMovingContours()));
 }

@@ -607,11 +607,11 @@ const Mat RetinaImpl::getParvoRAW() const {
 if (_retinaFilter->getColorMode()) // check if color mode is enabled
 {
 // create a cv::Mat table (for RGB planes as a single vector)
-return Mat(_retinaFilter->getColorOutput().size(), 1, CV_32F, (void*)get_data(_retinaFilter->getColorOutput()));
+return Mat((int)_retinaFilter->getColorOutput().size(), 1, CV_32F, (void*)get_data(_retinaFilter->getColorOutput()));
 }
 // otherwise, output is gray level
 // create a cv::Mat header for the valarray
-return Mat( _retinaFilter->getContours().size(), 1, CV_32F, (void*)get_data(_retinaFilter->getContours()));
+return Mat((int)_retinaFilter->getContours().size(), 1, CV_32F, (void*)get_data(_retinaFilter->getContours()));
 }

 // private method called by constructirs
@@ -208,7 +208,7 @@ void CV_ChessboardDetectorTest::run_batch( const string& filename )
 }

 int progress = 0;
-int max_idx = board_list.size()/2;
+int max_idx = (int)board_list.size()/2;
 double sum_error = 0.0;
 int count = 0;

@@ -80,10 +80,10 @@ static Mat readMatFromBin( const string& filename )
 size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
 CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);

-size_t step = dataSize / rows / CV_ELEM_SIZE(type);
-CV_Assert(step >= (size_t)cols);
+int step = dataSize / rows / CV_ELEM_SIZE(type);
+CV_Assert(step >= cols);

-Mat m = Mat( rows, step, type).colRange(0, cols);
+Mat m = Mat(rows, step, type).colRange(0, cols);

 size_t elements_read = fread( m.ptr(), 1, dataSize, f );
 CV_Assert(elements_read == (size_t)(dataSize));
@@ -406,9 +406,9 @@ bool TiffDecoder::readHdrData(Mat& img)
 TIFFGetField( tif, TIFFTAG_PHOTOMETRIC, &photometric );
 TIFFSetField(tif, TIFFTAG_SGILOGDATAFMT, SGILOGDATAFMT_FLOAT);
 int size = 3 * m_width * m_height * sizeof (float);
-int strip_size = 3 * m_width * rows_per_strip;
+tstrip_t strip_size = 3 * m_width * rows_per_strip;
 float *ptr = img.ptr<float>();
-for (size_t i = 0; i < TIFFNumberOfStrips(tif); i++, ptr += strip_size)
+for (tstrip_t i = 0; i < TIFFNumberOfStrips(tif); i++, ptr += strip_size)
 {
 TIFFReadEncodedStrip(tif, i, ptr, size);
 size -= strip_size * sizeof(float);
@@ -174,12 +174,12 @@ bool WebPDecoder::readData(Mat &img)
 if (channels == 3)
 {
 res_ptr = WebPDecodeBGRInto(data.data, data.total(), out_data,
-out_data_size, img.step);
+(int)out_data_size, (int)img.step);
 }
 else if (channels == 4)
 {
 res_ptr = WebPDecodeBGRAInto(data.data, data.total(), out_data,
-out_data_size, img.step);
+(int)out_data_size, (int)img.step);
 }

 if(res_ptr == out_data)

@@ -255,22 +255,22 @@ bool WebPEncoder::write(const Mat& img, const std::vector<int>& params)
 {
 if(channels == 3)
 {
-size = WebPEncodeLosslessBGR(image->data, width, height, image->step, &out);
+size = WebPEncodeLosslessBGR(image->data, width, height, (int)image->step, &out);
 }
 else if(channels == 4)
 {
-size = WebPEncodeLosslessBGRA(image->data, width, height, image->step, &out);
+size = WebPEncodeLosslessBGRA(image->data, width, height, (int)image->step, &out);
 }
 }
 else
 {
 if(channels == 3)
 {
-size = WebPEncodeBGR(image->data, width, height, image->step, quality, &out);
+size = WebPEncodeBGR(image->data, width, height, (int)image->step, quality, &out);
 }
 else if(channels == 4)
 {
-size = WebPEncodeBGRA(image->data, width, height, image->step, quality, &out);
+size = WebPEncodeBGRA(image->data, width, height, (int)image->step, quality, &out);
 }
 }
@@ -49,7 +49,7 @@ namespace cv

 int rotatedRectangleIntersection( const RotatedRect& rect1, const RotatedRect& rect2, OutputArray intersectingRegion )
 {
-const float samePointEps = 0.00001; // used to test if two points are the same
+const float samePointEps = 0.00001f; // used to test if two points are the same

 Point2f vec1[4], vec2[4];
 Point2f pts1[4], pts2[4];
@@ -473,8 +473,7 @@ void LineSegmentDetectorImpl::flsd(std::vector<Vec4i>& lines,

 // Search for line segments
 unsigned int ls_count = 0;
-unsigned int list_size = list.size();
-for(unsigned int i = 0; i < list_size; ++i)
+for(size_t i = 0, list_size = list.size(); i < list_size; ++i)
 {
 unsigned int adx = list[i].p.x + list[i].p.y * img_width;
 if((used.data[adx] == NOTUSED) && (angles_data[adx] != NOTDEF))
@@ -376,8 +376,8 @@ void CV_RotatedRectangleIntersectionTest::test7()

 rect1.center.x = 0;
 rect1.center.y = 0;
-rect1.size.width = 12.34;
-rect1.size.height = 56.78;
+rect1.size.width = 12.34f;
+rect1.size.height = 56.78f;
 rect1.angle = 0;

 rect2.center.x = 0;

@@ -464,7 +464,7 @@ void CV_RotatedRectangleIntersectionTest::test9()
 rect2.center.x = 2;
 rect2.center.y = 0;
 rect2.size.width = 2;
-rect2.size.height = 123.45;
+rect2.size.height = 123.45f;
 rect2.angle = 0;

 vector<Point2f> vertices;
@@ -275,8 +275,8 @@ static Mat readMatFromBin( const string& filename )
 size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
 CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);

-size_t step = dataSize / rows / CV_ELEM_SIZE(type);
-CV_Assert(step >= (size_t)cols);
+int step = dataSize / rows / CV_ELEM_SIZE(type);
+CV_Assert(step >= cols);

 Mat m = Mat( rows, step, type).colRange(0, cols);
@@ -795,7 +795,7 @@ ERStat* ERFilterNM::er_tree_filter ( InputArray image, ERStat * stat, ERStat *pa
 int p_prev = p-1;
 int p_next = p+1;
 if (p_prev == -1)
-p_prev = contour_poly.size()-1;
+p_prev = (int)contour_poly.size()-1;
 if (p_next == (int)contour_poly.size())
 p_next = 0;

@@ -2455,9 +2455,9 @@ void MaxMeaningfulClustering::build_merge_info(double *Z, int N, vector<HCluster
 cluster.dist = dist;
 if (cluster.dist >= 1)
-cluster.dist = 0.999999;
+cluster.dist = 0.999999f;
 if (cluster.dist == 0)
-cluster.dist = 1.e-25;
+cluster.dist = 1.e-25f;

 cluster.dist_ext = 1;

@@ -2811,7 +2811,7 @@ void erGrouping(InputArrayOfArrays _src, vector<vector<ERStat> > &regions, const
 for (int f=0; f<num_features; f++)
 {
-unsigned int N = regions.at(c).size();
+unsigned int N = (unsigned int)regions.at(c).size();
 if (N<3) break;
 int dim = dims[f];
 double *data = (double*)malloc(dim*N * sizeof(double));

@@ -2892,7 +2892,7 @@ void erGrouping(InputArrayOfArrays _src, vector<vector<ERStat> > &regions, const
 }

 // Find the Max. Meaningful Clusters in the co-occurrence matrix
-mm_clustering(D, regions.at(c).size(), METHOD_METR_AVERAGE, &meaningful_clusters);
+mm_clustering(D, (unsigned int)regions.at(c).size(), METHOD_METR_AVERAGE, &meaningful_clusters);
 free(D);
@@ -84,11 +84,12 @@ int solveLP(const Mat& Func, const Mat& Constr, Mat& z){
 //return the optimal solution
 z.create(c.cols,1,CV_64FC1);
 MatIterator_<double> it=z.begin<double>();
+unsigned int nsize = (unsigned int)N.size();
 for(int i=1;i<=c.cols;i++,it++){
-if(indexToRow[i]<N.size()){
+if(indexToRow[i]<nsize){
 *it=0;
 }else{
-*it=b.at<double>(indexToRow[i]-N.size(),b.cols-1);
+*it=b.at<double>(indexToRow[i]-nsize,b.cols-1);
 }
 }

@@ -102,7 +103,7 @@ static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<
 *it=it[-1]+1;
 }
 B.resize(b.rows);
-B[0]=N.size();
+B[0]=(int)N.size();
 for (std::vector<int>::iterator it = B.begin()+1 ; it != B.end(); ++it){
 *it=it[-1]+1;
 }

@@ -151,8 +152,9 @@ static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<
 dprintf(("\tAFTER INNER_SIMPLEX\n"));
 print_simplex_state(c,b,v,N,B);

-if(indexToRow[0]>=N.size()){
-int iterator_offset=indexToRow[0]-N.size();
+unsigned int nsize = (unsigned int)N.size();
+if(indexToRow[0]>=nsize){
+int iterator_offset=indexToRow[0]-nsize;
 if(b(iterator_offset,b.cols-1)>0){
 return SOLVELP_UNFEASIBLE;
 }

@@ -176,14 +178,14 @@ static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<
 c=0;
 v=0;
 for(int I=1;I<old_c.cols;I++){
-if(indexToRow[I]<N.size()){
+if(indexToRow[I]<nsize){
 dprintf(("I=%d from nonbasic\n",I));
 int iterator_offset=indexToRow[I];
 c(0,iterator_offset)+=old_c(0,I);
 print_matrix(c);
 }else{
 dprintf(("I=%d from basic\n",I));
-int iterator_offset=indexToRow[I]-N.size();
+int iterator_offset=indexToRow[I]-nsize;
 c-=old_c(0,I)*b.row(iterator_offset).colRange(0,b.cols-1);
 v+=old_c(0,I)*b(iterator_offset,b.cols-1);
 print_matrix(c);
@@ -135,7 +135,7 @@ public:
 computeBitmaps(pyr0[level], tb1, eb1);
 computeBitmaps(pyr1[level], tb2, eb2);

-int min_err = pyr0[level].total();
+int min_err = (int)pyr0[level].total();
 Point new_shift(shift);
 for(int i = -1; i <= 1; i++) {
 for(int j = -1; j <= 1; j++) {

@@ -253,7 +253,7 @@ protected:
 calcHist(&img, 1, &channels, Mat(), hist, 1, &hist_size, ranges);
 float *ptr = hist.ptr<float>();
 int median = 0, sum = 0;
-int thresh = img.total() / 2;
+int thresh = (int)img.total() / 2;
 while(sum < thresh && median < LDR_SIZE) {
 sum += static_cast<int>(ptr[median]);
 median++;
@@ -97,7 +97,7 @@ public:

 std::vector<Mat> result_split(channels);
 for(int channel = 0; channel < channels; channel++) {
-Mat A = Mat::zeros(sample_points.size() * images.size() + LDR_SIZE + 1, LDR_SIZE + sample_points.size(), CV_32F);
+Mat A = Mat::zeros((int)sample_points.size() * (int)images.size() + LDR_SIZE + 1, LDR_SIZE + (int)sample_points.size(), CV_32F);
 Mat B = Mat::zeros(A.rows, 1, CV_32F);

 int eq = 0;

@@ -106,8 +106,8 @@ public:

 int val = images[j].ptr()[3*(sample_points[i].y * images[j].cols + sample_points[j].x) + channel];
 A.at<float>(eq, val) = w.at<float>(val);
-A.at<float>(eq, LDR_SIZE + i) = -w.at<float>(val);
-B.at<float>(eq, 0) = w.at<float>(val) * log(times.at<float>(j));
+A.at<float>(eq, LDR_SIZE + (int)i) = -w.at<float>(val);
+B.at<float>(eq, 0) = w.at<float>(val) * log(times.at<float>((int)j));
 eq++;
 }
 }

@@ -219,7 +219,7 @@ public:
 float* rad_ptr = radiance.ptr<float>();
 for(size_t pos = 0; pos < images[i].total(); pos++) {
 for(int c = 0; c < channels; c++, ptr++, rad_ptr++) {
-new_response.at<Vec3f>(*ptr)[c] += times.at<float>(i) * *rad_ptr;
+new_response.at<Vec3f>(*ptr)[c] += times.at<float>((int)i) * *rad_ptr;
 }
 }
 }
@@ -106,7 +106,7 @@ public:
 LUT(images[i], response, response_img);
 split(response_img, splitted);
 for(int c = 0; c < channels; c++) {
-result_split[c] += w.mul(splitted[c] - exp_values.at<float>(i));
+result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
 }
 weight_sum += w;
 }

@@ -327,8 +327,8 @@ public:
 LUT(images[i], weight, w);
 LUT(images[i], response, im);

-result += times.at<float>(i) * w.mul(im);
-wsum += times.at<float>(i) * times.at<float>(i) * w;
+result += times.at<float>((int)i) * w.mul(im);
+wsum += times.at<float>((int)i) * times.at<float>((int)i) * w;
 }
 result = result.mul(1 / wsum);
 }
@@ -505,7 +505,7 @@ protected:
 void calculateSum(std::vector<Mat>& x_contrast, std::vector<Mat>& y_contrast, Mat& sum)
 {
 sum = Mat::zeros(x_contrast[x_contrast.size() - 1].size(), CV_32F);
-for(int i = x_contrast.size() - 1; i >= 0; i--)
+for(int i = (int)x_contrast.size() - 1; i >= 0; i--)
 {
 Mat grad_x, grad_y;
 getGradient(x_contrast[i], grad_x, 1);
@@ -112,7 +112,7 @@ static Mat _localAffineEstimate(const std::vector<Point2f>& shape1, const std::v
 bool fullAfine)
 {
 Mat out(2,3,CV_32F);
-int siz=2*shape1.size();
+int siz=2*(int)shape1.size();

 if (fullAfine)
 {
@@ -65,10 +65,10 @@ public:
 comparer=_comparer;
 iterations=_iterations;
 transformer=_transformer;
-bendingEnergyWeight=0.3;
-imageAppearanceWeight=0.0;
-shapeContextWeight=1.0;
-sigma=10;
+bendingEnergyWeight=0.3f;
+imageAppearanceWeight=0.0f;
+shapeContextWeight=1.0f;
+sigma=10.0f;
 name_ = "ShapeDistanceExtractor.SCD";
 }
@@ -503,7 +503,7 @@ void SCDMatcher::hungarian(cv::Mat &costMatrix, std::vector<cv::DMatch> &outMatc
 std::vector<int> matches(costMatrix.rows, 0), colsol(costMatrix.rows), rowsol(costMatrix.rows);
 std::vector<float> d(costMatrix.rows), pred(costMatrix.rows), v(costMatrix.rows);

-const float LOWV=1e-10;
+const float LOWV = 1e-10f;
 bool unassignedfound;
 int i=0, imin=0, numfree=0, prvnumfree=0, f=0, i0=0, k=0, freerow=0;
 int j=0, j1=0, j2=0, endofpath=0, last=0, low=0, up=0;
@@ -212,9 +212,9 @@ void ThinPlateSplineShapeTransformerImpl::estimateTransformation(InputArray _pts
 }

 // Organizing the correspondent points in matrix style //
-Mat shape1(matches.size(),2,CV_32F); // transforming shape
-Mat shape2(matches.size(),2,CV_32F); // target shape
-for (size_t i=0; i<matches.size(); i++)
+Mat shape1((int)matches.size(),2,CV_32F); // transforming shape
+Mat shape2((int)matches.size(),2,CV_32F); // target shape
+for (int i=0, end = (int)matches.size(); i<end; i++)
 {
 Point2f pt1=pts1.at<Point2f>(0,matches[i].queryIdx);
 shape1.at<float>(i,0) = pt1.x;

@@ -229,11 +229,11 @@ void ThinPlateSplineShapeTransformerImpl::estimateTransformation(InputArray _pts
 // Building the matrices for solving the L*(w|a)=(v|0) problem with L={[K|P];[P'|0]}

 //Building K and P (Neede to buil L)
-Mat matK(matches.size(),matches.size(),CV_32F);
-Mat matP(matches.size(),3,CV_32F);
-for (size_t i=0; i<matches.size(); i++)
+Mat matK((int)matches.size(),(int)matches.size(),CV_32F);
+Mat matP((int)matches.size(),3,CV_32F);
+for (int i=0, end=(int)matches.size(); i<end; i++)
 {
-for (size_t j=0; j<matches.size(); j++)
+for (int j=0; j<end; j++)
 {
 if (i==j)
 {

@@ -251,19 +251,19 @@ void ThinPlateSplineShapeTransformerImpl::estimateTransformation(InputArray _pts
 }

 //Building L
-Mat matL=Mat::zeros(matches.size()+3,matches.size()+3,CV_32F);
-Mat matLroi(matL, Rect(0,0,matches.size(),matches.size())); //roi for K
+Mat matL=Mat::zeros((int)matches.size()+3,(int)matches.size()+3,CV_32F);
+Mat matLroi(matL, Rect(0,0,(int)matches.size(),(int)matches.size())); //roi for K
 matK.copyTo(matLroi);
-matLroi = Mat(matL,Rect(matches.size(),0,3,matches.size())); //roi for P
+matLroi = Mat(matL,Rect((int)matches.size(),0,3,(int)matches.size())); //roi for P
 matP.copyTo(matLroi);
 Mat matPt;
 transpose(matP,matPt);
-matLroi = Mat(matL,Rect(0,matches.size(),matches.size(),3)); //roi for P'
+matLroi = Mat(matL,Rect(0,(int)matches.size(),(int)matches.size(),3)); //roi for P'
 matPt.copyTo(matLroi);

 //Building B (v|0)
-Mat matB = Mat::zeros(matches.size()+3,2,CV_32F);
-for (size_t i=0; i<matches.size(); i++)
+Mat matB = Mat::zeros((int)matches.size()+3,2,CV_32F);
+for (int i=0, end = (int)matches.size(); i<end; i++)
 {
 matB.at<float>(i,0) = shape2.at<float>(i,0); //x's
 matB.at<float>(i,1) = shape2.at<float>(i,1); //y's
@@ -46,11 +46,11 @@ using namespace std;

 const int angularBins=12;
 const int radialBins=4;
-const float minRad=0.2;
+const float minRad=0.2f;
 const float maxRad=2;
 const int NSN=5;//10;//20; //number of shapes per class
 const int NP=100; //number of points sympliying the contour
-const float outlierWeight=0.1;
+const float outlierWeight=0.1f;
 const int numOutliers=20;
 const float CURRENT_MAX_ACCUR=95; //98% and 99% reached in several tests, 95 is fixed as minimum boundary

@@ -96,7 +96,7 @@ vector <Point2f> CV_ShapeEMDTest::convertContourType(const Mat& currentQuery, in

 // In case actual number of points is less than n
 int dum=0;
-for (int add=contoursQuery.size()-1; add<n; add++)
+for (int add=(int)contoursQuery.size()-1; add<n; add++)
 {
 contoursQuery.push_back(contoursQuery[dum++]); //adding dummy values
 }

@@ -148,14 +148,14 @@ void CV_ShapeEMDTest::mpegTest()
 listShapeNames(namesHeaders);

 // distance matrix //
-Mat distanceMat=Mat::zeros(NSN*namesHeaders.size(), NSN*namesHeaders.size(), CV_32F);
+Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F);

 // query contours (normal v flipped, h flipped) and testing contour //
 vector<Point2f> contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting;

 // reading query and computing its properties //
 int counter=0;
-const int loops=NSN*namesHeaders.size()*NSN*namesHeaders.size();
+const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size();
 for (size_t n=0; n<namesHeaders.size(); n++)
 {
 for (int i=1; i<=NSN; i++)

@@ -165,7 +165,6 @@ void CV_ShapeEMDTest::mpegTest()
 thepathandname<<path+namesHeaders[n]<<"-"<<i<<".png";
 Mat currentQuery, flippedHQuery, flippedVQuery;
 currentQuery=imread(thepathandname.str(), IMREAD_GRAYSCALE);
-Mat currentQueryBuf=currentQuery.clone();
 flip(currentQuery, flippedHQuery, 0);
 flip(currentQuery, flippedVQuery, 1);
 // compute border of the query and its flipped versions //

@@ -184,8 +183,8 @@ void CV_ShapeEMDTest::mpegTest()
 counter++;
 if (nt==n && it==i)
 {
-distanceMat.at<float>(NSN*n+i-1,
-NSN*nt+it-1)=0;
+distanceMat.at<float>(NSN*(int)n+i-1,
+NSN*(int)nt+it-1)=0;
 continue;
 }
 // read testing image //

@@ -200,9 +199,9 @@ void CV_ShapeEMDTest::mpegTest()
 std::cout<<std::endl<<"Progress: "<<counter<<"/"<<loops<<": "<<100*double(counter)/loops<<"% *******"<<std::endl;
 std::cout<<"Computing shape distance between "<<namesHeaders[n]<<i<<
 " and "<<namesHeaders[nt]<<it<<": ";
-distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)=
+distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)=
 computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting);
-std::cout<<distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)<<std::endl;
+std::cout<<distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)<<std::endl;
 }
 }
 }
@@ -75,13 +75,13 @@ CV_HaussTest::~CV_HaussTest()
 vector<Point2f> CV_HaussTest::normalizeContour(const vector<Point> &contour)
 {
 vector<Point2f> output(contour.size());
-Mat disMat(contour.size(),contour.size(),CV_32F);
+Mat disMat((int)contour.size(),(int)contour.size(),CV_32F);
 Point2f meanpt(0,0);
 float meanVal=1;

-for (size_t ii=0; ii<contour.size(); ii++)
+for (int ii=0, end1 = (int)contour.size(); ii<end1; ii++)
 {
-for (size_t jj=0; jj<contour.size(); jj++)
+for (int jj=0, end2 = (int)contour.size(); end2; jj++)
 {
 if (ii==jj) disMat.at<float>(ii,jj)=0;
 else

@@ -128,7 +128,7 @@ vector <Point> CV_HaussTest::convertContourType(const Mat& currentQuery, int n)
 }

 // In case actual number of points is less than n
-for (int add=contoursQuery.size()-1; add<n; add++)
+for (int add=(int)contoursQuery.size()-1; add<n; add++)
 {
 contoursQuery.push_back(contoursQuery[contoursQuery.size()-add+1]); //adding dummy values
 }

@@ -160,14 +160,14 @@ void CV_HaussTest::mpegTest()
 listShapeNames(namesHeaders);

 // distance matrix //
-Mat distanceMat=Mat::zeros(NSN*namesHeaders.size(), NSN*namesHeaders.size(), CV_32F);
+Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F);

 // query contours (normal v flipped, h flipped) and testing contour //
 vector<Point> contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting;

 // reading query and computing its properties //
 int counter=0;
-const int loops=NSN*namesHeaders.size()*NSN*namesHeaders.size();
+const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size();
 for (size_t n=0; n<namesHeaders.size(); n++)
 {
 for (int i=1; i<=NSN; i++)

@@ -195,8 +195,8 @@ void CV_HaussTest::mpegTest()
 counter++;
 if (nt==n && it==i)
 {
-distanceMat.at<float>(NSN*n+i-1,
-NSN*nt+it-1)=0;
+distanceMat.at<float>(NSN*(int)n+i-1,
+NSN*(int)nt+it-1)=0;
 continue;
 }
 // read testing image //

@@ -212,9 +212,9 @@ void CV_HaussTest::mpegTest()
 std::cout<<std::endl<<"Progress: "<<counter<<"/"<<loops<<": "<<100*double(counter)/loops<<"% *******"<<std::endl;
 std::cout<<"Computing shape distance between "<<namesHeaders[n]<<i<<
 " and "<<namesHeaders[nt]<<it<<": ";
-distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)=
+distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)=
 computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting);
-std::cout<<distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)<<std::endl;
+std::cout<<distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)<<std::endl;
 }
 }
 }
@@ -46,13 +46,13 @@ using namespace std;

 const int angularBins=12;
 const int radialBins=4;
-const float minRad=0.2;
+const float minRad=0.2f;
 const float maxRad=2;
 const int NSN=5;//10;//20; //number of shapes per class
 const int NP=120; //number of points sympliying the contour
-const float outlierWeight=0.1;
+const float outlierWeight=0.1f;
 const int numOutliers=20;
-const float CURRENT_MAX_ACCUR=95.0; //99% and 100% reached in several tests, 95 is fixed as minimum boundary
+const float CURRENT_MAX_ACCUR=95; //99% and 100% reached in several tests, 95 is fixed as minimum boundary

 class CV_ShapeTest : public cvtest::BaseTest
 {

@@ -95,7 +95,7 @@ vector <Point2f> CV_ShapeTest::convertContourType(const Mat& currentQuery, int n
 }

 // In case actual number of points is less than n
-for (int add=contoursQuery.size()-1; add<n; add++)
+for (int add=(int)contoursQuery.size()-1; add<n; add++)
 {
 contoursQuery.push_back(contoursQuery[contoursQuery.size()-add+1]); //adding dummy values
 }

@@ -126,7 +126,7 @@ float CV_ShapeTest::computeShapeDistance(vector <Point2f>& query1, vector <Point
 //waitKey(0);
 Ptr <ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor(angularBins, radialBins, minRad, maxRad);
 //Ptr <HistogramCostExtractor> cost = createNormHistogramCostExtractor(cv::DIST_L1);
-Ptr <HistogramCostExtractor> cost = createChiHistogramCostExtractor(30,0.15);
+Ptr <HistogramCostExtractor> cost = createChiHistogramCostExtractor(30,0.15f);
 //Ptr <HistogramCostExtractor> cost = createEMDHistogramCostExtractor();
 //Ptr <HistogramCostExtractor> cost = createEMDL1HistogramCostExtractor();
 mysc->setIterations(1);

@@ -148,14 +148,14 @@ void CV_ShapeTest::mpegTest()
 listShapeNames(namesHeaders);

 // distance matrix //
-Mat distanceMat=Mat::zeros(NSN*namesHeaders.size(), NSN*namesHeaders.size(), CV_32F);
+Mat distanceMat=Mat::zeros(NSN*(int)namesHeaders.size(), NSN*(int)namesHeaders.size(), CV_32F);

 // query contours (normal v flipped, h flipped) and testing contour //
 vector<Point2f> contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting;

 // reading query and computing its properties //
 int counter=0;
-const int loops=NSN*namesHeaders.size()*NSN*namesHeaders.size();
+const int loops=NSN*(int)namesHeaders.size()*NSN*(int)namesHeaders.size();
 for (size_t n=0; n<namesHeaders.size(); n++)
 {
 for (int i=1; i<=NSN; i++)

@@ -184,8 +184,8 @@ void CV_ShapeTest::mpegTest()
 counter++;
 if (nt==n && it==i)
 {
-distanceMat.at<float>(NSN*n+i-1,
-NSN*nt+it-1)=0;
+distanceMat.at<float>(NSN*(int)n+i-1,
+NSN*(int)nt+it-1)=0;
 continue;
 }
 // read testing image //

@@ -200,9 +200,9 @@ void CV_ShapeTest::mpegTest()
 std::cout<<std::endl<<"Progress: "<<counter<<"/"<<loops<<": "<<100*double(counter)/loops<<"% *******"<<std::endl;
 std::cout<<"Computing shape distance between "<<namesHeaders[n]<<i<<
 " and "<<namesHeaders[nt]<<it<<": ";
-distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)=
+distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)=
 computeShapeDistance(contoursQuery1, contoursQuery2, contoursQuery3, contoursTesting);
-std::cout<<distanceMat.at<float>(NSN*n+i-1, NSN*nt+it-1)<<std::endl;
+std::cout<<distanceMat.at<float>(NSN*(int)n+i-1, NSN*(int)nt+it-1)<<std::endl;
 }
 }
 }
@@ -36,7 +36,7 @@ static vector<Point> simpleContour( const Mat& currentQuery, int n=300 )

 // In case actual number of points is less than n
 int dummy=0;
-for (int add=contoursQuery.size()-1; add<n; add++)
+for (int add=(int)contoursQuery.size()-1; add<n; add++)
 {
 contoursQuery.push_back(contoursQuery[dummy++]); //adding dummy values
 }
@@ -44,7 +44,7 @@ int main(int argc, const char * argv[])
 channels.push_back(255-channels[c]);

 // Create ERFilter objects with the 1st and 2nd stage default classifiers
-Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),16,0.00015,0.13,0.2,true,0.1);
+Ptr<ERFilter> er_filter1 = createERFilterNM1(loadClassifierNM1("trained_classifierNM1.xml"),16,0.00015f,0.13f,0.2f,true,0.1f);
 Ptr<ERFilter> er_filter2 = createERFilterNM2(loadClassifierNM2("trained_classifierNM2.xml"),0.5);

 vector<vector<ERStat> > regions(channels.size());

@@ -94,7 +94,7 @@ void show_help_and_exit(const char *cmd)

 void groups_draw(Mat &src, vector<Rect> &groups)
 {
-for (int i=groups.size()-1; i>=0; i--)
+for (int i=(int)groups.size()-1; i>=0; i--)
 {
 if (src.type() == CV_8UC3)
 rectangle(src,groups.at(i).tl(),groups.at(i).br(),Scalar( 0, 255, 255 ), 3, 8 );