fixed hundreds of warnings from MSVC 2010.
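
Most of the individual changes follow a few recurring patterns: explicit `(int)`/`(unsigned)`/`(float)` casts where a 64-bit `size_t` or a `double` used to narrow implicitly, `f` suffixes on floating-point literals assigned to `float`, `cvRound()` where a float value becomes an integer coordinate, and `for(;;)` instead of `while(1)`. A minimal sketch of the pattern (the function, variable names, and the MSVC warning numbers C4244/C4267 are illustrative assumptions, not part of the commit):

#include <vector>
#include <opencv2/core/core.hpp>

void warningFreeExample(const std::vector<float>& samples, double gain)
{
    int n = (int)samples.size();   // size_t -> int made explicit (silences C4267)
    float weight = 0.8f;           // 'f' suffix instead of a double literal (C4244)
    float g = (float)gain;         // double -> float made explicit (C4244)
    int x = cvRound(g * n);        // float -> int rounded explicitly, not truncated
    (void)weight; (void)x;
}
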
@@ -131,8 +131,7 @@ void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowe
 int localAdaptation_photoreceptors, localAdaptation_Gcells;
 void callBack_updateRetinaParams(int, void*)
 {
-
-    retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (double)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+    retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
 }
 
 int colorSaturationFactor;

@@ -134,8 +134,8 @@ void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const floa
 {
     inputMat.copyTo(outputMat);
     // update threshold in the initial input image range
-    maxInputValue=(maxInputValue-255.f)/histNormRescalefactor+maxInput;
-    minInputValue=minInputValue/histNormRescalefactor+minInput;
+    maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
+    minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
     std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
     cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
     cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //
@@ -164,8 +164,7 @@ void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const floa
 int localAdaptation_photoreceptors, localAdaptation_Gcells;
 void callBack_updateRetinaParams(int, void*)
 {
-
-    retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (double)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+    retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
 }
 
 int colorSaturationFactor;
@@ -211,8 +210,8 @@ void loadNewFrame(const std::string filenamePrototype, const int currentFileInde
         double maxInput, minInput;
         minMaxLoc(inputImage, &minInput, &maxInput);
         std::cout<<"FIRST IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl;
-        globalRescalefactor=50.0/(maxInput-minInput); // less than 255 for flexibility... experimental value to be carefull about
-        float channelOffset = -1.5*minInput;
+        globalRescalefactor=(float)(50.0/(maxInput-minInput)); // less than 255 for flexibility... experimental value to be carefull about
+        double channelOffset = -1.5*minInput;
         globalOffset= cv::Scalar(channelOffset, channelOffset, channelOffset, channelOffset);
     }
     // call the generic input image rescaling callback

@@ -977,7 +977,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
             CV_Error(CV_StsError,err_msg.c_str());
         }
         /* convert iterator to index */
-        target_idx = std::distance(output_headers.begin(),target_idx_it);
+        target_idx = (int)std::distance(output_headers.begin(),target_idx_it);
     }
 
     /* prepare variables related to calculating recall if using the recall threshold */
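
On 64-bit targets `std::distance` returns a 64-bit `ptrdiff_t`, so storing it into an `int` narrows implicitly and MSVC warns; the `(int)` cast above keeps the behavior and documents the narrowing. A minimal sketch of the same pattern (the `indexOf` helper and its `headers` vector are assumed stand-ins, not part of the commit):

#include <iterator>
#include <string>
#include <vector>

int indexOf(std::vector<std::string>& headers, std::vector<std::string>::iterator it)
{
    // Explicit narrowing from ptrdiff_t to int: no truncation warning.
    return (int)std::distance(headers.begin(), it);
}
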
@@ -989,7 +989,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
         /* in order to calculate the total number of relevant images for normalization of recall
            it's necessary to extract the ground truth for the images under consideration */
         getClassifierGroundTruth(obj_class, images, ground_truth);
-        total_relevant = std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<char>(),(char)1));
+        total_relevant = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<char>(),(char)1));
     }
 
     /* iterate through images */
@@ -1040,7 +1040,7 @@ void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<Obd
                     CV_Error(CV_StsError,err_msg.c_str());
                 }
                 /* convert iterator to index */
-                int class_idx = std::distance(output_headers.begin(),class_idx_it);
+                int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
                 //add to confusion matrix row in proportion
                 output_values[class_idx] += 1.f/static_cast<float>(img_objects.size());
             }
@@ -1174,7 +1174,7 @@ void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetTy
                     if (ov > maxov)
                     {
                         maxov = ov;
-                        max_gt_obj_idx = gt_obj_idx;
+                        max_gt_obj_idx = (int)gt_obj_idx;
                     }
                 }
             }
@@ -1192,7 +1192,7 @@ void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetTy
                     CV_Error(CV_StsError,err_msg.c_str());
                 }
                 /* convert iterator to index */
-                int class_idx = std::distance(output_headers.begin(),class_idx_it);
+                int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
                 //add to confusion matrix row in proportion
                 output_values[class_idx] += 1.0;
             } else {
@@ -1540,7 +1540,7 @@ void VocData::readDetectorResultsFile(const string& input_file, vector<string>&
                 bounding_boxes.push_back(bounding_box_vect);
             } else {
                 /* if the image index has been seen before, add the current object below it in the 2D arrays */
-                int image_idx = std::distance(image_codes.begin(),image_codes_it);
+                int image_idx = (int)std::distance(image_codes.begin(),image_codes_it);
                 scores[image_idx].push_back(score);
                 bounding_boxes[image_idx].push_back(bounding_box);
             }
@@ -1985,7 +1985,7 @@ struct VocabTrainParams
 {
     VocabTrainParams() : trainObjClass("chair"), vocabSize(1000), memoryUse(200), descProportion(0.3f) {}
     VocabTrainParams( const string _trainObjClass, size_t _vocabSize, size_t _memoryUse, float _descProportion ) :
-            trainObjClass(_trainObjClass), vocabSize(_vocabSize), memoryUse(_memoryUse), descProportion(_descProportion) {}
+            trainObjClass(_trainObjClass), vocabSize((int)_vocabSize), memoryUse((int)_memoryUse), descProportion(_descProportion) {}
     void read( const FileNode& fn )
     {
         fn["trainObjClass"] >> trainObjClass;
@@ -2154,7 +2154,7 @@ Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainP
 
         // Randomly pick an image from the dataset which hasn't yet been seen
         // and compute the descriptors from that image.
-        int randImgIdx = rng( images.size() );
+        int randImgIdx = rng( (unsigned)images.size() );
         Mat colorImage = imread( images[randImgIdx].path );
         vector<KeyPoint> imageKeypoints;
         fdetector->detect( colorImage, imageKeypoints );
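
`cv::RNG::operator()` takes an `unsigned` upper bound, so passing `images.size()` (a `size_t`) would narrow implicitly on 64-bit builds; casting to `(unsigned)` first matches the parameter type. A sketch of the pattern (the `pickRandomIndex` helper is assumed, not in the sample):

#include <vector>
#include <opencv2/core/core.hpp>

int pickRandomIndex(const std::vector<int>& images)
{
    cv::RNG& rng = cv::theRNG();
    // Uniform index in [0, size); the cast makes the size_t -> unsigned narrowing explicit.
    return (int)rng((unsigned)images.size());
}
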
@@ -2296,12 +2296,12 @@ void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bow
                                        const SVMTrainParamsExt& svmParamsExt, int descsToDelete )
 {
     RNG& rng = theRNG();
-    int pos_ex = std::count( objectPresent.begin(), objectPresent.end(), (char)1 );
-    int neg_ex = std::count( objectPresent.begin(), objectPresent.end(), (char)0 );
+    int pos_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)1 );
+    int neg_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)0 );
 
     while( descsToDelete != 0 )
     {
-        int randIdx = rng(images.size());
+        int randIdx = rng((unsigned)images.size());
 
         // Prefer positive training examples according to svmParamsExt.targetRatio if required
         if( objectPresent[randIdx] )
@@ -2415,14 +2415,14 @@ void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, cons
     }
 
     // Prepare the input matrices for SVM training.
-    Mat trainData( images.size(), bowExtractor->getVocabulary().rows, CV_32FC1 );
-    Mat responses( images.size(), 1, CV_32SC1 );
+    Mat trainData( (int)images.size(), bowExtractor->getVocabulary().rows, CV_32FC1 );
+    Mat responses( (int)images.size(), 1, CV_32SC1 );
 
     // Transfer bag of words vectors and responses across to the training data matrices
     for( size_t imageIdx = 0; imageIdx < images.size(); imageIdx++ )
     {
         // Transfer image descriptor (bag of words vector) to training data matrix
-        Mat submat = trainData.row(imageIdx);
+        Mat submat = trainData.row((int)imageIdx);
         if( bowImageDescriptors[imageIdx].cols != bowExtractor->descriptorSize() )
         {
             cout << "Error: computed bow image descriptor size " << bowImageDescriptors[imageIdx].cols
@@ -2432,7 +2432,7 @@ void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, cons
         bowImageDescriptors[imageIdx].copyTo( submat );
 
         // Set response value
-        responses.at<int>(imageIdx) = objectPresent[imageIdx] ? 1 : -1;
+        responses.at<int>((int)imageIdx) = objectPresent[imageIdx] ? 1 : -1;
     }
 
     cout << "TRAINING SVM FOR CLASS ..." << objClassName << "..." << endl;

@@ -232,7 +232,7 @@ void saveCameraParams( const string& filename,
 
     if( !imagePoints.empty() )
     {
-        Mat imagePtMat((int)imagePoints.size(), imagePoints[0].size(), CV_32FC2);
+        Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
         for( int i = 0; i < (int)imagePoints.size(); i++ )
         {
             Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);

@@ -95,11 +95,13 @@ int main( int argc, char** argv )
         imshow("image", isColor ? image : gray);
 
         int c = waitKey(0);
+        if( (c & 255) == 27 )
+        {
+            cout << "Exiting ...\n";
+            break;
+        }
         switch( (char)c )
         {
-        case 27:
-            cout << "Exiting ...\n";
-            return 0;
         case 'c':
             if( isColor )
             {

@@ -103,12 +103,12 @@ int main(int argc, char** argv)
     HybridTrackerParams params;
     // motion model params
     params.motion_model = CvMotionModel::LOW_PASS_FILTER;
-    params.low_pass_gain = 0.1;
+    params.low_pass_gain = 0.1f;
     // mean shift params
-    params.ms_tracker_weight = 0.8;
+    params.ms_tracker_weight = 0.8f;
     params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
     // feature tracking params
-    params.ft_tracker_weight = 0.2;
+    params.ft_tracker_weight = 0.2f;
     params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
     params.ft_params.window_size = 0;
 
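
Assigning a `double` literal such as `0.1` to a `float` member triggers MSVC's truncation warning; writing `0.1f` makes the literal a `float` to begin with. A sketch of the pattern (the `Params` struct is an assumed stand-in for `HybridTrackerParams`):

struct Params { float low_pass_gain; };

void setGain(Params& p)
{
    // p.low_pass_gain = 0.1;  // double literal: truncation warning under MSVC
    p.low_pass_gain = 0.1f;    // float literal of the right type: no warning
}
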
@@ -121,12 +121,14 @@ int main(int argc, char** argv)
 
     int i = 0;
     float w[4];
-    while(1)
+    for(;;)
     {
         i++;
         if (live)
         {
             cap >> frame;
+            if( frame.empty() )
+                break;
             frame.copyTo(image);
         }
         else
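
`while(1)` and `while(true)` trip MSVC's C4127 ("conditional expression is constant") at higher warning levels; `for(;;)` expresses the same endless loop without a constant condition, which is why it is substituted throughout this commit. A sketch (the `done` predicate is an assumed placeholder):

bool done();  // assumed placeholder

void mainLoop()
{
    // Same semantics as while(1), but no constant-conditional warning.
    for (;;)
    {
        if (done())
            break;
    }
}
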
@@ -134,12 +136,12 @@ int main(int argc, char** argv)
             fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
             sprintf(img_file, "seqG/%04d.png", i);
             image = imread(img_file, CV_LOAD_IMAGE_COLOR);
-            selection = Rect(w[0]*image.cols, w[1]*image.rows, w[2]*image.cols, w[3]*image.rows);
+            if (image.empty())
+                break;
+            selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+                             cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
         }
 
-        if (image.data == NULL)
-            continue;
-
         sprintf(img_file_num, "Frame: %d", i);
         putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
         if (!image.empty())
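
Building a `Rect` from `float` products previously relied on implicit float-to-int conversion, which both warns under MSVC and truncates toward zero; `cvRound` makes the conversion explicit and rounds to the nearest integer. A sketch (the `scaledRect` helper is assumed, not in the sample):

#include <opencv2/core/core.hpp>

cv::Rect scaledRect(const cv::Mat& image, const float w[4])
{
    // Explicit rounding instead of implicit truncating float -> int conversion.
    return cv::Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                    cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
}
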
@@ -163,7 +165,8 @@ int main(int argc, char** argv)
             bitwise_not(roi, roi);
         }
 
-        drawRectangle(&image, Rect(w[0]*image.cols, w[1]*image.rows, w[2]*image.cols, w[3]*image.rows));
+        drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+                      cvRound(w[2]*image.cols), cvRound(w[3]*image.rows)));
         imshow("Win", image);
 
         waitKey(100);
@@ -174,5 +177,4 @@ int main(int argc, char** argv)
 
     fclose(f);
     return 0;
-
 }

@@ -107,13 +107,13 @@ int main(int argc, char* argv[])
         help();
 
     string images_folder, models_folder;
-    float overlapThreshold = 0.2;
+    float overlapThreshold = 0.2f;
    int numThreads = -1;
    if( argc > 2 )
    {
        images_folder = argv[1];
        models_folder = argv[2];
-        if( argc > 3 ) overlapThreshold = atof(argv[3]);
+        if( argc > 3 ) overlapThreshold = (float)atof(argv[3]);
        if( overlapThreshold < 0 || overlapThreshold > 1)
        {
            cout << "overlapThreshold must be in interval (0,1)." << endl;
@@ -157,7 +157,7 @@ int main(int argc, char* argv[])
 
         imshow( "result", image );
 
-        while(1)
+        for(;;)
         {
             int c = waitKey();
             if( (char)c == 'n')

@@ -48,7 +48,7 @@ public:
   }
 
 private:
-  static void cv_on_mouse(int a_event, int a_x, int a_y, int a_flags, void * a_params)
+  static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
   {
     m_event = a_event;
     m_x = a_x;
@@ -186,7 +186,7 @@ int main(int argc, char * argv[])
       std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
     }
   }
-  int num_modalities = detector->getModalities().size();
+  int num_modalities = (int)detector->getModalities().size();
 
   // Open Kinect sensor
   cv::VideoCapture capture( CV_CAP_OPENNI );
@@ -201,7 +201,7 @@ int main(int argc, char * argv[])
 
   // Main loop
   cv::Mat color, depth;
-  while (true)
+  for(;;)
   {
     // Capture next color/depth pair
     capture.grab();
@@ -262,7 +262,7 @@ int main(int argc, char * argv[])
     std::vector<std::string> class_ids;
     std::vector<cv::Mat> quantized_images;
     match_timer.start();
-    detector->match(sources, matching_threshold, matches, class_ids, quantized_images);
+    detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
     match_timer.stop();
 
     int classes_visited = 0;
@@ -331,6 +331,9 @@ int main(int argc, char * argv[])
 
     cv::FileStorage fs;
+    char key = (char)cvWaitKey(10);
+    if( key == 'q' )
+        break;
 
     switch (key)
     {
       case 'h':
@@ -366,8 +369,8 @@ int main(int argc, char * argv[])
         writeLinemod(detector, filename);
         printf("Wrote detector and templates to %s\n", filename.c_str());
         break;
-      case 'q':
-        return 0;
       default:
         ;
     }
+  }
+  return 0;
@@ -403,8 +406,8 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
 
   for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
   {
-    float x_diff = a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x;
-    float y_diff = a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y;
+    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
+    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
     lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
     l_chain_length += lp_seg_length[l_i];
   }
@@ -412,7 +415,7 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
   {
     if (lp_seg_length[l_i] > 0)
     {
-      int l_cur_num = l_num_cost_pts * lp_seg_length[l_i] / l_chain_length;
+      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
       float l_cur_len = lp_seg_length[l_i] / l_cur_num;
 
       for (int l_j = 0; l_j < l_cur_num; ++l_j)
@@ -421,8 +424,8 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
 
         CvPoint l_pts;
 
-        l_pts.x = l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x;
-        l_pts.y = l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y;
+        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
+        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);
 
         l_chain_vector.push_back(l_pts);
       }
@@ -441,16 +444,16 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
 
   reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);
 
-  CvMat * lp_pts = cvCreateMat(l_chain_vector.size(), 4, CV_32F);
+  CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
   CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
   CvMat * lp_w = cvCreateMat(4, 1, CV_32F);
 
   for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
   {
-    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = lp_src_3Dpts[l_i].x;
-    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = lp_src_3Dpts[l_i].y;
-    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = lp_src_3Dpts[l_i].z;
-    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0;
+    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
+    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
+    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
+    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
   }
   cvSVD(lp_pts, lp_w, 0, lp_v);
 
@@ -493,7 +496,7 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
   }
   int l_w = l_maxx - l_minx + 1;
   int l_h = l_maxy - l_miny + 1;
-  int l_nn = a_chain.size();
+  int l_nn = (int)a_chain.size();
 
   CvPoint * lp_chain = new CvPoint[l_nn];
 
@@ -528,7 +531,7 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
   {
     for (int l_c = 0; l_c < l_w; ++l_c)
     {
-      float l_dist = l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3];
+      float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);
 
       ++l_ind;
 
@@ -538,8 +541,8 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
       {
         for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
         {
-          int l_col = (l_c + l_minx) / (l_p + 1.0);
-          int l_row = (l_r + l_miny) / (l_p + 1.0);
+          int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
+          int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
 
           CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
         }
@@ -548,8 +551,8 @@ void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::ve
       {
         for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
         {
-          int l_col = (l_c + l_minx) / (l_p + 1.0);
-          int l_row = (l_r + l_miny) / (l_p + 1.0);
+          int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
+          int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));
 
           CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
         }
@@ -669,7 +672,7 @@ void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
   cv::convexHull(points, hull);
 
   dst = cv::Mat::zeros(size, CV_8U);
-  const int hull_count = hull.size();
+  const int hull_count = (int)hull.size();
   const cv::Point* hull_pts = &hull[0];
   cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
 }

@@ -192,7 +192,7 @@ void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypo
     {
         if( !trainImages[i].empty() )
         {
-            maskMatchesByTrainImgIdx( matches, i, mask );
+            maskMatchesByTrainImgIdx( matches, (int)i, mask );
             drawMatches( queryImage, queryKeypoints, trainImages[i], trainKeypoints[i],
                          matches, drawImg, Scalar(255, 0, 0), Scalar(0, 255, 255), mask );
             string filename = resultDir + "/res_" + trainImagesNames[i];

@@ -184,7 +184,7 @@ int main( int argc, char* argv[] )
         return -1;
     }
 
-    bool modeRes;
+    bool modeRes=false;
     switch ( imageMode )
     {
         case 0:

@@ -60,7 +60,7 @@ int main(int argc, char** argv)
             // ++filename;
             if(filename[0] == '#')
                 continue;
-            int l = strlen(filename);
+            int l = (int)strlen(filename);
             while(l > 0 && isspace(filename[l-1]))
                 --l;
             filename[l] = '\0';

@@ -4,7 +4,7 @@
 
 using namespace cv;
 
-int main(int argc, char* argv[])
+int main(int, char* [])
 {
     VideoCapture video(0);
     Mat frame, curr, prev, curr64f, prev64f, hann;

@@ -196,7 +196,7 @@ int main(int argc, const char* argv[])
     setMouseCallback(windowName, mouseCallback, &renderer);
     setOpenGlDrawCallback(windowName, openGlDrawCallback, &renderer);
 
-    while (true)
+    for(;;)
     {
         int key = waitKey(10);
 

@@ -43,7 +43,7 @@ void on_mouse( int event, int x, int y, int /*flags*/, void* )
             return;
 
         trainedPoints.push_back( Point(x,y) );
-        trainedPointsMarkers.push_back( classColors.size()-1 );
+        trainedPointsMarkers.push_back( (int)(classColors.size()-1) );
         updateFlag = true;
     }
     else if( event == CV_EVENT_RBUTTONUP )

@@ -101,21 +101,21 @@ int main(int argc, char** argv)
         return -1;
     }
 
-    int transformationType = TransformationType::RIGID_BODY_MOTION;
+    int transformationType = RIGID_BODY_MOTION;
     if( argc == 6 )
     {
         string ttype = argv[5];
         if( ttype == "-rbm" )
         {
-            transformationType = TransformationType::RIGID_BODY_MOTION;
+            transformationType = RIGID_BODY_MOTION;
         }
         else if ( ttype == "-r")
         {
-            transformationType = TransformationType::ROTATION;
+            transformationType = ROTATION;
         }
         else if ( ttype == "-t")
        {
-            transformationType = TransformationType::TRANSLATION;
+            transformationType = TRANSLATION;
        }
        else
        {
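
Qualifying an unscoped enumerator with its enum name, as in `TransformationType::ROTATION`, is a nonstandard extension in C++03 that MSVC flags (C4482); dropping the qualifier uses the enumerators portably. A sketch (the enum is assumed to mirror the sample's definition):

// Assumed to mirror the sample's definition.
enum TransformationType { RIGID_BODY_MOTION, ROTATION, TRANSLATION };

int pickType()
{
    // return TransformationType::ROTATION;  // C4482: nonstandard in C++03
    return ROTATION;                          // plain unscoped-enum usage
}
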
@@ -145,9 +145,9 @@ int main(int argc, char** argv)
     minGradMagnitudes[2] = 3;
     minGradMagnitudes[3] = 1;
 
-    const float minDepth = 0; //in meters
-    const float maxDepth = 3; //in meters
-    const float maxDepthDiff = 0.07; //in meters
+    const float minDepth = 0.f; //in meters
+    const float maxDepth = 3.f; //in meters
+    const float maxDepthDiff = 0.07f; //in meters
 
     tm.start();
     bool isFound = cv::RGBDOdometry( Rt, Mat(),

@@ -173,13 +173,13 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
     vector<Point> hull;
     convexHull(Mat_<Point>(Mat(imgpt)), hull);
     Mat selectedObjMask = Mat::zeros(frame.size(), CV_8U);
-    fillConvexPoly(selectedObjMask, &hull[0], hull.size(), Scalar::all(255), 8, 0);
+    fillConvexPoly(selectedObjMask, &hull[0], (int)hull.size(), Scalar::all(255), 8, 0);
     Rect roi = boundingRect(Mat(hull)) & Rect(Point(), frame.size());
 
     if( runExtraSegmentation )
     {
         selectedObjMask = Scalar::all(GC_BGD);
-        fillConvexPoly(selectedObjMask, &hull[0], hull.size(), Scalar::all(GC_PR_FGD), 8, 0);
+        fillConvexPoly(selectedObjMask, &hull[0], (int)hull.size(), Scalar::all(GC_PR_FGD), 8, 0);
         Mat bgdModel, fgdModel;
         grabCut(frame, selectedObjMask, roi, bgdModel, fgdModel,
                 3, GC_INIT_WITH_RECT + GC_INIT_WITH_MASK);

@@ -570,8 +570,9 @@ int main(int argc, char* argv[])
     {
         Mat_<float> K;
         cameras[i].K().convertTo(K, CV_32F);
-        K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
-        K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
+        float swa = (float)seam_work_aspect;
+        K(0,0) *= swa; K(0,2) *= swa;
+        K(1,1) *= swa; K(1,2) *= swa;
 
         corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
         sizes[i] = images_warped[i].size();

@@ -51,7 +51,7 @@ int process(VideoCapture& capture)
     {
         capture >> frame;
         if (frame.empty())
-            continue;
+            break;
         cv::Mat gray;
         cv::cvtColor(frame,gray,CV_RGB2GRAY);
         findDataMatrix(gray, codes);