Normalize line endings and whitespace

OpenCV Buildbot
2012-10-17 03:18:30 +04:00
committed by Andrey Kamaev
parent 69020da607
commit 04384a71e4
1516 changed files with 258846 additions and 258162 deletions

View File

@@ -26,14 +26,14 @@ int main( int argc, char **argv ){
char atom_window[] = "Drawing 1: Atom";
char rook_window[] = "Drawing 2: Rook";
/// Create black empty images
Mat atom_image = Mat::zeros( w, w, CV_8UC3 );
Mat rook_image = Mat::zeros( w, w, CV_8UC3 );
/// 1. Draw a simple atom:
/// -----------------------
/// 1.a. Creating ellipses
MyEllipse( atom_image, 90 );
MyEllipse( atom_image, 0 );
MyEllipse( atom_image, 45 );
@@ -50,13 +50,13 @@ int main( int argc, char **argv ){
/// 2.b. Creating rectangles
rectangle( rook_image,
Point( 0, 7*w/8.0 ),
Point( w, w),
Scalar( 0, 255, 255 ),
-1,
8 );
/// 2.c. Create a few lines
MyLine( rook_image, Point( 0, 15*w/16 ), Point( w, 15*w/16 ) );
MyLine( rook_image, Point( w/4, 7*w/8 ), Point( w/4, w ) );
MyLine( rook_image, Point( w/2, 7*w/8 ), Point( w/2, w ) );
@@ -84,14 +84,14 @@ void MyEllipse( Mat img, double angle )
int lineType = 8;
ellipse( img,
Point( w/2.0, w/2.0 ),
Size( w/4.0, w/16.0 ),
angle,
0,
360,
Scalar( 255, 0, 0 ),
thickness,
lineType );
}
/**
@@ -103,12 +103,12 @@ void MyFilledCircle( Mat img, Point center )
int thickness = -1;
int lineType = 8;
circle( img,
center,
w/32.0,
Scalar( 0, 0, 255 ),
thickness,
lineType );
}
/**
@@ -146,11 +146,11 @@ void MyPolygon( Mat img )
int npt[] = { 20 };
fillPoly( img,
ppt,
npt,
1,
Scalar( 255, 255, 255 ),
lineType );
}
/**
@@ -161,12 +161,12 @@ void MyLine( Mat img, Point start, Point end )
{
int thickness = 2;
int lineType = 8;
line( img,
start,
end,
Scalar( 0, 0, 0 ),
thickness,
lineType );
}
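
For reference, the primitives being re-indented above can be exercised on their own. A minimal sketch, assuming the same OpenCV 2.x headers; the canvas size and window name are illustrative:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

int main()
{
    const int w = 400;                                  // canvas size, following the tutorial's convention
    Mat canvas = Mat::zeros( w, w, CV_8UC3 );           // black 3-channel image

    // same primitives as in the sample above
    ellipse( canvas, Point( w/2, w/2 ), Size( w/4, w/16 ), 45, 0, 360, Scalar( 255, 0, 0 ), 2, 8 );
    circle( canvas, Point( w/2, w/2 ), w/32, Scalar( 0, 0, 255 ), -1, 8 );   // thickness -1 = filled
    rectangle( canvas, Point( 0, 7*w/8 ), Point( w, w ), Scalar( 0, 255, 255 ), -1, 8 );
    line( canvas, Point( 0, 15*w/16 ), Point( w, 15*w/16 ), Scalar( 0, 0, 0 ), 2, 8 );

    imshow( "Drawing sketch", canvas );
    waitKey( 0 );
    return 0;
}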

View File

@@ -11,7 +11,7 @@
using namespace cv;
/// Global Variables
const int NUMBER = 100;
const int DELAY = 5;
const int window_width = 900;
@@ -44,14 +44,14 @@ int main( int argc, char** argv )
char window_name[] = "Drawing_2 Tutorial";
/// Also create a random object (RNG)
RNG rng( 0xFFFFFFFF );
/// Initialize a matrix filled with zeros
Mat image = Mat::zeros( window_height, window_width, CV_8UC3 );
/// Show it in a window during DELAY ms
imshow( window_name, image );
waitKey( DELAY );
/// Now, let's draw some lines
c = Drawing_Random_Lines(image, window_name, rng);
if( c != 0 ) return 0;
@@ -145,7 +145,7 @@ int Drawing_Random_Rectangles( Mat image, char* window_name, RNG rng )
imshow( window_name, image );
if( waitKey( DELAY ) >= 0 )
{ return -1; }
}
return 0;
@@ -163,7 +163,7 @@ int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
Point center;
center.x = rng.uniform(x_1, x_2);
center.y = rng.uniform(y_1, y_2);
Size axes;
axes.width = rng.uniform(0, 200);
axes.height = rng.uniform(0, 200);
@@ -172,10 +172,10 @@ int Drawing_Random_Ellipses( Mat image, char* window_name, RNG rng )
ellipse( image, center, axes, angle, angle - 100, angle + 200,
randomColor(rng), rng.uniform(-1,9), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
}
@@ -194,28 +194,28 @@ int Drawing_Random_Polylines( Mat image, char* window_name, RNG rng )
Point pt[2][3];
pt[0][0].x = rng.uniform(x_1, x_2);
pt[0][0].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][2].x = rng.uniform(x_1, x_2);
pt[0][2].y = rng.uniform(y_1, y_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].y = rng.uniform(y_1, y_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].y = rng.uniform(y_1, y_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].y = rng.uniform(y_1, y_2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
polylines(image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType);
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
}
return 0;
}
/**
* @function Drawing_Random_Filled_Polygons
@@ -229,22 +229,22 @@ int Drawing_Random_Filled_Polygons( Mat image, char* window_name, RNG rng )
Point pt[2][3];
pt[0][0].x = rng.uniform(x_1, x_2);
pt[0][0].y = rng.uniform(y_1, y_2);
pt[0][1].x = rng.uniform(x_1, x_2);
pt[0][1].y = rng.uniform(y_1, y_2);
pt[0][2].x = rng.uniform(x_1, x_2);
pt[0][2].y = rng.uniform(y_1, y_2);
pt[1][0].x = rng.uniform(x_1, x_2);
pt[1][0].y = rng.uniform(y_1, y_2);
pt[1][1].x = rng.uniform(x_1, x_2);
pt[1][1].y = rng.uniform(y_1, y_2);
pt[1][2].x = rng.uniform(x_1, x_2);
pt[1][2].y = rng.uniform(y_1, y_2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
fillPoly( image, ppt, npt, 2, randomColor(rng), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -264,10 +264,10 @@ int Drawing_Random_Circles( Mat image, char* window_name, RNG rng )
Point center;
center.x = rng.uniform(x_1, x_2);
center.y = rng.uniform(y_1, y_2);
circle( image, center, rng.uniform(0, 300), randomColor(rng),
rng.uniform(-1, 9), lineType );
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -291,7 +291,7 @@ int Displaying_Random_Text( Mat image, char* window_name, RNG rng )
putText( image, "Testing text rendering", org, rng.uniform(0,8),
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
imshow( window_name, image );
if( waitKey(DELAY) >= 0 )
{ return -1; }
@@ -308,7 +308,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG rng )
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
int lineType = 8;
Mat image2;
for( int i = 0; i < 255; i += 2 )
@@ -316,7 +316,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG rng )
image2 = image - Scalar::all(i);
putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType );
imshow( window_name, image2 );
if( waitKey(DELAY) >= 0 )
{ return -1; }
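
A condensed sketch of the random-drawing pattern used in this sample. The RNG seed and canvas size follow the tutorial; the randomColor helper here is an illustrative stand-in for the one defined elsewhere in the sample:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

// illustrative stand-in for the sample's randomColor() helper
static Scalar randomColor( RNG& rng )
{
    return Scalar( rng.uniform( 0, 256 ), rng.uniform( 0, 256 ), rng.uniform( 0, 256 ) );
}

int main()
{
    RNG rng( 0xFFFFFFFF );                              // same seed as the tutorial
    Mat image = Mat::zeros( 600, 900, CV_8UC3 );        // window_height x window_width

    for( int i = 0; i < 100; i++ )                      // NUMBER = 100 in the sample
    {
        Point p1( rng.uniform( 0, image.cols ), rng.uniform( 0, image.rows ) );
        Point p2( rng.uniform( 0, image.cols ), rng.uniform( 0, image.rows ) );
        line( image, p1, p2, randomColor( rng ), rng.uniform( 1, 10 ), 8 );
    }

    imshow( "Random lines sketch", image );
    waitKey( 0 );
    return 0;
}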

View File

@@ -1,78 +1,78 @@
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void help(char* progName)
{
cout << endl
<< "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl
<< "The dft of an image is taken and it's power spectrum is displayed." << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] " << endl << endl;
}
int main(int argc, char ** argv)
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -1;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize( I.rows );
int n = getOptimalDFTSize( I.cols ); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
// crop the spectrum, if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
// rearrange the quadrants of Fourier image so that the origin is at the image center
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("Input Image" , I ); // Show the result
imshow("spectrum magnitude", magI);
waitKey();
return 0;
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void help(char* progName)
{
cout << endl
<< "This program demonstrated the use of the discrete Fourier transform (DFT). " << endl
<< "The dft of an image is taken and it's power spectrum is displayed." << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] " << endl << endl;
}
int main(int argc, char ** argv)
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -1;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize( I.rows );
int n = getOptimalDFTSize( I.cols ); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
// crop the spectrum, if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
// rearrange the quadrants of Fourier image so that the origin is at the image center
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("Input Image" , I ); // Show the result
imshow("spectrum magnitude", magI);
waitKey();
return 0;
}
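
The quadrant swap above (moving the DC term to the image center) is the step most often reused; a minimal sketch of it in isolation, assuming the even-sized magnitude image produced by the crop above:

#include <opencv2/core/core.hpp>

using namespace cv;

// Centers the DC component of a DFT magnitude image (assumes even width and height).
static void shiftDFT( Mat& magI )
{
    int cx = magI.cols/2;
    int cy = magI.rows/2;

    Mat q0( magI, Rect( 0,  0,  cx, cy ) );   // top-left
    Mat q1( magI, Rect( cx, 0,  cx, cy ) );   // top-right
    Mat q2( magI, Rect( 0,  cy, cx, cy ) );   // bottom-left
    Mat q3( magI, Rect( cx, cy, cx, cy ) );   // bottom-right

    Mat tmp;
    q0.copyTo( tmp ); q3.copyTo( q0 ); tmp.copyTo( q3 );  // swap top-left with bottom-right
    q1.copyTo( tmp ); q2.copyTo( q1 ); tmp.copyTo( q2 );  // swap top-right with bottom-left
}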

View File

@@ -7,7 +7,7 @@ using namespace std;
void help(char** av)
{
cout << endl
<< av[0] << " shows the usage of the OpenCV serialization functionality." << endl
<< "usage: " << endl
<< av[0] << " outputfile.yml.gz" << endl
@@ -54,8 +54,8 @@ void read(const FileNode& node, MyData& x, const MyData& default_value = MyData(
}
// This function will print our custom class to the console
ostream& operator<<(ostream& out, const MyData& m)
{
out << "{ id = " << m.id << ", ";
out << "X = " << m.X << ", ";
out << "A = " << m.A << "}";
@@ -82,10 +82,10 @@ int main(int ac, char** av)
fs << "strings" << "["; // text - string sequence
fs << "image1.jpg" << "Awesomeness" << "baboon.jpg";
fs << "]"; // close sequence
fs << "Mapping"; // text - mapping
fs << "{" << "One" << 1;
fs << "Two" << 2 << "}";
fs << "Two" << 2 << "}";
fs << "R" << R; // cv::Mat
fs << "T" << T;
@@ -98,10 +98,10 @@ int main(int ac, char** av)
{//read
cout << endl << "Reading: " << endl;
FileStorage fs;
fs.open(filename, FileStorage::READ);
int itNr;
//fs["iterationNr"] >> itNr;
itNr = (int) fs["iterationNr"];
cout << itNr;
@@ -122,12 +122,12 @@ int main(int ac, char** av)
FileNodeIterator it = n.begin(), it_end = n.end(); // Go through the node
for (; it != it_end; ++it)
cout << (string)*it << endl;
n = fs["Mapping"]; // Read mappings from a sequence
cout << "Two " << (int)(n["Two"]) << "; ";
cout << "One " << (int)(n["One"]) << endl << endl;
cout << "Two " << (int)(n["Two"]) << "; ";
cout << "One " << (int)(n["One"]) << endl << endl;
MyData m;
Mat R, T;
@@ -136,18 +136,18 @@ int main(int ac, char** av)
fs["T"] >> T;
fs["MyData"] >> m; // Read your own structure_
cout << endl
<< "R = " << R << endl;
cout << "T = " << T << endl << endl;
cout << "MyData = " << endl << m << endl << endl;
//Show default behavior for non existing nodes
cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
cout << "Attempt to read NonExisting (should initialize the data structure with its default).";
fs["NonExisting"] >> m;
cout << endl << "NonExisting = " << endl << m << endl;
}
cout << endl
<< "Tip: Open up " << filename << " with a text editor to see the serialized data." << endl;
return 0;
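
A minimal round-trip sketch of the FileStorage usage shown above; the file name and keys are illustrative:

#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    {
        FileStorage fs( "test.yml", FileStorage::WRITE );   // write a scalar and a matrix
        Mat R = Mat::eye( 3, 3, CV_8UC1 );
        fs << "iterationNr" << 100;
        fs << "R" << R;
    }                                                       // fs released by the destructor

    FileStorage fs( "test.yml", FileStorage::READ );        // read them back
    int itNr = (int) fs["iterationNr"];
    Mat R;
    fs["R"] >> R;
    cout << "iterationNr = " << itNr << endl << "R = " << endl << R << endl;
    return 0;
}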

View File

@@ -1,216 +1,216 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
using namespace std;
using namespace cv;
void help()
{
cout
<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows how to scan image objects in OpenCV (cv::Mat). As use case"
<< " we take an input image and divide the native color palette (255) with the " << endl
<< "input. Shows C operator[] method, iterators and at function for on-the-fly item address calculation."<< endl
<< "Usage:" << endl
<< "./howToScanImages imageNameToUse divideWith [G]" << endl
<< "if you add a G parameter the image is processed in gray scale" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* table);
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* table);
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar * table);
int main( int argc, char* argv[])
{
help();
if (argc < 3)
{
cout << "Not enough parameters" << endl;
return -1;
}
Mat I, J;
if( argc == 4 && !strcmp(argv[3],"G") )
I = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
else
I = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (!I.data)
{
cout << "The image" << argv[1] << " could not be loaded." << endl;
return -1;
}
int divideWith; // convert our input string to number - C++ style
stringstream s;
s << argv[2];
s >> divideWith;
if (!s)
{
cout << "Invalid number entered for dividing. " << endl;
return -1;
}
uchar table[256];
for (int i = 0; i < 256; ++i)
table[i] = divideWith* (i/divideWith);
const int times = 100;
double t;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceC(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the C operator [] (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
J = ScanImageAndReduceIterator(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the iterator (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
{
cv::Mat clone_i = I.clone();
ScanImageAndReduceRandomAccess(clone_i, table);
}
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the on-the-fly address generation - at function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
Mat lookUpTable(1, 256, CV_8U);
uchar* p = lookUpTable.data;
for( int i = 0; i < 256; ++i)
p[i] = table[i];
t = (double)getTickCount();
for (int i = 0; i < times; ++i)
LUT(I, lookUpTable, J);
t = 1000*((double)getTickCount() - t)/getTickFrequency();
t /= times;
cout << "Time of reducing with the LUT function (averaged for "
<< times << " runs): " << t << " milliseconds."<< endl;
return 0;
}
Mat& ScanImageAndReduceC(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
int channels = I.channels();
int nRows = I.rows;
int nCols = I.cols * channels;
if (I.isContinuous())
{
nCols *= nRows;
nRows = 1;
}
int i,j;
uchar* p;
for( i = 0; i < nRows; ++i)
{
p = I.ptr<uchar>(i);
for ( j = 0; j < nCols; ++j)
{
p[j] = table[p[j]];
}
}
return I;
}
Mat& ScanImageAndReduceIterator(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
MatIterator_<uchar> it, end;
for( it = I.begin<uchar>(), end = I.end<uchar>(); it != end; ++it)
*it = table[*it];
break;
}
case 3:
{
MatIterator_<Vec3b> it, end;
for( it = I.begin<Vec3b>(), end = I.end<Vec3b>(); it != end; ++it)
{
(*it)[0] = table[(*it)[0]];
(*it)[1] = table[(*it)[1]];
(*it)[2] = table[(*it)[2]];
}
}
}
return I;
}
Mat& ScanImageAndReduceRandomAccess(Mat& I, const uchar* const table)
{
// accept only char type matrices
CV_Assert(I.depth() != sizeof(uchar));
const int channels = I.channels();
switch(channels)
{
case 1:
{
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
I.at<uchar>(i,j) = table[I.at<uchar>(i,j)];
break;
}
case 3:
{
Mat_<Vec3b> _I = I;
for( int i = 0; i < I.rows; ++i)
for( int j = 0; j < I.cols; ++j )
{
_I(i,j)[0] = table[_I(i,j)[0]];
_I(i,j)[1] = table[_I(i,j)[1]];
_I(i,j)[2] = table[_I(i,j)[2]];
}
I = _I;
break;
}
}
return I;
}
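
Of the four scanning strategies timed above, the LUT call is normally the fastest; a minimal sketch of that path alone, with the image name and reduction factor chosen for illustration:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

int main( int argc, char* argv[] )
{
    Mat I = imread( argc > 1 ? argv[1] : "lena.jpg" );   // image name is illustrative
    if( I.empty() ) return -1;

    const int divideWith = 10;                           // example color-reduction factor
    Mat lookUpTable( 1, 256, CV_8U );
    uchar* p = lookUpTable.data;
    for( int i = 0; i < 256; ++i )
        p[i] = (uchar)( divideWith * ( i / divideWith ) );   // same table as in the sample

    Mat J;
    LUT( I, lookUpTable, J );                            // single call replaces the hand-written loops
    imshow( "reduced", J );
    waitKey( 0 );
    return 0;
}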

View File

@@ -1,134 +1,134 @@
#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv; // The new C++ interface API is inside this namespace. Import it.
using namespace std;
void help( char* progName)
{
cout << endl << progName
<< " shows how to use cv::Mat and IplImages together (converting back and forth)." << endl
<< "Also contains example for image read, spliting the planes, merging back and " << endl
<< " color conversion, plus iterating through pixels. " << endl
<< "Usage:" << endl
<< progName << " [image-name Default: lena.jpg]" << endl << endl;
}
// comment out the define to use only the latest C++ API
#define DEMO_MIXED_API_USE
int main( int argc, char** argv )
{
help(argv[0]);
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#ifdef DEMO_MIXED_API_USE
Ptr<IplImage> IplI = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
if(IplI.empty())
{
cerr << "Can not load image " << imagename << endl;
return -1;
}
Mat I(IplI); // Convert to the new style container. Only header created. Image not copied.
#else
Mat I = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
if( I.empty() ) // same as if( !I.data )
{
cerr << "Can not load image " << imagename << endl;
return -1;
}
#endif
// convert image to YUV color space. The output image will be created automatically.
Mat I_YUV;
cvtColor(I, I_YUV, CV_BGR2YCrCb);
vector<Mat> planes; // Use the STL's vector structure to store multiple Mat objects
split(I_YUV, planes); // split the image into separate color planes (Y U V)
#if 1 // change it to 0 if you want to see a blurred and noisy version of this processing
// Mat scanning
// Method 1. process Y plane using an iterator
MatIterator_<uchar> it = planes[0].begin<uchar>(), it_end = planes[0].end<uchar>();
for(; it != it_end; ++it)
{
double v = *it * 1.7 + rand()%21 - 10;
*it = saturate_cast<uchar>(v*v/255);
}
for( int y = 0; y < I_YUV.rows; y++ )
{
// Method 2. process the first chroma plane using pre-stored row pointer.
uchar* Uptr = planes[1].ptr<uchar>(y);
for( int x = 0; x < I_YUV.cols; x++ )
{
Uptr[x] = saturate_cast<uchar>((Uptr[x]-128)/2 + 128);
// Method 3. process the second chroma plane using individual element access
uchar& Vxy = planes[2].at<uchar>(y, x);
Vxy = saturate_cast<uchar>((Vxy-128)/2 + 128);
}
}
#else
Mat noisyI(I.size(), CV_8U); // Create a matrix of the specified size and type
// Fills the matrix with normally distributed random values (around number with deviation off).
// There is also randu() for uniformly distributed random number generation
randn(noisyI, Scalar::all(128), Scalar::all(20));
// blur the noisyI a bit, kernel size is 3x3 and both sigma's are set to 0.5
GaussianBlur(noisyI, noisyI, Size(3, 3), 0.5, 0.5);
const double brightness_gain = 0;
const double contrast_gain = 1.7;
#ifdef DEMO_MIXED_API_USE
// To pass the new matrices to the functions that only work with IplImage or CvMat do:
// step 1) Convert the headers (tip: data will not be copied).
// step 2) call the function (tip: to pass a pointer do not forget unary "&" to form pointers)
IplImage cv_planes_0 = planes[0], cv_noise = noisyI;
cvAddWeighted(&cv_planes_0, contrast_gain, &cv_noise, 1, -128 + brightness_gain, &cv_planes_0);
#else
addWeighted(planes[0], contrast_gain, noisyI, 1, -128 + brightness_gain, planes[0]);
#endif
const double color_scale = 0.5;
// Mat::convertTo() replaces cvConvertScale.
// One must explicitly specify the output matrix type (we keep it intact - planes[1].type())
planes[1].convertTo(planes[1], planes[1].type(), color_scale, 128*(1-color_scale));
// alternative form of cv::convertScale if we know the datatype at compile time ("uchar" here).
// This expression will not create any temporary arrays ( so should be almost as fast as above)
planes[2] = Mat_<uchar>(planes[2]*color_scale + 128*(1-color_scale));
// Mat::mul replaces cvMul(). Again, no temporary arrays are created in case of simple expressions.
planes[0] = planes[0].mul(planes[0], 1./255);
#endif
merge(planes, I_YUV); // now merge the results back
cvtColor(I_YUV, I, CV_YCrCb2BGR); // and produce the output RGB image
namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images
#ifdef DEMO_MIXED_API_USE
// this is to demonstrate that I and IplI really share the data - the result of the above
// processing is stored in I and thus in IplI too.
cvShowImage("image with grain", IplI);
#else
imshow("image with grain", I); // the new MATLAB style function show
#endif
waitKey();
// Tip: No memory freeing is required!
// All the memory will be automatically released by the Vector<>, Mat and Ptr<> destructor.
return 0;
}
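
A pure C++ sketch of the same pipeline without the IplImage interop branch; the file name is illustrative, and the 0.5 chroma scale with offset 64 follows the sample's 128*(1-color_scale) formula:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    Mat I = imread( argc > 1 ? argv[1] : "lena.jpg" );
    if( I.empty() ) return -1;

    Mat I_YUV;
    cvtColor( I, I_YUV, CV_BGR2YCrCb );                  // same color conversion as above

    vector<Mat> planes;
    split( I_YUV, planes );                              // Y, Cr, Cb planes
    planes[1].convertTo( planes[1], planes[1].type(), 0.5, 64 );   // scale chroma toward gray
    planes[2].convertTo( planes[2], planes[2].type(), 0.5, 64 );

    merge( planes, I_YUV );
    cvtColor( I_YUV, I, CV_YCrCb2BGR );
    imshow( "desaturated", I );
    waitKey();
    return 0;
}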

View File

@@ -1,86 +1,86 @@
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
void help(char* progName)
{
cout << endl
<< "This program shows how to filter images with mask: the write it yourself and the"
<< "filter2d way. " << endl
<< "Usage:" << endl
<< progName << " [image_name -- default lena.jpg] [G -- grayscale] " << endl << endl;
}
void Sharpen(const Mat& myImage,Mat& Result);
int main( int argc, char* argv[])
{
help(argv[0]);
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
Mat I, J, K;
if (argc >= 3 && !strcmp("G", argv[2]))
I = imread( filename, CV_LOAD_IMAGE_GRAYSCALE);
else
I = imread( filename, CV_LOAD_IMAGE_COLOR);
namedWindow("Input", CV_WINDOW_AUTOSIZE);
namedWindow("Output", CV_WINDOW_AUTOSIZE);
imshow("Input", I);
double t = (double)getTickCount();
Sharpen(I, J);
t = ((double)getTickCount() - t)/getTickFrequency();
cout << "Hand written function times passed in seconds: " << t << endl;
imshow("Output", J);
cvWaitKey(0);
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
0, -1, 0);
t = (double)getTickCount();
filter2D(I, K, I.depth(), kern );
t = ((double)getTickCount() - t)/getTickFrequency();
cout << "Built-in filter2D time passed in seconds: " << t << endl;
imshow("Output", K);
cvWaitKey(0);
return 0;
}
void Sharpen(const Mat& myImage,Mat& Result)
{
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
const int nChannels = myImage.channels();
Result.create(myImage.size(),myImage.type());
for(int j = 1 ; j < myImage.rows-1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
const uchar* next = myImage.ptr<uchar>(j + 1);
uchar* output = Result.ptr<uchar>(j);
for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
{
*output++ = saturate_cast<uchar>(5*current[i]
-current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
}
}
Result.row(0).setTo(Scalar(0));
Result.row(Result.rows-1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
Result.col(Result.cols-1).setTo(Scalar(0));
}
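
The hand-written loop and the filter2D call above apply the same sharpening mask; a minimal sketch of just the built-in path, with the image name chosen for illustration:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

int main( int argc, char* argv[] )
{
    Mat I = imread( argc > 1 ? argv[1] : "lena.jpg" );
    if( I.empty() ) return -1;

    // same 3x3 sharpening kernel the hand-written Sharpen() loop implements
    Mat kern = (Mat_<char>(3,3) <<  0, -1,  0,
                                   -1,  5, -1,
                                    0, -1,  0);
    Mat K;
    filter2D( I, K, I.depth(), kern );

    imshow( "sharpened", K );
    waitKey( 0 );
    return 0;
}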

View File

@@ -16,38 +16,38 @@ void help()
<< "Shows how output can be formated to OpenCV, python, numpy, csv and C styles." << endl
<< "Usage:" << endl
<< "./cvout_sample" << endl
<< "--------------------------------------------------------------------------" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int,char**)
{
help();
// create by using the constructor
Mat M(2,2, CV_8UC3, Scalar(0,0,255));
cout << "M = " << endl << " " << M << endl << endl;
// create by using the create function()
M.create(4,4, CV_8UC(2));
cout << "M = "<< endl << " " << M << endl << endl;
// create multidimensional matrices
int sz[3] = {2,2,2};
Mat L(3,sz, CV_8UC(1), Scalar::all(0));
// Cannot print via operator <<
// Create using MATLAB style eye, ones or zero matrix
Mat E = Mat::eye(4, 4, CV_64F);
cout << "E = " << endl << " " << E << endl << endl;
Mat O = Mat::ones(2, 2, CV_32F);
cout << "O = " << endl << " " << O << endl << endl;
Mat Z = Mat::zeros(3,3, CV_8UC1);
cout << "Z = " << endl << " " << Z << endl << endl;
// create a 3x3 double-precision identity matrix
Mat C = (Mat_<double>(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
cout << "C = " << endl << " " << C << endl << endl;
Mat RowClone = C.row(1).clone();
@@ -73,9 +73,9 @@ int main(int,char**)
vector<float> v;
v.push_back( (float)CV_PI); v.push_back(2); v.push_back(3.01f);
cout << "Vector of floats via Mat = " << Mat(v) << endl << endl;
vector<Point2f> vPoints(20);
for (size_t E = 0; E < vPoints.size(); ++E)
vPoints[E] = Point2f((float)(E * 5), (float)(E % 7));
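
A short sketch of the alternative output styles the help text above refers to. The randu fill is standard; the 2.x-era format(Mat, const char*) overload is assumed here:

#include <opencv2/core/core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat R( 3, 2, CV_8UC3 );
    randu( R, Scalar::all( 0 ), Scalar::all( 255 ) );    // fill with uniformly distributed random values

    cout << "R (default) = " << endl << R << endl << endl;
    // assumed 2.x-era overload: format(const Mat&, const char*) selecting the output style
    cout << "R (python)  = " << endl << format( R, "python" ) << endl << endl;
    cout << "R (csv)     = " << endl << format( R, "csv" )    << endl << endl;
    return 0;
}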