Make highgui.hpp independent from C API

Andrey Kamaev 2013-04-07 22:45:38 +04:00
parent 288a0634c2
commit 0738ea7d0f
152 changed files with 899 additions and 594 deletions
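For orientation, here is a minimal sketch (not part of the commit) of the pure C++ usage this change is meant to enable once opencv2/highgui.hpp no longer pulls in the C API; the file names are placeholders, and the cv:: names shown (IMREAD_COLOR, WINDOW_AUTOSIZE, CAP_PROP_FRAME_COUNT, VideoWriter::fourcc) are the replacements for the old CV_* macros that appear throughout this diff.

    // Illustrative sketch only; "input.jpg" / "in.avi" / "out.avi" are placeholder paths.
    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>   // after this commit: no opencv2/highgui/highgui_c.h behind it

    int main()
    {
        cv::Mat img = cv::imread("input.jpg", cv::IMREAD_COLOR);          // was CV_LOAD_IMAGE_COLOR
        if (img.empty()) return 1;

        cv::namedWindow("view", cv::WINDOW_AUTOSIZE);                     // was CV_WINDOW_AUTOSIZE
        cv::imshow("view", img);
        cv::waitKey(0);

        cv::VideoCapture cap("in.avi");
        int frames = (int)cap.get(cv::CAP_PROP_FRAME_COUNT);              // was CV_CAP_PROP_FRAME_COUNT
        (void)frames;

        cv::VideoWriter writer("out.avi",
                               cv::VideoWriter::fourcc('X','V','I','D'),  // was CV_FOURCC(...)
                               25.0, img.size());
        writer << img;
        return 0;
    }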

View File

@@ -1,3 +1,4 @@
set(HAVE_FFMPEG 1)
set(NEW_FFMPEG 1)
set(HAVE_FFMPEG_CODEC 1)
set(HAVE_FFMPEG_FORMAT 1)

View File

@@ -29,7 +29,7 @@ if(WITH_GSTREAMER AND NOT WITH_GSTREAMER_1_X)
set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION})
set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION})
endif()
endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_1_X)
# if gstreamer 0.10 was not found, or we specified we wanted 1.x, try to find it
@@ -40,7 +40,7 @@ if(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER)
CHECK_MODULE(gstreamer-app-1.0 HAVE_GSTREAMER_APP)
CHECK_MODULE(gstreamer-riff-1.0 HAVE_GSTREAMER_RIFF)
CHECK_MODULE(gstreamer-pbutils-1.0 HAVE_GSTREAMER_PBUTILS)
if(HAVE_GSTREAMER_BASE AND HAVE_GSTREAMER_VIDEO AND HAVE_GSTREAMER_APP AND HAVE_GSTREAMER_RIFF AND HAVE_GSTREAMER_PBUTILS)
set(HAVE_GSTREAMER TRUE)
set(GSTREAMER_BASE_VERSION ${ALIASOF_gstreamer-base-1.0_VERSION})
@@ -49,7 +49,7 @@ if(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER)
set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-1.0_VERSION})
set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-1.0_VERSION})
endif()
endif(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER)
# --- unicap ---

View File

@@ -66,7 +66,7 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
CvMat* _v = 0;
CvPoint2D32f* v;
-IplImage* img = 0;
IplImage img;
IplImage* gray = 0;
IplImage* thresh = 0;
@@ -105,9 +105,10 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
/* read the image */
sprintf( filename, "%s%s", filepath, imgname );
-img = cvLoadImage( filename );
cv::Mat img2 = cv::imread( filename );
img = img2;
-if( !img )
if( img2.empty() )
{
ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename );
if( max_idx == 1 )
@@ -120,9 +121,9 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);
-gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
gray = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
-thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
thresh = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
-cvCvtColor( img, gray, CV_BGR2GRAY );
cvCvtColor( &img, gray, CV_BGR2GRAY );
count0 = pattern_size.width*pattern_size.height;
@@ -164,7 +165,6 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
find_chessboard_time*1e-6, find_chessboard_time/num_pixels);
cvReleaseMat( &_v );
-cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );
progress = update_progress( progress, idx-1, max_idx, 0 );
@@ -175,7 +175,6 @@ _exit_:
/* release occupied memory */
cvReleaseMat( &_v );
cvReleaseFileStorage( &fs );
-cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );

View File

@@ -912,8 +912,7 @@ void ChamferMatcher::Template::show() const
#ifdef HAVE_OPENCV_HIGHGUI
namedWindow("templ",1);
imshow("templ",templ_color);
-cvWaitKey(0);
waitKey();
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support");
#endif

View File

@@ -496,67 +496,73 @@ CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* r
CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor = 1.);
enum { FILLED = -1,
LINE_4 = 4,
LINE_8 = 8,
LINE_AA = 16
};
//! draws the line segment (pt1, pt2) in the image
CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
-int thickness = 1, int lineType = 8, int shift = 0);
int thickness = 1, int lineType = LINE_8, int shift = 0);
//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2,
const Scalar& color, int thickness = 1,
-int lineType = 8, int shift = 0);
int lineType = LINE_8, int shift = 0);
//! draws the rectangle outline or a solid rectangle covering rec in the image
CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
const Scalar& color, int thickness = 1,
-int lineType = 8, int shift = 0);
int lineType = LINE_8, int shift = 0);
//! draws the circle outline or a solid circle in the image
CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius,
const Scalar& color, int thickness = 1,
-int lineType = 8, int shift = 0);
int lineType = LINE_8, int shift = 0);
//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes,
double angle, double startAngle, double endAngle,
const Scalar& color, int thickness = 1,
-int lineType = 8, int shift = 0);
int lineType = LINE_8, int shift = 0);
//! draws a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color,
-int thickness = 1, int lineType = 8);
int thickness = 1, int lineType = LINE_8);
//! draws a filled convex polygon in the image
CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts,
-const Scalar& color, int lineType = 8,
const Scalar& color, int lineType = LINE_8,
int shift = 0);
CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points,
-const Scalar& color, int lineType = 8,
const Scalar& color, int lineType = LINE_8,
int shift = 0);
//! fills an area bounded by one or more polygons
CV_EXPORTS void fillPoly(Mat& img, const Point** pts,
const int* npts, int ncontours,
-const Scalar& color, int lineType = 8, int shift = 0,
const Scalar& color, int lineType = LINE_8, int shift = 0,
Point offset = Point() );
CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts,
-const Scalar& color, int lineType = 8, int shift = 0,
const Scalar& color, int lineType = LINE_8, int shift = 0,
Point offset = Point() );
//! draws one or more polygonal curves
CV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts,
int ncontours, bool isClosed, const Scalar& color,
-int thickness = 1, int lineType = 8, int shift = 0 );
int thickness = 1, int lineType = LINE_8, int shift = 0 );
CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,
bool isClosed, const Scalar& color,
-int thickness = 1, int lineType = 8, int shift = 0 );
int thickness = 1, int lineType = LINE_8, int shift = 0 );
//! draws contours in the image
CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours,
int contourIdx, const Scalar& color,
-int thickness = 1, int lineType = 8,
int thickness = 1, int lineType = LINE_8,
InputArray hierarchy = noArray(),
int maxLevel = INT_MAX, Point offset = Point() );
@@ -587,7 +593,7 @@ enum
//! renders text string in the image
CV_EXPORTS_W void putText( Mat& img, const String& text, Point org,
int fontFace, double fontScale, Scalar color,
-int thickness = 1, int lineType = 8,
int thickness = 1, int lineType = LINE_8,
bool bottomLeftOrigin = false );
//! returns bounding box of the text string
@@ -631,9 +637,9 @@ CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType);
PCA pca(pcaset, // pass the data
Mat(), // we do not have a pre-computed mean vector,
// so let the PCA engine to compute it
-CV_PCA_DATA_AS_ROW, // indicate that the vectors
PCA::DATA_AS_ROW, // indicate that the vectors
// are stored as matrix rows
-// (use CV_PCA_DATA_AS_COL if the vectors are
// (use PCA::DATA_AS_COL if the vectors are
// the matrix columns)
maxComponents // specify, how many principal components to retain
);
@@ -663,6 +669,11 @@ CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType);
class CV_EXPORTS PCA
{
public:
enum { DATA_AS_ROW = 0,
DATA_AS_COL = 1,
USE_AVG = 2
};
//! default constructor
PCA();
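A small illustrative sketch (not part of the diff) of the new core.hpp names in use; the image size, sample counts and values below are arbitrary placeholders.

    // Illustrative sketch only: LINE_* / FILLED replace the bare literals 8 / -1,
    // and PCA::DATA_AS_ROW replaces the C macro CV_PCA_DATA_AS_ROW.
    #include <opencv2/core.hpp>

    int main()
    {
        cv::Mat canvas = cv::Mat::zeros(240, 320, CV_8UC3);
        cv::line(canvas, cv::Point(10, 10), cv::Point(300, 200),
                 cv::Scalar(0, 255, 0), 2, cv::LINE_AA);
        cv::circle(canvas, cv::Point(160, 120), 40, cv::Scalar(255, 0, 0), cv::FILLED);

        cv::Mat samples(100, 8, CV_32F);
        cv::randu(samples, cv::Scalar::all(0), cv::Scalar::all(1));
        cv::PCA pca(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 3); // keep 3 components
        return 0;
    }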

View File

@@ -221,6 +221,7 @@ endif()
if(WIN32)
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests
endif()
if(UNIX)

View File

@@ -43,32 +43,81 @@
#ifndef __OPENCV_HIGHGUI_HPP__
#define __OPENCV_HIGHGUI_HPP__
-#include "opencv2/highgui/highgui_c.h"
-#ifdef __cplusplus
#include "opencv2/core.hpp"
-struct CvCapture;
-struct CvVideoWriter;
///////////////////////// graphical user interface //////////////////////////
namespace cv
{
-enum {
-// Flags for namedWindow
-WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
-WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed
-WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support
// Flags for namedWindow
enum { WINDOW_NORMAL = 0x00000000, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
WINDOW_AUTOSIZE = 0x00000001, // the user cannot resize the window, the size is constrainted by the image displayed
WINDOW_OPENGL = 0x00001000, // window with opengl support
WINDOW_FULLSCREEN = 1, // change the window to fullscreen
WINDOW_FREERATIO = 0x00000100, // the image expends as much as it can (no ratio constraint)
WINDOW_KEEPRATIO = 0x00000000 // the ratio of the image is respected
};
// Flags for set / getWindowProperty
enum { WND_PROP_FULLSCREEN = 0, // fullscreen property (can be WINDOW_NORMAL or WINDOW_FULLSCREEN)
WND_PROP_AUTOSIZE = 1, // autosize property (can be WINDOW_NORMAL or WINDOW_AUTOSIZE)
WND_PROP_ASPECT_RATIO = 2, // window's aspect ration (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO);
WND_PROP_OPENGL = 3 // opengl support
};
enum { EVENT_MOUSEMOVE = 0,
EVENT_LBUTTONDOWN = 1,
EVENT_RBUTTONDOWN = 2,
EVENT_MBUTTONDOWN = 3,
EVENT_LBUTTONUP = 4,
EVENT_RBUTTONUP = 5,
EVENT_MBUTTONUP = 6,
EVENT_LBUTTONDBLCLK = 7,
EVENT_RBUTTONDBLCLK = 8,
EVENT_MBUTTONDBLCLK = 9
};
enum { EVENT_FLAG_LBUTTON = 1,
EVENT_FLAG_RBUTTON = 2,
EVENT_FLAG_MBUTTON = 4,
EVENT_FLAG_CTRLKEY = 8,
EVENT_FLAG_SHIFTKEY = 16,
EVENT_FLAG_ALTKEY = 32
};
// Qt font
enum { QT_FONT_LIGHT = 25, //QFont::Light,
QT_FONT_NORMAL = 50, //QFont::Normal,
QT_FONT_DEMIBOLD = 63, //QFont::DemiBold,
QT_FONT_BOLD = 75, //QFont::Bold,
QT_FONT_BLACK = 87 //QFont::Black
};
// Qt font style
enum { QT_STYLE_NORMAL = 0, //QFont::StyleNormal,
QT_STYLE_ITALIC = 1, //QFont::StyleItalic,
QT_STYLE_OBLIQUE = 2 //QFont::StyleOblique
};
// Qt "button" type
enum { QT_PUSH_BUTTON = 0,
QT_CHECKBOX = 1,
QT_RADIOBOX = 2
};
typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
typedef void (*TrackbarCallback)(int pos, void* userdata);
typedef void (*OpenGlDrawCallback)(void* userdata);
typedef void (*ButtonCallback)(int state, void* userdata);
-// Flags for set / getWindowProperty
-WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property
-WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property
-WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration
-WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support
-};
CV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE);
CV_EXPORTS_W void destroyWindow(const String& winname);
CV_EXPORTS_W void destroyAllWindows();
CV_EXPORTS_W int startWindowThread();
@@ -78,123 +127,373 @@ CV_EXPORTS_W int waitKey(int delay = 0);
CV_EXPORTS_W void imshow(const String& winname, InputArray mat);
CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height);
CV_EXPORTS_W void moveWindow(const String& winname, int x, int y);
-CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);//YV
CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);
-CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);//YV
CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);
-enum
-{
-EVENT_MOUSEMOVE =0,
-EVENT_LBUTTONDOWN =1,
-EVENT_RBUTTONDOWN =2,
-EVENT_MBUTTONDOWN =3,
-EVENT_LBUTTONUP =4,
-EVENT_RBUTTONUP =5,
-EVENT_MBUTTONUP =6,
-EVENT_LBUTTONDBLCLK =7,
-EVENT_RBUTTONDBLCLK =8,
-EVENT_MBUTTONDBLCLK =9
-};
-enum
-{
-EVENT_FLAG_LBUTTON =1,
-EVENT_FLAG_RBUTTON =2,
-EVENT_FLAG_MBUTTON =4,
-EVENT_FLAG_CTRLKEY =8,
-EVENT_FLAG_SHIFTKEY =16,
-EVENT_FLAG_ALTKEY =32
-};
-typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
//! assigns callback for mouse events
CV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0);
-typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata);
CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname,
int* value, int count,
TrackbarCallback onChange = 0,
void* userdata = 0);
CV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winname);
CV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winname, int pos);
// OpenGL support
-typedef void (*OpenGlDrawCallback)(void* userdata);
CV_EXPORTS void setOpenGlDrawCallback(const String& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0);
CV_EXPORTS void setOpenGlContext(const String& winname);
CV_EXPORTS void updateWindow(const String& winname);
-//Only for Qt
-CV_EXPORTS CvFont fontQt(const String& nameFont, int pointSize=-1,
-Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL,
-int style=CV_STYLE_NORMAL, int spacing=0);
-CV_EXPORTS void addText( const Mat& img, const String& text, Point org, CvFont font);
-CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms CV_DEFAULT(0));
-CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms CV_DEFAULT(0));
// Only for Qt
struct QtFont
{
const char* nameFont; // Qt: nameFont
Scalar color; // Qt: ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
int font_face; // Qt: bool italic
const int* ascii; // font data and metrics
const int* greek;
const int* cyrillic;
float hscale, vscale;
float shear; // slope coefficient: 0 - normal, >0 - italic
int thickness; // Qt: weight
float dx; // horizontal interval between letters
int line_type; // Qt: PointSize
};
CV_EXPORTS QtFont fontQt(const String& nameFont, int pointSize = -1,
Scalar color = Scalar::all(0), int weight = QT_FONT_NORMAL,
int style = QT_STYLE_NORMAL, int spacing = 0);
CV_EXPORTS void addText( const Mat& img, const String& text, Point org, const QtFont& font);
CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms = 0);
CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms = 0);
CV_EXPORTS void saveWindowParameters(const String& windowName);
CV_EXPORTS void loadWindowParameters(const String& windowName);
CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
CV_EXPORTS void stopLoop();
-typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata);
CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,
-void* userdata=NULL, int type=CV_PUSH_BUTTON,
void* userdata = 0, int type = QT_PUSH_BUTTON,
-bool initial_button_state=0);
bool initial_button_state = false);
-//-------------------------
} // cv
//////////////////////////////// image codec ////////////////////////////////
namespace cv
{
-enum
-{
-// 8bit, color or not
-IMREAD_UNCHANGED =-1,
-// 8bit, gray
-IMREAD_GRAYSCALE =0,
-// ?, color
-IMREAD_COLOR =1,
-// any depth, ?
-IMREAD_ANYDEPTH =2,
-// ?, any color
-IMREAD_ANYCOLOR =4
-};
-enum
-{
-IMWRITE_JPEG_QUALITY =1,
-IMWRITE_PNG_COMPRESSION =16,
-IMWRITE_PNG_STRATEGY =17,
-IMWRITE_PNG_BILEVEL =18,
-IMWRITE_PNG_STRATEGY_DEFAULT =0,
-IMWRITE_PNG_STRATEGY_FILTERED =1,
-IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
-IMWRITE_PNG_STRATEGY_RLE =3,
-IMWRITE_PNG_STRATEGY_FIXED =4,
-IMWRITE_PXM_BINARY =32
-};
enum { IMREAD_UNCHANGED = -1, // 8bit, color or not
IMREAD_GRAYSCALE = 0, // 8bit, gray
IMREAD_COLOR = 1, // ?, color
IMREAD_ANYDEPTH = 2, // any depth, ?
IMREAD_ANYCOLOR = 4 // ?, any color
};
enum { IMWRITE_JPEG_QUALITY = 1,
IMWRITE_PNG_COMPRESSION = 16,
IMWRITE_PNG_STRATEGY = 17,
IMWRITE_PNG_BILEVEL = 18,
IMWRITE_PXM_BINARY = 32,
IMWRITE_WEBP_QUALITY = 64
};
enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0,
IMWRITE_PNG_STRATEGY_FILTERED = 1,
IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2,
IMWRITE_PNG_STRATEGY_RLE = 3,
IMWRITE_PNG_STRATEGY_FIXED = 4
};
-CV_EXPORTS_W Mat imread( const String& filename, int flags=1 );
CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
-const std::vector<int>& params=std::vector<int>());
const std::vector<int>& params = std::vector<int>());
CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
-CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst );
CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
CV_EXPORTS_W bool imencode( const String& ext, InputArray img,
CV_OUT std::vector<uchar>& buf,
-const std::vector<int>& params=std::vector<int>());
const std::vector<int>& params = std::vector<int>());
-#ifndef CV_NO_VIDEO_CAPTURE_CPP_API
} // cv
////////////////////////////////// video io /////////////////////////////////
typedef struct CvCapture CvCapture;
typedef struct CvVideoWriter CvVideoWriter;
namespace cv
{
// Camera API
enum { CAP_ANY = 0, // autodetect
CAP_VFW = 200, // platform native
CAP_V4L = 200,
CAP_V4L2 = CAP_V4L,
CAP_FIREWARE = 300, // IEEE 1394 drivers
CAP_FIREWIRE = CAP_FIREWARE,
CAP_IEEE1394 = CAP_FIREWARE,
CAP_DC1394 = CAP_FIREWARE,
CAP_CMU1394 = CAP_FIREWARE,
CAP_QT = 500, // QuickTime
CAP_UNICAP = 600, // Unicap drivers
CAP_DSHOW = 700, // DirectShow (via videoInput)
CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
CAP_OPENNI = 900, // OpenNI (for Kinect)
CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
CAP_ANDROID = 1000, // Android
CAP_XIAPI = 1100, // XIMEA Camera API
CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
CAP_MSMF = 1400 // Microsoft Media Foundation (via videoInput)
};
// generic properties (based on DC1394 properties)
enum { CAP_PROP_POS_MSEC =0,
CAP_PROP_POS_FRAMES =1,
CAP_PROP_POS_AVI_RATIO =2,
CAP_PROP_FRAME_WIDTH =3,
CAP_PROP_FRAME_HEIGHT =4,
CAP_PROP_FPS =5,
CAP_PROP_FOURCC =6,
CAP_PROP_FRAME_COUNT =7,
CAP_PROP_FORMAT =8,
CAP_PROP_MODE =9,
CAP_PROP_BRIGHTNESS =10,
CAP_PROP_CONTRAST =11,
CAP_PROP_SATURATION =12,
CAP_PROP_HUE =13,
CAP_PROP_GAIN =14,
CAP_PROP_EXPOSURE =15,
CAP_PROP_CONVERT_RGB =16,
CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CAP_PROP_RECTIFICATION =18,
CAP_PROP_MONOCROME =19,
CAP_PROP_SHARPNESS =20,
CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature
CAP_PROP_GAMMA =22,
CAP_PROP_TEMPERATURE =23,
CAP_PROP_TRIGGER =24,
CAP_PROP_TRIGGER_DELAY =25,
CAP_PROP_WHITE_BALANCE_RED_V =26,
CAP_PROP_ZOOM =27,
CAP_PROP_FOCUS =28,
CAP_PROP_GUID =29,
CAP_PROP_ISO_SPEED =30,
CAP_PROP_BACKLIGHT =32,
CAP_PROP_PAN =33,
CAP_PROP_TILT =34,
CAP_PROP_ROLL =35,
CAP_PROP_IRIS =36,
CAP_PROP_SETTINGS =37
};
// DC1394 only
// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)
// every feature can have only one mode turned on at a time
enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
CAP_PROP_DC1394_MODE_AUTO = -2,
CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
CAP_PROP_DC1394_MAX = 31
};
// OpenNI map generators
enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
};
// Properties of cameras available through OpenNI interfaces
enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
CAP_PROP_OPENNI_BASELINE = 102, // in mm
CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map
// by changing depth generator's view point (if the flag is "on") or
// sets this view point to its normal one (if the flag is "off").
CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
CAP_PROP_OPENNI_GENERATOR_PRESENT = 109
};
// OpenNI shortcats
enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
};
// OpenNI data given from depth generator
enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator
CAP_OPENNI_BGR_IMAGE = 5,
CAP_OPENNI_GRAY_IMAGE = 6
};
// Supported output modes of OpenNI image generator
enum { CAP_OPENNI_VGA_30HZ = 0,
CAP_OPENNI_SXGA_15HZ = 1,
CAP_OPENNI_SXGA_30HZ = 2,
CAP_OPENNI_QVGA_30HZ = 3,
CAP_OPENNI_QVGA_60HZ = 4
};
// GStreamer
enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
};
// PVAPI
enum { CAP_PROP_PVAPI_MULTICASTIP = 300 // ip for anable multicast master mode. 0 for disable multicast
};
// Properties of cameras available through XIMEA SDK interface
enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
};
// Properties for Android cameras
enum { CAP_PROP_ANDROID_AUTOGRAB = 1024,
CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed
CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed
CAP_PROP_ANDROID_FLASH_MODE = 8001,
CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
CAP_PROP_ANDROID_ANTIBANDING = 8004,
CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008
};
// Android camera output formats
enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR,
CAP_ANDROID_GREY_FRAME = 1, //Y
CAP_ANDROID_COLOR_FRAME_RGB = 2,
CAP_ANDROID_COLOR_FRAME_BGRA = 3,
CAP_ANDROID_COLOR_FRAME_RGBA = 4
};
// Android camera flash modes
enum { CAP_ANDROID_FLASH_MODE_AUTO = 0,
CAP_ANDROID_FLASH_MODE_OFF = 1,
CAP_ANDROID_FLASH_MODE_ON = 2,
CAP_ANDROID_FLASH_MODE_RED_EYE = 3,
CAP_ANDROID_FLASH_MODE_TORCH = 4
};
// Android camera focus modes
enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0,
CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1,
CAP_ANDROID_FOCUS_MODE_EDOF = 2,
CAP_ANDROID_FOCUS_MODE_FIXED = 3,
CAP_ANDROID_FOCUS_MODE_INFINITY = 4,
CAP_ANDROID_FOCUS_MODE_MACRO = 5
};
// Android camera white balance modes
enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1,
CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2,
CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3,
CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4,
CAP_ANDROID_WHITE_BALANCE_SHADE = 5,
CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6,
CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7
};
// Android camera antibanding modes
enum { CAP_ANDROID_ANTIBANDING_50HZ = 0,
CAP_ANDROID_ANTIBANDING_60HZ = 1,
CAP_ANDROID_ANTIBANDING_AUTO = 2,
CAP_ANDROID_ANTIBANDING_OFF = 3
};
// Properties of cameras available through AVFOUNDATION interface
enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
CAP_PROP_IOS_DEVICE_FLASH = 9003,
CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
CAP_PROP_IOS_DEVICE_TORCH = 9005
};
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
};
-template<> void CV_EXPORTS Ptr<CvCapture>::delete_obj();
-template<> void CV_EXPORTS Ptr<CvVideoWriter>::delete_obj();
class CV_EXPORTS_W VideoCapture
{
@@ -210,7 +509,7 @@ public:
CV_WRAP virtual void release();
CV_WRAP virtual bool grab();
-CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0);
CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int flag = 0);
virtual VideoCapture& operator >> (CV_OUT Mat& image);
CV_WRAP virtual bool read(CV_OUT Mat& image);
@@ -227,24 +526,25 @@ class CV_EXPORTS_W VideoWriter
public:
CV_WRAP VideoWriter();
CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
-Size frameSize, bool isColor=true);
Size frameSize, bool isColor = true);
virtual ~VideoWriter();
CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
-Size frameSize, bool isColor=true);
Size frameSize, bool isColor = true);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
virtual VideoWriter& operator << (const Mat& image);
CV_WRAP virtual void write(const Mat& image);
CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);
protected:
Ptr<CvVideoWriter> writer;
};
-#endif
template<> void Ptr<CvCapture>::delete_obj();
template<> void Ptr<CvVideoWriter>::delete_obj();
-}
} // cv
-#endif
#endif
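To illustrate the header above, a minimal sketch (not part of the commit) that touches the new cv:: GUI constants and the MouseCallback typedef; the window and trackbar names are arbitrary placeholders.

    // Illustrative sketch only; exercises EVENT_*, WINDOW_* and setMouseCallback from the new header.
    #include <cstdio>
    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>

    static void onMouse(int event, int x, int y, int /*flags*/, void* /*userdata*/)
    {
        if (event == cv::EVENT_LBUTTONDOWN)          // was CV_EVENT_LBUTTONDOWN in the C API
            std::printf("left click at (%d, %d)\n", x, y);
    }

    int main()
    {
        cv::namedWindow("demo", cv::WINDOW_NORMAL);  // was CV_WINDOW_NORMAL
        cv::setMouseCallback("demo", onMouse);

        int level = 0;
        cv::createTrackbar("level", "demo", &level, 100);

        cv::Mat frame = cv::Mat::zeros(200, 320, CV_8UC3);
        cv::imshow("demo", frame);
        cv::waitKey(0);
        return 0;
    }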

View File

@@ -570,9 +570,6 @@ CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frame_size,
int is_color CV_DEFAULT(1));
-//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename,
-// int is_color CV_DEFAULT(1));
/* write frame to video file */
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );

View File

@@ -23,7 +23,7 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame,
string filename = getDataPath(get<0>(GetParam()));
bool isColor = get<1>(GetParam());
-VideoWriter writer(cv::tempfile(".avi"), CV_FOURCC('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor);
VideoWriter writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor);
TEST_CYCLE() { Mat image = imread(filename, 1); writer << image; }

View File

@@ -540,9 +540,9 @@ double VideoCapture::get(int propId)
VideoWriter::VideoWriter()
{}
-VideoWriter::VideoWriter(const String& filename, int fourcc, double fps, Size frameSize, bool isColor)
VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
-open(filename, fourcc, fps, frameSize, isColor);
open(filename, _fourcc, fps, frameSize, isColor);
}
void VideoWriter::release()
@@ -555,9 +555,9 @@ VideoWriter::~VideoWriter()
release();
}
-bool VideoWriter::open(const String& filename, int fourcc, double fps, Size frameSize, bool isColor)
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
-writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
writer = cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor);
return isOpened();
}
@@ -578,4 +578,9 @@ VideoWriter& VideoWriter::operator << (const Mat& image)
return *this;
}
int VideoWriter::fourcc(char c1, char c2, char c3, char c4)
{
return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
}
}
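The new VideoWriter::fourcc above packs its four characters little-endian, which is the same layout the old CV_FOURCC macro produced, so existing tag values keep their meaning; a quick illustrative check:

    // Illustrative check only: 'M','J','P','G' packs to 0x47504A4D ('M' lands in the low byte).
    #include <cassert>
    #include <opencv2/highgui.hpp>

    int main()
    {
        int tag = cv::VideoWriter::fourcc('M', 'J', 'P', 'G');
        assert(tag == 0x47504A4D);
        return 0;
    }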

View File

@@ -41,7 +41,7 @@
#include "precomp.hpp"
-#ifdef HAVE_FFMPEG
#ifndef WIN32
#include "cap_ffmpeg_impl.hpp"
#else
#include "cap_ffmpeg_api.hpp"

View File

@@ -61,7 +61,9 @@ extern "C" {
#endif
#ifdef WIN32
-#include <libavformat/avformat.h>
# define AVUTIL_COMMON_H
# define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
# include <libavformat/avformat.h>
#else
// if the header path is not specified explicitly, let's deduce it

View File

@@ -199,41 +199,4 @@ void cvSetRatioWindow_QT(const char* name,double prop_value);
double cvGetOpenGlProp_QT(const char* name);
#endif
-/*namespace cv
-{
-class CV_EXPORTS BaseWindow
-{
-public:
-BaseWindow(const String& name, int flags=0);
-virtual ~BaseWindow();
-virtual void close();
-virtual void show(const Mat& mat);
-virtual void resize(Size size);
-virtual void move(Point topleft);
-virtual Size size() const;
-virtual Point topLeft() const;
-virtual void setGeometry(Point topLeft, Size size);
-virtual void getGeometry(Point& topLeft, Size& size) const;
-virtual String getTitle() const;
-virtual void setTitle(const String& str);
-virtual String getName() const;
-virtual void setScaleMode(int mode);
-virtual int getScaleMode();
-virtual void setScrollPos(double pos);
-virtual double getScrollPos() const;
-virtual void setScale(double scale);
-virtual double getScale() const;
-virtual Point getImageCoords(Point pos) const;
-virtual Scalar getPixelValue(Point pos, const String& colorspace=String()) const;
-virtual void addTrackbar( const String& trackbar, int low, int high, int step );
-};
-typedef Ptr<BaseWindow> Window;
-}*/
#endif /* __HIGHGUI_H_ */

View File

@@ -342,15 +342,16 @@ CV_IMPL void cvUpdateWindow(const char*)
#if defined (HAVE_QT)
-CvFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/)
cv::QtFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/)
{
-return cvFontQt(nameFont.c_str(), pointSize,color,weight, style);
CvFont f = cvFontQt(nameFont.c_str(), pointSize,color,weight, style);
return *(cv::QtFont*)(&f);
}
-void cv::addText( const Mat& img, const String& text, Point org, CvFont font)
void cv::addText( const Mat& img, const String& text, Point org, const QtFont& font)
{
CvMat _img = img;
-cvAddText( &_img, text.c_str(), org,&font);
cvAddText( &_img, text.c_str(), org, (CvFont*)&font);
}
void cv::displayStatusBar(const String& name, const String& text, int delayms)
@@ -390,13 +391,13 @@ int cv::createButton(const String& button_name, ButtonCallback on_change, void*
#else
-CvFont cv::fontQt(const String&, int, Scalar, int, int, int)
cv::QtFont cv::fontQt(const String&, int, Scalar, int, int, int)
{
CV_Error(CV_StsNotImplemented, "The library is compiled without QT support");
-return CvFont();
return QtFont();
}
-void cv::addText( const Mat&, const String&, Point, CvFont)
void cv::addText( const Mat&, const String&, Point, const QtFont&)
{
CV_Error(CV_StsNotImplemented, "The library is compiled without QT support");
}

View File

@@ -95,15 +95,15 @@ public:
double fps = fps0;
Size frame_s = Size(img_c, img_r);
-if( tag == CV_FOURCC('H', '2', '6', '1') )
if( tag == VideoWriter::fourcc('H', '2', '6', '1') )
frame_s = Size(352, 288);
-else if( tag == CV_FOURCC('H', '2', '6', '3') )
else if( tag == VideoWriter::fourcc('H', '2', '6', '3') )
frame_s = Size(704, 576);
/*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') ||
tag == CV_FOURCC('j', 'p', 'e', 'g') )
frame_s = Size(1920, 1080);*/
-if( tag == CV_FOURCC('M', 'P', 'E', 'G') )
if( tag == VideoWriter::fourcc('M', 'P', 'E', 'G') )
fps = 25;
VideoWriter writer(filename, tag, fps, frame_s);
@@ -201,7 +201,7 @@ public:
std::string fileName = tempfile(stream.str().c_str());
files->operator[](i) = fileName;
-writers->operator[](i) = new VideoWriter(fileName, CV_FOURCC('X','V','I','D'), 25.0f, FrameSize);
writers->operator[](i) = new VideoWriter(fileName, VideoWriter::fourcc('X','V','I','D'), 25.0f, FrameSize);
CV_Assert(writers->operator[](i)->isOpened());
}
@@ -311,7 +311,7 @@ public:
CV_Assert(capture->isOpened());
const static double eps = 23.0;
-unsigned int frameCount = static_cast<unsigned int>(capture->get(CV_CAP_PROP_FRAME_COUNT));
unsigned int frameCount = static_cast<unsigned int>(capture->get(CAP_PROP_FRAME_COUNT));
CV_Assert(frameCount == WriteVideo_Invoker::FrameCount);
Mat reference(CreateVideoWriterInvoker::FrameSize, CV_8UC3);

View File

@@ -41,7 +41,7 @@
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
@@ -91,7 +91,7 @@ void CV_FramecountTest::run(int)
FrameCount++;
}
-int framecount = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
int framecount = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n"\
"\nName: big_buck_bunny.%s\nActual frame count: %d\n"\

View File

@@ -113,7 +113,7 @@ public:
imwrite(img_path, img);
ts->printf(ts->LOG, "reading test image : %s\n", img_path.c_str());
-Mat img_test = imread(img_path, CV_LOAD_IMAGE_UNCHANGED);
Mat img_test = imread(img_path, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
@@ -140,11 +140,11 @@ public:
string filename = cv::tempfile(".jpg");
imwrite(filename, img);
-img = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
img = imread(filename, IMREAD_UNCHANGED);
filename = string(ts->get_data_path() + "readwrite/test_" + char(k + 48) + "_c" + char(num_channels + 48) + ".jpg");
ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str());
-Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
Mat img_test = imread(filename, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
@@ -171,7 +171,7 @@ public:
string filename = cv::tempfile(".tiff");
imwrite(filename, img);
ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str());
-Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
Mat img_test = imread(filename, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
@@ -242,12 +242,12 @@ public:
Mat im = Mat::zeros(1000,1000, CV_8U);
//randu(im, 0, 256);
vector<int> param;
-param.push_back(CV_IMWRITE_PNG_COMPRESSION);
param.push_back(IMWRITE_PNG_COMPRESSION);
param.push_back(3); //default(3) 0-9.
cv::imencode(".png" ,im ,buff, param);
// hangs
-Mat im2 = imdecode(buff,CV_LOAD_IMAGE_ANYDEPTH);
Mat im2 = imdecode(buff,IMREAD_ANYDEPTH);
}
catch(...)
{
@@ -375,7 +375,7 @@ TEST(Highgui_WebP, encode_decode_lossless_webp)
remove(output.c_str());
-cv::Mat decode = cv::imdecode(buf, CV_LOAD_IMAGE_COLOR);
cv::Mat decode = cv::imdecode(buf, IMREAD_COLOR);
ASSERT_FALSE(decode.empty());
EXPECT_TRUE(cv::norm(decode, img_webp, NORM_INF) == 0);
@@ -394,7 +394,7 @@ TEST(Highgui_WebP, encode_decode_lossy_webp)
for(int q = 100; q>=0; q-=10)
{
std::vector<int> params;
-params.push_back(CV_IMWRITE_WEBP_QUALITY);
params.push_back(IMWRITE_WEBP_QUALITY);
params.push_back(q);
string output = cv::tempfile(".webp");
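As a side note, a minimal sketch (not part of the diff) of passing the renamed encode parameters through the C++ API; the output path is a placeholder.

    // Illustrative sketch only: IMWRITE_* constants replace the CV_IMWRITE_* macros.
    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat img = cv::Mat::zeros(64, 64, CV_8UC3);
        std::vector<int> params;
        params.push_back(cv::IMWRITE_JPEG_QUALITY);   // was CV_IMWRITE_JPEG_QUALITY
        params.push_back(90);
        cv::imwrite("out.jpg", img, params);          // "out.jpg" is a placeholder path
        return 0;
    }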

View File

@@ -59,7 +59,7 @@ void Foo(int /*k*/, void* /*z*/) {}
void CV_HighGuiOnlyGuiTest::run( int /*start_from */)
{
ts->printf(ts->LOG, "GUI 0\n");
-cvDestroyAllWindows();
destroyAllWindows();
ts->printf(ts->LOG, "GUI 1\n");
namedWindow("Win");
@@ -84,7 +84,7 @@ void CV_HighGuiOnlyGuiTest::run( int /*start_from */)
waitKey(500);
ts->printf(ts->LOG, "GUI 8\n");
-cvDestroyAllWindows();
destroyAllWindows();
ts->set_failed_test_info(cvtest::TS::OK);
}

View File

@@ -41,7 +41,7 @@
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
@@ -88,7 +88,7 @@ CV_VideoRandomPositioningTest::~CV_VideoRandomPositioningTest() {}
void CV_VideoPositioningTest::generate_idx_seq(CvCapture* cap, int method)
{
idx.clear();
-int N = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
int N = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
switch(method)
{
case PROGRESSIVE:
@@ -147,7 +147,7 @@ void CV_VideoPositioningTest::run_test(int method)
failed_videos++; continue;
}
-cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0);
cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, 0);
generate_idx_seq(cap, method);
@@ -157,7 +157,7 @@ void CV_VideoPositioningTest::run_test(int method)
{
bool flag = false;
-cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, idx.at(j));
cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, idx.at(j));
/* IplImage* frame = cvRetrieveFrame(cap);
@@ -173,7 +173,7 @@ void CV_VideoPositioningTest::run_test(int method)
flag = !flag;
} */
-int val = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES);
int val = (int)cvGetCaptureProperty(cap, CAP_PROP_POS_FRAMES);
if (idx.at(j) != val)
{

View File

@@ -12,6 +12,7 @@
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/private.hpp"

View File

@@ -41,7 +41,7 @@
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
using namespace cv;
using namespace std;
@@ -56,15 +56,15 @@ string fourccToString(int fourcc)
const VideoFormat g_specific_fmt_list[] =
{
-VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("avi", VideoWriter::fourcc('X', 'V', 'I', 'D')),
-VideoFormat("avi", CV_FOURCC('M', 'P', 'E', 'G')),
VideoFormat("avi", VideoWriter::fourcc('M', 'P', 'E', 'G')),
-VideoFormat("avi", CV_FOURCC('M', 'J', 'P', 'G')),
VideoFormat("avi", VideoWriter::fourcc('M', 'J', 'P', 'G')),
-//VideoFormat("avi", CV_FOURCC('I', 'Y', 'U', 'V')),
//VideoFormat("avi", VideoWriter::fourcc('I', 'Y', 'U', 'V')),
-VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')),
-VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')),
-VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')),
-VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')),
VideoFormat()
};
@@ -416,7 +416,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
for( size_t i = 0; i < IMAGE_COUNT; ++i )
{
string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), i);
-Mat img = imread(file_path, CV_LOAD_IMAGE_COLOR);
Mat img = imread(file_path, IMREAD_COLOR);
if (img.empty())
{
@@ -442,7 +442,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
writer.release();
VideoCapture cap(video_file);
-size_t FRAME_COUNT = (size_t)cap.get(CV_CAP_PROP_FRAME_COUNT);
size_t FRAME_COUNT = (size_t)cap.get(CAP_PROP_FRAME_COUNT);
if (FRAME_COUNT != IMAGE_COUNT )
{

View File

@@ -110,9 +110,9 @@ public:
return;
}
-int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
int N0 = (int)cap.get(CAP_PROP_FRAME_COUNT);
-cap.set(CV_CAP_PROP_POS_FRAMES, 0);
cap.set(CAP_PROP_POS_FRAMES, 0);
-int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
int N = (int)cap.get(CAP_PROP_FRAME_COUNT);
if (N != n_frames || N != N0)
{
@@ -125,14 +125,14 @@ public:
{
int idx = theRNG().uniform(0, N);
-if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
if( !cap.set(CAP_PROP_POS_FRAMES, idx) )
{
ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
-int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);
int idx1 = (int)cap.get(CAP_PROP_POS_FRAMES);
Mat img; cap >> img;
Mat img0 = drawFrame(idx);

View File

@@ -1687,8 +1687,8 @@ TEST(Imgproc_ColorBayer, regression)
{
cvtest::TS* ts = cvtest::TS::ptr();
-Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
-Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", CV_LOAD_IMAGE_UNCHANGED);
Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED);
Mat result;
CV_Assert(given.data != NULL && gold.data != NULL);
@@ -1709,9 +1709,9 @@ TEST(Imgproc_ColorBayerVNG, regression)
{
cvtest::TS* ts = cvtest::TS::ptr();
-Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
string goldfname = string(ts->get_data_path()) + "/cvtcolor/bayerVNG_gold.png";
-Mat gold = imread(goldfname, CV_LOAD_IMAGE_UNCHANGED);
Mat gold = imread(goldfname, IMREAD_UNCHANGED);
Mat result;
CV_Assert(given.data != NULL);
@@ -1804,7 +1804,7 @@ TEST(Imgproc_ColorBayerVNG_Strict, regression)
Mat src, dst, bayer, reference;
std::string full_path = parent_path + image_name;
-src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
src = imread(full_path, IMREAD_UNCHANGED);
if (src.data == NULL)
{
@@ -1824,7 +1824,7 @@ TEST(Imgproc_ColorBayerVNG_Strict, regression)
// reading a reference image
full_path = parent_path + pattern[i] + image_name;
-reference = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
reference = imread(full_path, IMREAD_UNCHANGED);
if (reference.data == NULL)
{
imwrite(full_path, dst);
@@ -2091,7 +2091,7 @@ TEST(ImgProc_BayerEdgeAwareDemosaicing, accuracy)
Mat src, bayer;
std::string full_path = parent_path + image_name;
-src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
src = imread(full_path, IMREAD_UNCHANGED);
if (src.data == NULL)
{
@@ -2141,7 +2141,7 @@ TEST(ImgProc_BayerEdgeAwareDemosaicing, accuracy)
TEST(ImgProc_Bayer2RGBA, accuracy)
{
cvtest::TS* ts = cvtest::TS::ptr();
-Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
Mat rgb, reference;
CV_Assert(raw.channels() == 1);

View File

@@ -143,8 +143,8 @@ TEST(Imgproc_GrabCut, repeatability)
{
cvtest::TS& ts = *cvtest::TS::ptr();
-Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", CV_LOAD_IMAGE_COLOR);
Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", IMREAD_COLOR);
-Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", CV_LOAD_IMAGE_GRAYSCALE);
Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", IMREAD_GRAYSCALE);
Rect roi_1(0, 0, 150, 150);
Mat image_2 = image_1.clone();

View File

@@ -12,8 +12,8 @@
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
-#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#endif

View File

@@ -4,7 +4,6 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_HIGHGUI
-#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui.hpp"
using namespace cv;
@@ -394,7 +393,7 @@ JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPr
VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
union {double prop; const char* name;} u;
-u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING);
u.prop = me->get(CAP_PROP_ANDROID_PREVIEW_SIZES_STRING);
return env->NewStringUTF(u.name);
} catch(cv::Exception e) {
@@ -432,4 +431,4 @@
} // extern "C"
#endif // HAVE_OPENCV_HIGHGUI


@ -669,7 +669,7 @@ namespace cv{
cvConvertScale(m_samples[i], patch, 255/maxval); cvConvertScale(m_samples[i], patch, 255/maxval);
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
cvSaveImage(buf, patch); cv::imwrite(buf, cv::cvarrToMat(patch));
#else #else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support"); CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support");
#endif #endif
@ -1801,17 +1801,16 @@ namespace cv{
sprintf(filename, "%s/%s", path, imagename); sprintf(filename, "%s/%s", path, imagename);
//printf("Reading image %s...", filename); //printf("Reading image %s...", filename);
IplImage* img = 0; IplImage img;
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
img = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE); Mat img2 = cv::imread(filename, IMREAD_GRAYSCALE);
img = img2;
#else #else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support"); CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support");
#endif #endif
//printf("done\n"); //printf("done\n");
extractPatches (img, patches, patch_size); extractPatches (&img, patches, patch_size);
cvReleaseImage(&img);
} }
fclose(pFile); fclose(pFile);
} }
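
A sketch of the zero-copy bridge this hunk relies on: the image is loaded into a cv::Mat and an IplImage header constructed from it is handed to the legacy C consumer, so no cvReleaseImage is needed. The legacy_consumer function and the file path below are stand-ins, and the implicit Mat-to-IplImage conversion shown is the 2.4/3.x spelling (OpenCV 4 uses cvIplImage(m) instead):

    #include <opencv2/core.hpp>
    #include <opencv2/core/core_c.h>   // IplImage
    #include <opencv2/highgui.hpp>     // cv::imread

    static void legacy_consumer(IplImage* img) { (void)img; } // stand-in for extractPatches()

    int main()
    {
        cv::Mat m = cv::imread("patch_source.png", cv::IMREAD_GRAYSCALE); // placeholder path
        if (m.empty())
            return 1;
        IplImage header = m;       // header only, shares m's pixels
        legacy_consumer(&header);  // pass its address where an IplImage* is expected
        return 0;                  // m releases the data; no cvReleaseImage()
    }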


@ -389,7 +389,7 @@ int showRootFilterBoxes(IplImage *image,
color, thickness, line_type, shift); color, thickness, line_type, shift);
} }
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
cvShowImage("Initial image", image); cv::imshow("Initial image", cv::cvarrToMat(image));
#endif #endif
return LATENT_SVM_OK; return LATENT_SVM_OK;
} }
@ -445,7 +445,7 @@ int showPartFilterBoxes(IplImage *image,
} }
} }
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
cvShowImage("Initial image", image); cv::imshow("Initial image", cv::cvarrToMat(image));
#endif #endif
return LATENT_SVM_OK; return LATENT_SVM_OK;
} }
@ -481,7 +481,7 @@ int showBoxes(IplImage *img,
color, thickness, line_type, shift); color, thickness, line_type, shift);
} }
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
cvShowImage("Initial image", img); cv::imshow("Initial image", cv::cvarrToMat(img));
#endif #endif
return LATENT_SVM_OK; return LATENT_SVM_OK;
} }
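
The opposite bridge is used in this file: pixels still owned by C code are wrapped with cv::cvarrToMat() (again without copying) so the C++ cv::imshow can replace cvShowImage. A condensed sketch, with the window name and fill colour chosen arbitrarily:

    #include <opencv2/core.hpp>
    #include <opencv2/core/core_c.h>   // cvCreateImage, cvSet, cvReleaseImage
    #include <opencv2/highgui.hpp>     // cv::imshow, cv::waitKey

    int main()
    {
        IplImage* legacy = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
        cvSet(legacy, cvScalar(0, 128, 255));    // fill with a constant colour

        cv::Mat view = cv::cvarrToMat(legacy);   // header over the same pixels
        cv::imshow("Initial image", view);       // replaces cvShowImage("Initial image", legacy)
        cv::waitKey(0);

        cvReleaseImage(&legacy);                 // the C side still owns the image
        return 0;
    }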


@ -82,8 +82,9 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */)
init.initialize(numThreads); init.initialize(numThreads);
#endif #endif
IplImage* image = cvLoadImage(img_path.c_str()); Mat image2 = cv::imread(img_path.c_str());
if (!image) IplImage image = image2;
if (image2.empty())
{ {
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return; return;
@ -93,13 +94,12 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */)
if (!detector) if (!detector)
{ {
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
cvReleaseImage(&image);
return; return;
} }
CvMemStorage* storage = cvCreateMemStorage(0); CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* detections = 0; CvSeq* detections = 0;
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads); detections = cvLatentSvmDetectObjects(&image, detector, storage, 0.5f, numThreads);
if (detections->total != num_detections) if (detections->total != num_detections)
{ {
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH ); ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
@ -124,7 +124,6 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */)
#endif #endif
cvReleaseMemStorage( &storage ); cvReleaseMemStorage( &storage );
cvReleaseLatentSvmDetector( &detector ); cvReleaseLatentSvmDetector( &detector );
cvReleaseImage( &image );
} }
// Test for c++ version of Latent SVM // Test for c++ version of Latent SVM


@ -47,7 +47,7 @@
///////////// Canny //////////////////////// ///////////// Canny ////////////////////////
TEST(Canny) TEST(Canny)
{ {
Mat img = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); Mat img = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (img.empty()) if (img.empty())
{ {


@ -84,7 +84,7 @@ public:
} }
TEST(Haar) TEST(Haar)
{ {
Mat img = imread(abspath("basketball1.png"), CV_LOAD_IMAGE_GRAYSCALE); Mat img = imread(abspath("basketball1.png"), IMREAD_GRAYSCALE);
if (img.empty()) if (img.empty())
{ {


@ -62,8 +62,8 @@ TEST(Photo_DenoisingGrayscale, regression)
string original_path = folder + "lena_noised_gaussian_sigma=10.png"; string original_path = folder + "lena_noised_gaussian_sigma=10.png";
string expected_path = folder + "lena_noised_denoised_grayscale_tw=7_sw=21_h=10.png"; string expected_path = folder + "lena_noised_denoised_grayscale_tw=7_sw=21_h=10.png";
Mat original = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE); Mat original = imread(original_path, IMREAD_GRAYSCALE);
Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE); Mat expected = imread(expected_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path; ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path;
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
@ -82,8 +82,8 @@ TEST(Photo_DenoisingColored, regression)
string original_path = folder + "lena_noised_gaussian_sigma=10.png"; string original_path = folder + "lena_noised_gaussian_sigma=10.png";
string expected_path = folder + "lena_noised_denoised_lab12_tw=7_sw=21_h=10_h2=10.png"; string expected_path = folder + "lena_noised_denoised_lab12_tw=7_sw=21_h=10_h2=10.png";
Mat original = imread(original_path, CV_LOAD_IMAGE_COLOR); Mat original = imread(original_path, IMREAD_COLOR);
Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR); Mat expected = imread(expected_path, IMREAD_COLOR);
ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path; ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path;
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
@ -102,14 +102,14 @@ TEST(Photo_DenoisingGrayscaleMulti, regression)
string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/"; string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/";
string expected_path = folder + "lena_noised_denoised_multi_tw=7_sw=21_h=15.png"; string expected_path = folder + "lena_noised_denoised_multi_tw=7_sw=21_h=15.png";
Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE); Mat expected = imread(expected_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
vector<Mat> original(imgs_count); vector<Mat> original(imgs_count);
for (int i = 0; i < imgs_count; i++) for (int i = 0; i < imgs_count; i++)
{ {
string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i); string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i);
original[i] = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE); original[i] = imread(original_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path; ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path;
} }
@ -127,14 +127,14 @@ TEST(Photo_DenoisingColoredMulti, regression)
string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/"; string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/";
string expected_path = folder + "lena_noised_denoised_multi_lab12_tw=7_sw=21_h=10_h2=15.png"; string expected_path = folder + "lena_noised_denoised_multi_lab12_tw=7_sw=21_h=10_h2=15.png";
Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR); Mat expected = imread(expected_path, IMREAD_COLOR);
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
vector<Mat> original(imgs_count); vector<Mat> original(imgs_count);
for (int i = 0; i < imgs_count; i++) for (int i = 0; i < imgs_count; i++)
{ {
string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i); string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i);
original[i] = imread(original_path, CV_LOAD_IMAGE_COLOR); original[i] = imread(original_path, IMREAD_COLOR);
ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path; ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path;
} }


@ -23,6 +23,8 @@
#include "opencv2/photo.hpp" #include "opencv2/photo.hpp"
#include "opencv2/highgui.hpp" #include "opencv2/highgui.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/opencv_modules.hpp" #include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE #ifdef HAVE_OPENCV_NONFREE


@ -1,5 +1,6 @@
#include "opencv2/legacy.hpp" #include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp" #include "opencv2/legacy/compat.hpp"
#include "opencv2/highgui/highgui_c.h"
#define OLD_MODULESTR "cv2.cv" #define OLD_MODULESTR "cv2.cv"


@ -211,6 +211,7 @@ gen_template_rw_prop_init = Template("""
simple_argtype_mapping = { simple_argtype_mapping = {
"bool": ("bool", "b", "0"), "bool": ("bool", "b", "0"),
"char": ("char", "b", "0"),
"int": ("int", "i", "0"), "int": ("int", "i", "0"),
"float": ("float", "f", "0.f"), "float": ("float", "f", "0.f"),
"double": ("double", "d", "0"), "double": ("double", "d", "0"),


@ -345,7 +345,7 @@ _exit_:
if( code < 0 ) if( code < 0 )
{ {
#if defined _DEBUG && defined WIN32 #if 0 //defined _DEBUG && defined WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 ); IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 ); cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT ); cvCmpS( img, 0, img, CV_CMP_GT );
@ -484,7 +484,7 @@ _exit_:
if( code < 0 ) if( code < 0 )
{ {
#if defined _DEBUG && defined WIN32 #if 0// defined _DEBUG && defined WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 ); IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 ); cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT ); cvCmpS( img, 0, img, CV_CMP_GT );


@ -72,8 +72,9 @@ void CV_OptFlowPyrLKTest::run( int )
CvMat *_u = 0, *_v = 0, *_v2 = 0; CvMat *_u = 0, *_v = 0, *_v2 = 0;
char* status = 0; char* status = 0;
IplImage* imgI = 0; IplImage imgI;
IplImage* imgJ = 0; IplImage imgJ;
cv::Mat imgI2, imgJ2;
int n = 0, i = 0; int n = 0, i = 0;
@ -115,9 +116,10 @@ void CV_OptFlowPyrLKTest::run( int )
/* read first image */ /* read first image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" ); sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" );
imgI = cvLoadImage( filename, -1 ); imgI2 = cv::imread( filename, cv::IMREAD_UNCHANGED );
imgI = imgI2;
if( !imgI ) if( imgI2.empty() )
{ {
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename ); ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA; code = cvtest::TS::FAIL_MISSING_TEST_DATA;
@ -126,9 +128,10 @@ void CV_OptFlowPyrLKTest::run( int )
/* read second image */ /* read second image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" ); sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" );
imgJ = cvLoadImage( filename, -1 ); imgJ2 = cv::imread( filename, cv::IMREAD_UNCHANGED );
imgJ = imgJ2;
if( !imgJ ) if( imgJ2.empty() )
{ {
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename ); ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA; code = cvtest::TS::FAIL_MISSING_TEST_DATA;
@ -139,7 +142,7 @@ void CV_OptFlowPyrLKTest::run( int )
status = (char*)cvAlloc(n*sizeof(status[0])); status = (char*)cvAlloc(n*sizeof(status[0]));
/* calculate flow */ /* calculate flow */
cvCalcOpticalFlowPyrLK( imgI, imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ), cvCalcOpticalFlowPyrLK( &imgI, &imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ),
4, status, 0, cvTermCriteria( CV_TERMCRIT_ITER| 4, status, 0, cvTermCriteria( CV_TERMCRIT_ITER|
CV_TERMCRIT_EPS, 30, 0.01f ), 0 ); CV_TERMCRIT_EPS, 30, 0.01f ), 0 );
@ -201,9 +204,6 @@ _exit_:
cvReleaseMat( &_v ); cvReleaseMat( &_v );
cvReleaseMat( &_v2 ); cvReleaseMat( &_v2 );
cvReleaseImage( &imgI );
cvReleaseImage( &imgJ );
if( code < 0 ) if( code < 0 )
ts->set_failed_test_info( code ); ts->set_failed_test_info( code );
} }


@ -84,10 +84,10 @@ public:
} }
#ifdef HAVE_OPENCV_HIGHGUI #ifdef HAVE_OPENCV_HIGHGUI
int width() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_WIDTH));} int width() {return static_cast<int>(vc.get(CAP_PROP_FRAME_WIDTH));}
int height() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_HEIGHT));} int height() {return static_cast<int>(vc.get(CAP_PROP_FRAME_HEIGHT));}
int count() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_COUNT));} int count() {return static_cast<int>(vc.get(CAP_PROP_FRAME_COUNT));}
double fps() {return vc.get(CV_CAP_PROP_FPS);} double fps() {return vc.get(CAP_PROP_FPS);}
#else #else
int width() {return 0;} int width() {return 0;}
int height() {return 0;} int height() {return 0;}
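
The capture-property constants lose their CV_ prefix throughout the patch; a minimal sketch of the renamed getters (the video path is a placeholder, and VideoCapture moved to the videoio module in later releases, which highgui.hpp pulls in):

    #include <opencv2/highgui.hpp>   // VideoCapture and the CAP_PROP_* constants
    #include <cstdio>

    int main()
    {
        cv::VideoCapture vc("input.avi");   // placeholder path
        if (!vc.isOpened())
            return 1;
        // CV_CAP_PROP_* -> cv::CAP_PROP_*; get() still returns double
        int    w   = (int)vc.get(cv::CAP_PROP_FRAME_WIDTH);
        int    h   = (int)vc.get(cv::CAP_PROP_FRAME_HEIGHT);
        int    n   = (int)vc.get(cv::CAP_PROP_FRAME_COUNT);
        double fps = vc.get(cv::CAP_PROP_FPS);
        std::printf("%dx%d, %d frames @ %g fps\n", w, h, n, fps);
        return 0;
    }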


@ -40,7 +40,7 @@
#include <cstring> #include <cstring>
#include <ctime> #include <ctime>
#include "opencv2/contrib/contrib.hpp" #include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
static void help(char **argv) static void help(char **argv)
{ {


@ -24,7 +24,7 @@
#include "opencv2/core/utility.hpp" #include "opencv2/core/utility.hpp"
#include "opencv2/video/background_segm.hpp" #include "opencv2/video/background_segm.hpp"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy.hpp" #include "opencv2/legacy.hpp"
#include <stdio.h> #include <stdio.h>


@ -1,7 +1,7 @@
#include "opencv2/video/background_segm.hpp" #include "opencv2/video/background_segm.hpp"
#include "opencv2/legacy/blobtrack.hpp" #include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/legacy/legacy.hpp" #include "opencv2/legacy/legacy.hpp"
#include "opencv2/highgui/highgui.hpp" #include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc_c.h> #include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h> #include <stdio.h>


@ -1,5 +1,5 @@
#include "opencv2/objdetect/objdetect.hpp" #include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include <ctype.h> #include <ctype.h>
#include <stdio.h> #include <stdio.h>


@ -1,8 +1,10 @@
#include "opencv2/objdetect/objdetect.hpp" #include "opencv2/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp" #include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <cctype> #include <cctype>
#include <iostream> #include <iostream>
#include <iterator> #include <iterator>


@ -1,5 +1,5 @@
#include "opencv2/video/tracking.hpp" #include "opencv2/video/tracking.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h> #include <stdio.h>


@ -6,10 +6,10 @@
*/ */
#include "opencv2/objdetect/objdetect.hpp" #include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp" #include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp" #include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp" #include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy/legacy.hpp" #include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp" #include "opencv2/legacy/compat.hpp"


@ -61,7 +61,7 @@ static void trainCalonderClassifier( const string& classifierFilename, const str
string str; string str;
getline( is, str ); getline( is, str );
if (str.empty()) break; if (str.empty()) break;
Mat img = imread( str, CV_LOAD_IMAGE_GRAYSCALE ); Mat img = imread( str, IMREAD_GRAYSCALE );
if( !img.empty() ) if( !img.empty() )
trainImgs.push_back( img ); trainImgs.push_back( img );
} }
@ -106,7 +106,7 @@ static void trainCalonderClassifier( const string& classifierFilename, const str
*/ */
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename ) static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{ {
Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12; Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
if( img1.empty() ) if( img1.empty() )
{ {
cout << "Test image can not be read." << endl; cout << "Test image can not be read." << endl;


@ -32,8 +32,8 @@ int main(int argc, char** argv)
help(); help();
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE ); Mat object = imread( object_filename, IMREAD_GRAYSCALE );
Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE ); Mat scene = imread( scene_filename, IMREAD_GRAYSCALE );
if( !object.data || !scene.data ) if( !object.data || !scene.data )
{ {
@ -47,9 +47,9 @@ int main(int argc, char** argv)
resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC); resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
cvNamedWindow("Object", 1); namedWindow("Object", 1);
cvNamedWindow("Image", 1); namedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1); namedWindow("Object Correspondence", 1);
Size patchSize(32, 32); Size patchSize(32, 32);
LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2); LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);


@ -1,5 +1,5 @@
#include "opencv2/objdetect/objdetect.hpp" #include "opencv2/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include <stdio.h> #include <stdio.h>
#ifdef HAVE_CVCONFIG_H #ifdef HAVE_CVCONFIG_H


@ -1,4 +1,4 @@
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h> #include <stdio.h>


@ -1,5 +1,5 @@
#include "opencv2/video/tracking.hpp" #include "opencv2/video/tracking.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgproc/imgproc_c.h"
#include <time.h> #include <time.h>
#include <stdio.h> #include <stdio.h>


@ -49,8 +49,8 @@ int main(int argc, char** argv)
std::string img2_name = path_name + "/" + std::string(argv[3]); std::string img2_name = path_name + "/" + std::string(argv[3]);
printf("Reading the images...\n"); printf("Reading the images...\n");
Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE); Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE); Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);
// extract keypoints from the first image // extract keypoints from the first image
SURF surf_extractor(5.0e3); SURF surf_extractor(5.0e3);


@ -1,8 +1,10 @@
#include "opencv2/objdetect/objdetect.hpp" #include "opencv2/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp" #include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp" #include "opencv2/core/utility.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <cctype> #include <cctype>
#include <iostream> #include <iostream>
#include <iterator> #include <iterator>


@ -4,7 +4,13 @@
#include <iostream> #include <iostream>
#include <vector> #include <vector>
#include <opencv/highgui.h>
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE #if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h> #include <windows.h>
@ -20,9 +26,6 @@
#include <GL/gl.h> #include <GL/gl.h>
#endif #endif
#include <opencv/cxcore.h>
#include <opencv/cv.h>
using namespace std; using namespace std;
using namespace cv; using namespace cv;
@ -224,19 +227,20 @@ static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMat
int main(void) int main(void)
{ {
help(); help();
CvCapture* video = cvCaptureFromFile("cube4.avi"); VideoCapture video("cube4.avi");
CV_Assert(video); CV_Assert(video.isOpened());
IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3); Mat frame; video >> frame;
IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO); IplImage* grayImage = cvCreateImage(frame.size(),8,1);
cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000); displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
//For debug //For debug
//cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE); //cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
cvSetOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix); setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
vector<CvPoint3D32f> modelPoints; vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints); initPOSIT(&modelPoints);
@ -251,26 +255,27 @@ int main(void)
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0)); vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
while(cvWaitKey(33) != 27) while(waitKey(33) != 27)
{ {
source=cvQueryFrame(video); video >> frame;
cvShowImage("original",source); imshow("original", frame);
foundCorners(&srcImagePoints,source,grayImage); IplImage source = frame;
foundCorners(&srcImagePoints, &source, grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector ); cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector); createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
cvShowImage("POSIT",source); imshow("POSIT", frame);
//For debug //For debug
//cvShowImage("tempGray",grayImage); //cvShowImage("tempGray",grayImage);
if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99) if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0); video.set(CAP_PROP_POS_AVI_RATIO, 0);
} }
cvDestroyAllWindows(); destroyAllWindows();
cvReleaseImage(&grayImage); cvReleaseImage(&grayImage);
cvReleaseCapture(&video); video.release();
cvReleasePOSITObject(&positObject); cvReleasePOSITObject(&positObject);
return 0; return 0;
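
Condensed, the capture-loop conversion applied to this sample looks as follows — cvCaptureFromFile/cvQueryFrame/cvWaitKey become VideoCapture, operator>> and waitKey. This is a sketch only; the rewind here uses CAP_PROP_POS_FRAMES rather than the AVI-ratio property used above:

    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::VideoCapture video("cube4.avi");   // replaces cvCaptureFromFile()
        if (!video.isOpened())
            return 1;

        cv::Mat frame;
        while (cv::waitKey(33) != 27)          // replaces cvWaitKey(33); ESC exits
        {
            video >> frame;                    // replaces cvQueryFrame()
            if (frame.empty())                 // end of clip: rewind and continue
            {
                video.set(cv::CAP_PROP_POS_FRAMES, 0);
                continue;
            }
            cv::imshow("original", frame);     // replaces cvShowImage()
        }
        return 0;                              // VideoCapture releases itself
    }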


@ -47,10 +47,10 @@ int main(int argc, const char** argv)
return -1; return -1;
} }
namedWindow("image", CV_WINDOW_NORMAL); namedWindow("image", WINDOW_NORMAL);
namedWindow("foreground mask", CV_WINDOW_NORMAL); namedWindow("foreground mask", WINDOW_NORMAL);
namedWindow("foreground image", CV_WINDOW_NORMAL); namedWindow("foreground image", WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL); namedWindow("mean background image", WINDOW_NORMAL);
Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2(); Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();


@ -66,8 +66,8 @@ int main(int argc, const char ** argv)
string im1_name = parser.get<string>(0); string im1_name = parser.get<string>(0);
string im2_name = parser.get<string>(1); string im2_name = parser.get<string>(1);
Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE); Mat im1 = imread(im1_name, IMREAD_GRAYSCALE);
Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE); Mat im2 = imread(im2_name, IMREAD_GRAYSCALE);
if (im1.empty() || im2.empty()) if (im1.empty() || im2.empty())
{ {


@ -120,7 +120,7 @@ int main()
imshow("Current chessboard", boards[i]); waitKey(1000); imshow("Current chessboard", boards[i]); waitKey(1000);
} }
cout << "Done" << endl; cout << "Done" << endl;
cvDestroyAllWindows(); destroyAllWindows();
Mat camMat_est; Mat camMat_est;
Mat distCoeffs_est; Mat distCoeffs_est;


@ -33,12 +33,12 @@ static void onMouse( int event, int x, int y, int, void* )
switch( event ) switch( event )
{ {
case CV_EVENT_LBUTTONDOWN: case EVENT_LBUTTONDOWN:
origin = Point(x,y); origin = Point(x,y);
selection = Rect(x,y,0,0); selection = Rect(x,y,0,0);
selectObject = true; selectObject = true;
break; break;
case CV_EVENT_LBUTTONUP: case EVENT_LBUTTONUP:
selectObject = false; selectObject = false;
if( selection.width > 0 && selection.height > 0 ) if( selection.width > 0 && selection.height > 0 )
trackObject = -1; trackObject = -1;


@ -28,7 +28,7 @@ static void on_trackbar(int, void*)
Mat cnt_img = Mat::zeros(w, w, CV_8UC3); Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
int _levels = levels - 3; int _levels = levels - 3;
drawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128,255,255), drawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128,255,255),
3, CV_AA, hierarchy, std::abs(_levels) ); 3, LINE_AA, hierarchy, std::abs(_levels) );
imshow("contours", cnt_img); imshow("contours", cnt_img);
} }


@ -41,7 +41,7 @@ int main( int /*argc*/, char** /*argv*/ )
img = Scalar::all(0); img = Scalar::all(0);
for( i = 0; i < count; i++ ) for( i = 0; i < count; i++ )
circle(img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA); circle(img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA);
int hullcount = (int)hull.size(); int hullcount = (int)hull.size();
Point pt0 = points[hull[hullcount-1]]; Point pt0 = points[hull[hullcount-1]];
@ -49,7 +49,7 @@ int main( int /*argc*/, char** /*argv*/ )
for( i = 0; i < hullcount; i++ ) for( i = 0; i < hullcount; i++ )
{ {
Point pt = points[hull[i]]; Point pt = points[hull[i]];
line(img, pt0, pt, Scalar(0, 255, 0), 1, CV_AA); line(img, pt0, pt, Scalar(0, 255, 0), 1,LINE_AA);
pt0 = pt; pt0 = pt;
} }


@ -84,7 +84,7 @@ int main(int , char** )
imshow(WindowName, ReferenceFrame); imshow(WindowName, ReferenceFrame);
if (cvWaitKey(30) >= 0) break; if (waitKey(30) >= 0) break;
} }
Detector.stop(); Detector.stop();


@ -18,7 +18,7 @@ static void help()
static void draw_subdiv_point( Mat& img, Point2f fp, Scalar color ) static void draw_subdiv_point( Mat& img, Point2f fp, Scalar color )
{ {
circle( img, fp, 3, color, CV_FILLED, 8, 0 ); circle( img, fp, 3, color, FILLED, LINE_8, 0 );
} }
static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color ) static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
@ -34,9 +34,9 @@ static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
pt[0] = Point(cvRound(t[0]), cvRound(t[1])); pt[0] = Point(cvRound(t[0]), cvRound(t[1]));
pt[1] = Point(cvRound(t[2]), cvRound(t[3])); pt[1] = Point(cvRound(t[2]), cvRound(t[3]));
pt[2] = Point(cvRound(t[4]), cvRound(t[5])); pt[2] = Point(cvRound(t[4]), cvRound(t[5]));
line(img, pt[0], pt[1], delaunay_color, 1, CV_AA, 0); line(img, pt[0], pt[1], delaunay_color, 1, LINE_AA, 0);
line(img, pt[1], pt[2], delaunay_color, 1, CV_AA, 0); line(img, pt[1], pt[2], delaunay_color, 1, LINE_AA, 0);
line(img, pt[2], pt[0], delaunay_color, 1, CV_AA, 0); line(img, pt[2], pt[0], delaunay_color, 1, LINE_AA, 0);
} }
#else #else
vector<Vec4f> edgeList; vector<Vec4f> edgeList;
@ -46,7 +46,7 @@ static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
Vec4f e = edgeList[i]; Vec4f e = edgeList[i];
Point pt0 = Point(cvRound(e[0]), cvRound(e[1])); Point pt0 = Point(cvRound(e[0]), cvRound(e[1]));
Point pt1 = Point(cvRound(e[2]), cvRound(e[3])); Point pt1 = Point(cvRound(e[2]), cvRound(e[3]));
line(img, pt0, pt1, delaunay_color, 1, CV_AA, 0); line(img, pt0, pt1, delaunay_color, 1, LINE_AA, 0);
} }
#endif #endif
} }
@ -64,7 +64,7 @@ static void locate_point( Mat& img, Subdiv2D& subdiv, Point2f fp, Scalar active_
{ {
Point2f org, dst; Point2f org, dst;
if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 ) if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 )
line( img, org, dst, active_color, 3, CV_AA, 0 ); line( img, org, dst, active_color, 3, LINE_AA, 0 );
e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT); e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT);
} }
@ -97,8 +97,8 @@ static void paint_voronoi( Mat& img, Subdiv2D& subdiv )
fillConvexPoly(img, ifacet, color, 8, 0); fillConvexPoly(img, ifacet, color, 8, 0);
ifacets[0] = ifacet; ifacets[0] = ifacet;
polylines(img, ifacets, true, Scalar(), 1, CV_AA, 0); polylines(img, ifacets, true, Scalar(), 1, LINE_AA, 0);
circle(img, centers[i], 3, Scalar(), -1, CV_AA, 0); circle(img, centers[i], 3, Scalar(), FILLED, LINE_AA, 0);
} }
} }


@ -44,7 +44,7 @@ static void updateBrightnessContrast( int /*arg*/, void* )
calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0); calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0);
Mat histImage = Mat::ones(200, 320, CV_8U)*255; Mat histImage = Mat::ones(200, 320, CV_8U)*255;
normalize(hist, hist, 0, histImage.rows, CV_MINMAX, CV_32F); normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, CV_32F);
histImage = Scalar::all(255); histImage = Scalar::all(255);
int binW = cvRound((double)histImage.cols/histSize); int binW = cvRound((double)histImage.cols/histSize);


@ -27,7 +27,7 @@ int main(int argc, const char ** argv)
CommandLineParser parser(argc, argv, keys); CommandLineParser parser(argc, argv, keys);
string filename = parser.get<string>(0); string filename = parser.get<string>(0);
Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE); Mat img = imread(filename.c_str(), IMREAD_GRAYSCALE);
if( img.empty() ) if( img.empty() )
{ {
help(); help();
@ -74,7 +74,7 @@ int main(int argc, const char ** argv)
q2.copyTo(q1); q2.copyTo(q1);
tmp.copyTo(q2); tmp.copyTo(q2);
normalize(mag, mag, 0, 1, CV_MINMAX); normalize(mag, mag, 0, 1, NORM_MINMAX);
imshow("spectrum magnitude", mag); imshow("spectrum magnitude", mag);
waitKey(); waitKey();


@ -130,7 +130,7 @@ int main( int argc, const char** argv )
// Call to update the view // Call to update the view
onTrackbar(0, 0); onTrackbar(0, 0);
int c = cvWaitKey(0) & 255; int c = waitKey() & 255;
if( c == 27 ) if( c == 27 )
break; break;


@ -21,7 +21,7 @@ int main()
char wndname[] = "Drawing Demo"; char wndname[] = "Drawing Demo";
const int NUMBER = 100; const int NUMBER = 100;
const int DELAY = 5; const int DELAY = 5;
int lineType = CV_AA; // change it to 8 to see non-antialiased graphics int lineType = LINE_AA; // change it to LINE_8 to see non-antialiased graphics
int i, width = 1000, height = 700; int i, width = 1000, height = 700;
int x1 = -width/2, x2 = width*3/2, y1 = -height/2, y2 = height*3/2; int x1 = -width/2, x2 = width*3/2, y1 = -height/2, y2 = height*3/2;
RNG rng(0xFFFFFFFF); RNG rng(0xFFFFFFFF);
@ -157,14 +157,14 @@ int main()
return 0; return 0;
} }
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0); Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((width - textsize.width)/2, (height - textsize.height)/2); Point org((width - textsize.width)/2, (height - textsize.height)/2);
Mat image2; Mat image2;
for( i = 0; i < 255; i += 2 ) for( i = 0; i < 255; i += 2 )
{ {
image2 = image - Scalar::all(i); image2 = image - Scalar::all(i);
putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3, putText(image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType); Scalar(i, i, 255), 5, lineType);
imshow(wndname, image2); imshow(wndname, image2);
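
The drawing constants change in lock-step: CV_AA becomes LINE_AA, CV_FILLED becomes FILLED and CV_FONT_HERSHEY_* becomes FONT_HERSHEY_*. A small sketch with arbitrary geometry and window name:

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>   // circle, line, putText and the LineTypes enum
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat img(400, 600, CV_8UC3, cv::Scalar::all(0));
        cv::circle(img, cv::Point(100, 100), 40, cv::Scalar(0, 0, 255), cv::FILLED, cv::LINE_AA);
        cv::line(img, cv::Point(0, 0), cv::Point(599, 399), cv::Scalar(0, 255, 0), 2, cv::LINE_AA);
        cv::putText(img, "OpenCV", cv::Point(200, 350), cv::FONT_HERSHEY_COMPLEX, 2,
                    cv::Scalar(255, 255, 255), 3, cv::LINE_AA);
        cv::imshow("drawing", img);
        cv::waitKey(0);
        return 0;
    }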


@ -34,7 +34,7 @@ int newMaskVal = 255;
static void onMouse( int event, int x, int y, int, void* ) static void onMouse( int event, int x, int y, int, void* )
{ {
if( event != CV_EVENT_LBUTTONDOWN ) if( event != EVENT_LBUTTONDOWN )
return; return;
Point seed = Point(x,y); Point seed = Point(x,y);


@ -82,12 +82,12 @@ void processImage(int /*h*/, void*)
continue; continue;
drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8); drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8);
ellipse(cimage, box, Scalar(0,0,255), 1, CV_AA); ellipse(cimage, box, Scalar(0,0,255), 1, LINE_AA);
ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, CV_AA); ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, LINE_AA);
Point2f vtx[4]; Point2f vtx[4];
box.points(vtx); box.points(vtx);
for( int j = 0; j < 4; j++ ) for( int j = 0; j < 4; j++ )
line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, CV_AA); line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, LINE_AA);
} }
imshow("result", cimage); imshow("result", cimage);


@ -66,13 +66,13 @@ int main( int argc, char** argv ) {
} }
// Load images // Load images
Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE ); Mat imgA = imread(argv[1], IMREAD_GRAYSCALE );
if( !imgA.data ) { if( !imgA.data ) {
std::cout<< " --(!) Error reading image " << argv[1] << std::endl; std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
return -1; return -1;
} }
Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE ); Mat imgB = imread(argv[2], IMREAD_GRAYSCALE );
if( !imgA.data ) { if( !imgA.data ) {
std::cout << " --(!) Error reading image " << argv[2] << std::endl; std::cout << " --(!) Error reading image " << argv[2] << std::endl;
return -1; return -1;
@ -123,7 +123,7 @@ int main( int argc, char** argv ) {
Mat imgMatch; Mat imgMatch;
drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch); drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);
namedWindow("matches", CV_WINDOW_KEEPRATIO); namedWindow("matches", WINDOW_KEEPRATIO);
imshow("matches", imgMatch); imshow("matches", imgMatch);
waitKey(0); waitKey(0);
} }


@ -40,8 +40,8 @@ int main(int argc, char** argv)
} }
//printf("Reading the images...\n"); //printf("Reading the images...\n");
Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE); Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE); Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);
// extract keypoints from the first image // extract keypoints from the first image
SURF surf_extractor(5.0e3); SURF surf_extractor(5.0e3);


@ -33,13 +33,13 @@ const Scalar BLUE = Scalar(255,0,0);
const Scalar LIGHTBLUE = Scalar(255,255,160); const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0); const Scalar GREEN = Scalar(0,255,0);
const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY; const int BGD_KEY = EVENT_FLAG_CTRLKEY;
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; const int FGD_KEY = EVENT_FLAG_SHIFTKEY;
static void getBinMask( const Mat& comMask, Mat& binMask ) static void getBinMask( const Mat& comMask, Mat& binMask )
{ {
if( comMask.empty() || comMask.type()!=CV_8UC1 ) if( comMask.empty() || comMask.type()!=CV_8UC1 )
CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" ); CV_Error( Error::StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols ) if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )
binMask.create( comMask.size(), CV_8UC1 ); binMask.create( comMask.size(), CV_8UC1 );
binMask = comMask & 1; binMask = comMask & 1;
@ -132,7 +132,7 @@ void GCApplication::showImage() const
void GCApplication::setRectInMask() void GCApplication::setRectInMask()
{ {
assert( !mask.empty() ); CV_Assert( !mask.empty() );
mask.setTo( GC_BGD ); mask.setTo( GC_BGD );
rect.x = max(0, rect.x); rect.x = max(0, rect.x);
rect.y = max(0, rect.y); rect.y = max(0, rect.y);
@ -176,7 +176,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
// TODO add bad args check // TODO add bad args check
switch( event ) switch( event )
{ {
case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels case EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
{ {
bool isb = (flags & BGD_KEY) != 0, bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0; isf = (flags & FGD_KEY) != 0;
@ -189,7 +189,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
lblsState = IN_PROCESS; lblsState = IN_PROCESS;
} }
break; break;
case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels case EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
{ {
bool isb = (flags & BGD_KEY) != 0, bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0; isf = (flags & FGD_KEY) != 0;
@ -197,13 +197,13 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
prLblsState = IN_PROCESS; prLblsState = IN_PROCESS;
} }
break; break;
case CV_EVENT_LBUTTONUP: case EVENT_LBUTTONUP:
if( rectState == IN_PROCESS ) if( rectState == IN_PROCESS )
{ {
rect = Rect( Point(rect.x, rect.y), Point(x,y) ); rect = Rect( Point(rect.x, rect.y), Point(x,y) );
rectState = SET; rectState = SET;
setRectInMask(); setRectInMask();
assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
showImage(); showImage();
} }
if( lblsState == IN_PROCESS ) if( lblsState == IN_PROCESS )
@ -213,7 +213,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
showImage(); showImage();
} }
break; break;
case CV_EVENT_RBUTTONUP: case EVENT_RBUTTONUP:
if( prLblsState == IN_PROCESS ) if( prLblsState == IN_PROCESS )
{ {
setLblsInMask(flags, Point(x,y), true); setLblsInMask(flags, Point(x,y), true);
@ -221,11 +221,11 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
showImage(); showImage();
} }
break; break;
case CV_EVENT_MOUSEMOVE: case EVENT_MOUSEMOVE:
if( rectState == IN_PROCESS ) if( rectState == IN_PROCESS )
{ {
rect = Rect( Point(rect.x, rect.y), Point(x,y) ); rect = Rect( Point(rect.x, rect.y), Point(x,y) );
assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
showImage(); showImage();
} }
else if( lblsState == IN_PROCESS ) else if( lblsState == IN_PROCESS )
@ -296,15 +296,15 @@ int main( int argc, char** argv )
help(); help();
const string winName = "image"; const string winName = "image";
cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE ); namedWindow( winName.c_str(), WINDOW_AUTOSIZE );
cvSetMouseCallback( winName.c_str(), on_mouse, 0 ); setMouseCallback( winName.c_str(), on_mouse, 0 );
gcapp.setImageAndWinName( image, winName ); gcapp.setImageAndWinName( image, winName );
gcapp.showImage(); gcapp.showImage();
for(;;) for(;;)
{ {
int c = cvWaitKey(0); int c = waitKey();
switch( (char) c ) switch( (char) c )
{ {
case '\x1b': case '\x1b':
@ -331,6 +331,6 @@ int main( int argc, char** argv )
} }
exit_main: exit_main:
cvDestroyWindow( winName.c_str() ); destroyWindow( winName.c_str() );
return 0; return 0;
} }
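
The window and mouse constants follow the same pattern (CV_WINDOW_* to WINDOW_*, CV_EVENT_* to EVENT_*), and cvNamedWindow/cvSetMouseCallback/cvWaitKey give way to their cv:: counterparts. A self-contained sketch with an arbitrary window name:

    #include <opencv2/core.hpp>
    #include <opencv2/highgui.hpp>
    #include <cstdio>

    static void onMouse(int event, int x, int y, int /*flags*/, void* /*userdata*/)
    {
        if (event == cv::EVENT_LBUTTONDOWN)              // was CV_EVENT_LBUTTONDOWN
            std::printf("click at (%d, %d)\n", x, y);
    }

    int main()
    {
        cv::Mat canvas(300, 400, CV_8UC3, cv::Scalar::all(0));
        cv::namedWindow("image", cv::WINDOW_AUTOSIZE);   // was cvNamedWindow(..., CV_WINDOW_AUTOSIZE)
        cv::setMouseCallback("image", onMouse, 0);       // was cvSetMouseCallback
        cv::imshow("image", canvas);
        while ((cv::waitKey(0) & 255) != 27)             // was cvWaitKey(0); ESC quits
            ;
        cv::destroyWindow("image");                      // was cvDestroyWindow
        return 0;
    }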


@ -37,8 +37,8 @@ int main(int argc, char** argv)
for( size_t i = 0; i < circles.size(); i++ ) for( size_t i = 0; i < circles.size(); i++ )
{ {
Vec3i c = circles[i]; Vec3i c = circles[i];
circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, CV_AA); circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, CV_AA); circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
} }
imshow("detected circles", cimg); imshow("detected circles", cimg);


@ -51,7 +51,7 @@ int main(int argc, char** argv)
for( size_t i = 0; i < lines.size(); i++ ) for( size_t i = 0; i < lines.size(); i++ )
{ {
Vec4i l = lines[i]; Vec4i l = lines[i];
line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA); line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, LINE_AA);
} }
#endif #endif
imshow("source", src); imshow("source", src);


@ -47,12 +47,12 @@ static void onMouse(int event, int x, int y, int, void*) {
} }
switch (event) { switch (event) {
case CV_EVENT_LBUTTONDOWN: case EVENT_LBUTTONDOWN:
origin = Point(x, y); origin = Point(x, y);
selection = Rect(x, y, 0, 0); selection = Rect(x, y, 0, 0);
selectObject = true; selectObject = true;
break; break;
case CV_EVENT_LBUTTONUP: case EVENT_LBUTTONUP:
selectObject = false; selectObject = false;
trackObject = -1; trackObject = -1;
break; break;
@ -96,8 +96,8 @@ int main(int argc, char** argv)
return 0; return 0;
} }
cout << "Opened camera" << endl; cout << "Opened camera" << endl;
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); cap.set(CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); cap.set(CAP_PROP_FRAME_HEIGHT, 480);
cap >> frame; cap >> frame;
} }
@ -137,7 +137,7 @@ int main(int argc, char** argv)
int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]); int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
CV_Assert(values_read == 5); CV_Assert(values_read == 5);
sprintf(img_file, "seqG/%04d.png", i); sprintf(img_file, "seqG/%04d.png", i);
image = imread(img_file, CV_LOAD_IMAGE_COLOR); image = imread(img_file, IMREAD_COLOR);
if (image.empty()) if (image.empty())
break; break;
selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows), selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),


@ -1,9 +1,9 @@
#include <stdio.h> #include <stdio.h>
#include <iostream> #include <iostream>
#include "opencv2/imgproc/imgproc.hpp" #include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/highgui/highgui.hpp" #include <opencv2/highgui/highgui.hpp>
#include "opencv2/flann/miniflann.hpp" #include <opencv2/flann/miniflann.hpp>
#include "opencv2/core/utility.hpp" #include <opencv2/core/utility.hpp>
using namespace cv; // all the new API is put into "cv" namespace. Export its content using namespace cv; // all the new API is put into "cv" namespace. Export its content
using namespace std; using namespace std;
@ -22,6 +22,10 @@ static void help()
// enable/disable use of mixed API in the code below. // enable/disable use of mixed API in the code below.
#define DEMO_MIXED_API_USE 1 #define DEMO_MIXED_API_USE 1
#ifdef DEMO_MIXED_API_USE
# include <opencv2/highgui/highgui_c.h>
#endif
int main( int argc, char** argv ) int main( int argc, char** argv )
{ {
help(); help();
@ -110,7 +114,7 @@ int main( int argc, char** argv )
cvtColor(img_yuv, img, COLOR_YCrCb2BGR); cvtColor(img_yuv, img, COLOR_YCrCb2BGR);
// this is counterpart for cvNamedWindow // this is counterpart for cvNamedWindow
namedWindow("image with grain", CV_WINDOW_AUTOSIZE); namedWindow("image with grain", WINDOW_AUTOSIZE);
#if DEMO_MIXED_API_USE #if DEMO_MIXED_API_USE
// this is to demonstrate that img and iplimg really share the data - the result of the above // this is to demonstrate that img and iplimg really share the data - the result of the above
// processing is stored in img and thus in iplimg too. // processing is stored in img and thus in iplimg too.

View File

@ -347,10 +347,10 @@ int main (const int argc, const char * argv[])
cout << "The warped image has been saved in the file: " << warpedImFile << endl << flush; cout << "The warped image has been saved in the file: " << warpedImFile << endl << flush;
namedWindow ("image", CV_WINDOW_AUTOSIZE); namedWindow ("image", WINDOW_AUTOSIZE);
namedWindow ("template", CV_WINDOW_AUTOSIZE); namedWindow ("template", WINDOW_AUTOSIZE);
namedWindow ("warped image", CV_WINDOW_AUTOSIZE); namedWindow ("warped image", WINDOW_AUTOSIZE);
namedWindow ("error (black: no error)", CV_WINDOW_AUTOSIZE); namedWindow ("error (black: no error)", WINDOW_AUTOSIZE);
moveWindow ("template", 350, 350); moveWindow ("template", 350, 350);
moveWindow ("warped image", 600, 300); moveWindow ("warped image", 600, 300);


@ -27,11 +27,11 @@ Point prevPt(-1,-1);
static void onMouse( int event, int x, int y, int flags, void* ) static void onMouse( int event, int x, int y, int flags, void* )
{ {
if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) ) if( event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON) )
prevPt = Point(-1,-1); prevPt = Point(-1,-1);
else if( event == CV_EVENT_LBUTTONDOWN ) else if( event == EVENT_LBUTTONDOWN )
prevPt = Point(x,y); prevPt = Point(x,y);
else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON) ) else if( event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON) )
{ {
Point pt(x,y); Point pt(x,y);
if( prevPt.x < 0 ) if( prevPt.x < 0 )


@ -47,13 +47,13 @@ int main( int /*argc*/, char** /*argv*/ )
Mat pointChunk = points.rowRange(k*sampleCount/clusterCount, Mat pointChunk = points.rowRange(k*sampleCount/clusterCount,
k == clusterCount - 1 ? sampleCount : k == clusterCount - 1 ? sampleCount :
(k+1)*sampleCount/clusterCount); (k+1)*sampleCount/clusterCount);
rng.fill(pointChunk, CV_RAND_NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05)); rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
} }
randShuffle(points, 1, &rng); randShuffle(points, 1, &rng);
kmeans(points, clusterCount, labels, kmeans(points, clusterCount, labels,
TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0), TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0),
3, KMEANS_PP_CENTERS, centers); 3, KMEANS_PP_CENTERS, centers);
img = Scalar::all(0); img = Scalar::all(0);
@ -62,7 +62,7 @@ int main( int /*argc*/, char** /*argv*/ )
{ {
int clusterIdx = labels.at<int>(i); int clusterIdx = labels.at<int>(i);
Point ipt = points.at<Point2f>(i); Point ipt = points.at<Point2f>(i);
circle( img, ipt, 2, colorTab[clusterIdx], CV_FILLED, CV_AA ); circle( img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA );
} }
imshow("clusters", img); imshow("clusters", img);
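
Beyond HighGUI, the same hunk drops the legacy CV_RAND_NORMAL and CV_TERMCRIT_* names; sketched in isolation (point count and seed chosen arbitrarily):

    #include <opencv2/core.hpp>

    int main()
    {
        cv::RNG rng(12345);
        cv::Mat points(100, 1, CV_32FC2);
        // CV_RAND_NORMAL -> cv::RNG::NORMAL
        rng.fill(points, cv::RNG::NORMAL, cv::Scalar(50, 50), cv::Scalar(15, 15));

        cv::Mat labels, centers;
        // CV_TERMCRIT_EPS + CV_TERMCRIT_ITER -> TermCriteria::EPS + TermCriteria::COUNT
        cv::kmeans(points, 3, labels,
                   cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
                   3, cv::KMEANS_PP_CENTERS, centers);
        return 0;
    }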


@ -34,15 +34,15 @@ int main( int argc, char** argv )
cap.open(argv[1]); cap.open(argv[1]);
if( cap.isOpened() ) if( cap.isOpened() )
cout << "Video " << argv[1] << cout << "Video " << argv[1] <<
": width=" << cap.get(CV_CAP_PROP_FRAME_WIDTH) << ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
", height=" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
", nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl; ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
if( argc > 2 && isdigit(argv[2][0]) ) if( argc > 2 && isdigit(argv[2][0]) )
{ {
int pos; int pos;
sscanf(argv[2], "%d", &pos); sscanf(argv[2], "%d", &pos);
cout << "seeking to frame #" << pos << endl; cout << "seeking to frame #" << pos << endl;
cap.set(CV_CAP_PROP_POS_FRAMES, pos); cap.set(CAP_PROP_POS_FRAMES, pos);
} }
} }


@ -31,7 +31,7 @@ class Mouse
public: public:
static void start(const std::string& a_img_name) static void start(const std::string& a_img_name)
{ {
cvSetMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0); cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
} }
static int event(void) static int event(void)
{ {
@ -190,14 +190,14 @@ int main(int argc, char * argv[])
int num_modalities = (int)detector->getModalities().size(); int num_modalities = (int)detector->getModalities().size();
// Open Kinect sensor // Open Kinect sensor
cv::VideoCapture capture( CV_CAP_OPENNI ); cv::VideoCapture capture( cv::CAP_OPENNI );
if (!capture.isOpened()) if (!capture.isOpened())
{ {
printf("Could not open OpenNI-capable sensor\n"); printf("Could not open OpenNI-capable sensor\n");
return -1; return -1;
} }
capture.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1); capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
double focal_length = capture.get(CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH); double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
//printf("Focal length = %f\n", focal_length); //printf("Focal length = %f\n", focal_length);
// Main loop // Main loop
@ -206,8 +206,8 @@ int main(int argc, char * argv[])
{ {
// Capture next color/depth pair // Capture next color/depth pair
capture.grab(); capture.grab();
capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP); capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE); capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);
std::vector<cv::Mat> sources; std::vector<cv::Mat> sources;
sources.push_back(color); sources.push_back(color);
@ -224,7 +224,7 @@ int main(int argc, char * argv[])
cv::Point pt1 = mouse - roi_offset; // top left cv::Point pt1 = mouse - roi_offset; // top left
cv::Point pt2 = mouse + roi_offset; // bottom right cv::Point pt2 = mouse + roi_offset; // bottom right
if (event == CV_EVENT_RBUTTONDOWN) if (event == cv::EVENT_RBUTTONDOWN)
{ {
// Compute object mask by subtracting the plane within the ROI // Compute object mask by subtracting the plane within the ROI
std::vector<CvPoint> chain(4); std::vector<CvPoint> chain(4);
@ -331,7 +331,7 @@ int main(int argc, char * argv[])
cv::imshow("normals", quantized_images[1]); cv::imshow("normals", quantized_images[1]);
cv::FileStorage fs; cv::FileStorage fs;
char key = (char)cvWaitKey(10); char key = (char)cv::waitKey(10);
if( key == 'q' ) if( key == 'q' )
break; break;


@ -28,7 +28,7 @@ bool addRemovePt = false;
static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ ) static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
{ {
if( event == CV_EVENT_LBUTTONDOWN ) if( event == EVENT_LBUTTONDOWN )
{ {
point = Point2f((float)x,(float)y); point = Point2f((float)x,(float)y);
addRemovePt = true; addRemovePt = true;


@ -23,8 +23,8 @@ int main(int argc, char** argv)
return -1; return -1;
} }
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
if(img1.empty() || img2.empty()) if(img1.empty() || img2.empty())
{ {
printf("Can't read one of the images\n"); printf("Can't read one of the images\n");


@ -95,7 +95,7 @@ static bool readImages( const string& queryImageName, const string& trainFilenam
Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames ) Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames )
{ {
cout << "< Reading the images..." << endl; cout << "< Reading the images..." << endl;
queryImage = imread( queryImageName, CV_LOAD_IMAGE_GRAYSCALE); queryImage = imread( queryImageName, IMREAD_GRAYSCALE);
if( queryImage.empty() ) if( queryImage.empty() )
{ {
cout << "Query image can not be read." << endl << ">" << endl; cout << "Query image can not be read." << endl << ">" << endl;
@ -112,7 +112,7 @@ static bool readImages( const string& queryImageName, const string& trainFilenam
for( size_t i = 0; i < trainImageNames.size(); i++ ) for( size_t i = 0; i < trainImageNames.size(); i++ )
{ {
string filename = trainDirName + trainImageNames[i]; string filename = trainDirName + trainImageNames[i];
Mat img = imread( filename, CV_LOAD_IMAGE_GRAYSCALE ); Mat img = imread( filename, IMREAD_GRAYSCALE );
if( img.empty() ) if( img.empty() )
cout << "Train image " << filename << " can not be read." << endl; cout << "Train image " << filename << " can not be read." << endl;
else else


@ -65,7 +65,7 @@ int main(int argc, char** argv)
colorRad = 10; colorRad = 10;
maxPyrLevel = 1; maxPyrLevel = 1;
namedWindow( winName, CV_WINDOW_AUTOSIZE ); namedWindow( winName, WINDOW_AUTOSIZE );
createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation ); createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation );
createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation ); createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation );


@ -45,16 +45,16 @@ int main( int /*argc*/, char** /*argv*/ )
img = Scalar::all(0); img = Scalar::all(0);
for( i = 0; i < count; i++ ) for( i = 0; i < count; i++ )
circle( img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA ); circle( img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA );
for( i = 0; i < 4; i++ ) for( i = 0; i < 4; i++ )
line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA); line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, LINE_AA);
circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA); circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, LINE_AA);
imshow( "rect & circle", img ); imshow( "rect & circle", img );
char key = (char)cvWaitKey(); char key = (char)waitKey();
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC' if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
break; break;
} }


@ -79,7 +79,7 @@ int main( int argc, char** argv )
OpenClose(open_close_pos, 0); OpenClose(open_close_pos, 0);
ErodeDilate(erode_dilate_pos, 0); ErodeDilate(erode_dilate_pos, 0);
c = cvWaitKey(0); c = waitKey();
if( (char)c == 27 ) if( (char)c == 27 )
break; break;


@ -79,8 +79,8 @@ static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, f
static float getMaxDisparity( VideoCapture& capture ) static float getMaxDisparity( VideoCapture& capture )
{ {
const int minDistance = 400; // mm const int minDistance = 400; // mm
float b = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm float b = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm
float F = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels float F = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels
return b * F / minDistance; return b * F / minDistance;
} }
@ -142,7 +142,7 @@ static void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool
{ {
string mask( argv[++i] ); string mask( argv[++i] );
if( mask.size() != 5) if( mask.size() != 5)
CV_Error( CV_StsBadArg, "Incorrect length of -m argument string" ); CV_Error( Error::StsBadArg, "Incorrect length of -m argument string" );
int val = atoi(mask.c_str()); int val = atoi(mask.c_str());
int l = 100000, r = 10000, sum = 0; int l = 100000, r = 10000, sum = 0;
@ -191,7 +191,7 @@ int main( int argc, char* argv[] )
if( isVideoReading ) if( isVideoReading )
capture.open( filename ); capture.open( filename );
else else
capture.open( CV_CAP_OPENNI ); capture.open( CAP_OPENNI );
cout << "done." << endl; cout << "done." << endl;
@ -207,23 +207,23 @@ int main( int argc, char* argv[] )
switch ( imageMode ) switch ( imageMode )
{ {
case 0: case 0:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ ); modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_VGA_30HZ );
break; break;
case 1: case 1:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ ); modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_15HZ );
break; break;
case 2: case 2:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ ); modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_30HZ );
break; break;
//The following modes are only supported by the Xtion Pro Live //The following modes are only supported by the Xtion Pro Live
case 3: case 3:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ ); modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_30HZ );
break; break;
case 4: case 4:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ ); modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_60HZ );
break; break;
default: default:
CV_Error( CV_StsBadArg, "Unsupported image mode property.\n"); CV_Error( Error::StsBadArg, "Unsupported image mode property.\n");
} }
if (!modeRes) if (!modeRes)
cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl; cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
@ -231,18 +231,18 @@ int main( int argc, char* argv[] )
// Print some available device settings. // Print some available device settings.
cout << "\nDepth generator output mode:" << endl << cout << "\nDepth generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl << "FRAME_WIDTH " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl << "FRAME_HEIGHT " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl <<
"FRAME_MAX_DEPTH " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl << "FRAME_MAX_DEPTH " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
"FPS " << capture.get( CV_CAP_PROP_FPS ) << endl << "FPS " << capture.get( CAP_PROP_FPS ) << endl <<
"REGISTRATION " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl; "REGISTRATION " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;
if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) ) if( capture.get( CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
{ {
cout << cout <<
"\nImage generator output mode:" << endl << "\nImage generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl << "FRAME_WIDTH " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl << "FRAME_HEIGHT " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_HEIGHT ) << endl <<
"FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl; "FPS " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FPS ) << endl;
} }
else else
{ {
@ -266,14 +266,14 @@ int main( int argc, char* argv[] )
} }
else else
{ {
if( retrievedImageFlags[0] && capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) ) if( retrievedImageFlags[0] && capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
{ {
const float scaleFactor = 0.05f; const float scaleFactor = 0.05f;
Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor ); Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor );
imshow( "depth map", show ); imshow( "depth map", show );
} }
if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CV_CAP_OPENNI_DISPARITY_MAP ) ) if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CAP_OPENNI_DISPARITY_MAP ) )
{ {
if( isColorizeDisp ) if( isColorizeDisp )
{ {
@ -289,13 +289,13 @@ int main( int argc, char* argv[] )
} }
} }
if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CV_CAP_OPENNI_VALID_DEPTH_MASK ) ) if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CAP_OPENNI_VALID_DEPTH_MASK ) )
imshow( "valid depth mask", validDepthMap ); imshow( "valid depth mask", validDepthMap );
if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CAP_OPENNI_BGR_IMAGE ) )
imshow( "rgb image", bgrImage ); imshow( "rgb image", bgrImage );
if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) ) if( retrievedImageFlags[4] && capture.retrieve( grayImage, CAP_OPENNI_GRAY_IMAGE ) )
imshow( "gray image", grayImage ); imshow( "gray image", grayImage );
} }
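
For the OpenNI hunks above, a compressed sketch of the renamed capture API: CAP_OPENNI, CAP_OPENNI_DEPTH_MAP and the CAP_PROP_* ids replace their CV_-prefixed forms. It assumes an OpenCV build with OpenNI support and an attached sensor, so treat it as illustrative rather than a drop-in test:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main()
    {
        VideoCapture capture(CAP_OPENNI);                   // was CV_CAP_OPENNI
        if (!capture.isOpened())
        {
            std::cerr << "No OpenNI-capable device found" << std::endl;
            return 1;
        }
        // the property ids lost their CV_ prefix as well
        std::cout << "FRAME_WIDTH  " << capture.get(CAP_PROP_FRAME_WIDTH) << std::endl
                  << "FRAME_HEIGHT " << capture.get(CAP_PROP_FRAME_HEIGHT) << std::endl;

        Mat depthMap;
        for (;;)
        {
            if (!capture.grab())
                break;
            if (capture.retrieve(depthMap, CAP_OPENNI_DEPTH_MAP))   // was CV_CAP_OPENNI_DEPTH_MAP
            {
                Mat show;
                depthMap.convertTo(show, CV_8UC1, 0.05);            // same scale factor as the sample
                imshow("depth map", show);
            }
            if (waitKey(30) >= 0)
                break;
        }
        return 0;
    }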

View File

@ -54,7 +54,7 @@ static void read_imgList(const string& filename, vector<Mat>& images) {
std::ifstream file(filename.c_str(), ifstream::in); std::ifstream file(filename.c_str(), ifstream::in);
if (!file) { if (!file) {
string error_message = "No valid input file was given, please check the given filename."; string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message); CV_Error(Error::StsBadArg, error_message);
} }
string line; string line;
while (getline(file, line)) { while (getline(file, line)) {
@ -78,7 +78,7 @@ static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat(); Mat src = _src.getMat();
// only allow one channel // only allow one channel
if(src.channels() != 1) { if(src.channels() != 1) {
CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported"); CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported");
} }
// create and return normalized image // create and return normalized image
Mat dst; Mat dst;
@ -104,7 +104,7 @@ static void onTrackbar(int pos, void* ptr)
struct params *p = (struct params *)ptr; struct params *p = (struct params *)ptr;
p->pca = PCA(p->data, cv::Mat(), CV_PCA_DATA_AS_ROW, var); p->pca = PCA(p->data, cv::Mat(), PCA::DATA_AS_ROW, var);
Mat point = p->pca.project(p->data.row(0)); Mat point = p->pca.project(p->data.row(0));
Mat reconstruction = p->pca.backProject(point); Mat reconstruction = p->pca.backProject(point);
@ -142,14 +142,14 @@ int main(int argc, char** argv)
// Quit if there are not enough images for this demo. // Quit if there are not enough images for this demo.
if(images.size() <= 1) { if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!"; string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message); CV_Error(Error::StsError, error_message);
} }
// Reshape and stack images into a rowMatrix // Reshape and stack images into a rowMatrix
Mat data = formatImagesForPCA(images); Mat data = formatImagesForPCA(images);
// perform PCA // perform PCA
PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance PCA pca(data, cv::Mat(), PCA::DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance
// Demonstration of the effect of retainedVariance on the first image // Demonstration of the effect of retainedVariance on the first image
Mat point = pca.project(data.row(0)); // project into the eigenspace, thus the image becomes a "point" Mat point = pca.project(data.row(0)); // project into the eigenspace, thus the image becomes a "point"
@ -159,7 +159,7 @@ int main(int argc, char** argv)
// init highgui window // init highgui window
string winName = "Reconstruction | press 'q' to quit"; string winName = "Reconstruction | press 'q' to quit";
namedWindow(winName, CV_WINDOW_NORMAL); namedWindow(winName, WINDOW_NORMAL);
// params struct to pass to the trackbar handler // params struct to pass to the trackbar handler
params p; params p;
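
A minimal sketch of the renamed PCA flag and error code from this hunk (PCA::DATA_AS_ROW and Error::StsBadArg instead of CV_PCA_DATA_AS_ROW and CV_StsBadArg) on synthetic data; the matrix shape and retained variance are arbitrary:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main()
    {
        // synthetic data set: 20 samples of dimension 10, one sample per row
        Mat data(20, 10, CV_32F);
        randu(data, Scalar::all(0), Scalar::all(1));
        if (data.empty())
            CV_Error(Error::StsBadArg, "no data");          // was CV_Error(CV_StsBadArg, ...)

        // the layout flag moved from the CV_PCA_* macros into the PCA class
        PCA pca(data, Mat(), PCA::DATA_AS_ROW, 0.95);       // was CV_PCA_DATA_AS_ROW
        Mat point = pca.project(data.row(0));
        Mat reconstruction = pca.backProject(point);
        std::cout << "reconstruction error: "
                  << norm(data.row(0), reconstruction, NORM_L2) << std::endl;

        namedWindow("pca demo", WINDOW_NORMAL);             // was CV_WINDOW_NORMAL
        imshow("pca demo", data);
        waitKey();
        return 0;
    }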

View File

@ -31,8 +31,8 @@ int main(int, char* [])
{ {
// draw a circle and line indicating the shift direction... // draw a circle and line indicating the shift direction...
Point center(curr.cols >> 1, curr.rows >> 1); Point center(curr.cols >> 1, curr.rows >> 1);
circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, CV_AA); circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, LINE_AA);
line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, CV_AA); line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, LINE_AA);
} }
imshow("phase shift", frame); imshow("phase shift", frame);

View File

@ -36,7 +36,7 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
int updateFlag = 0; int updateFlag = 0;
if( event == CV_EVENT_LBUTTONUP ) if( event == EVENT_LBUTTONUP )
{ {
if( classColors.empty() ) if( classColors.empty() )
return; return;
@ -45,7 +45,7 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* )
trainedPointsMarkers.push_back( (int)(classColors.size()-1) ); trainedPointsMarkers.push_back( (int)(classColors.size()-1) );
updateFlag = true; updateFlag = true;
} }
else if( event == CV_EVENT_RBUTTONUP ) else if( event == EVENT_RBUTTONUP )
{ {
#if _BT_ #if _BT_
if( classColors.size() < 2 ) if( classColors.size() < 2 )
@ -503,7 +503,7 @@ int main()
imgDst.create( 480, 640, CV_8UC3 ); imgDst.create( 480, 640, CV_8UC3 );
imshow( "points", img ); imshow( "points", img );
cvSetMouseCallback( "points", on_mouse ); setMouseCallback( "points", on_mouse );
for(;;) for(;;)
{ {
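
A self-contained sketch of the C++ mouse API used above: setMouseCallback() replaces cvSetMouseCallback() and the EVENT_* enum replaces the CV_EVENT_* macros; the window name and point storage are placeholders:

    #include <opencv2/opencv.hpp>
    #include <vector>
    using namespace cv;

    static std::vector<Point> trainedPoints;

    static void on_mouse(int event, int x, int y, int /*flags*/, void*)
    {
        if (event == EVENT_LBUTTONUP)           // was CV_EVENT_LBUTTONUP
            trainedPoints.push_back(Point(x, y));
        else if (event == EVENT_RBUTTONUP)      // was CV_EVENT_RBUTTONUP
            trainedPoints.clear();
    }

    int main()
    {
        Mat img(480, 640, CV_8UC3, Scalar::all(0));
        imshow("points", img);
        setMouseCallback("points", on_mouse);   // was cvSetMouseCallback
        for (;;)
        {
            img = Scalar::all(0);
            for (size_t i = 0; i < trainedPoints.size(); i++)
                circle(img, trainedPoints[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA);
            imshow("points", img);
            if ((char)waitKey(30) == 27)        // ESC quits
                break;
        }
        return 0;
    }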

View File

@ -53,7 +53,7 @@ static void refineSegments(const Mat& img, Mat& mask, Mat& dst)
} }
} }
Scalar color( 0, 0, 255 ); Scalar color( 0, 0, 255 );
drawContours( dst, contours, largestComp, color, CV_FILLED, 8, hierarchy ); drawContours( dst, contours, largestComp, color, FILLED, LINE_8, hierarchy );
} }
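
A short sketch of drawContours() with the enum values FILLED and LINE_8 standing in for CV_FILLED and the bare 8; the synthetic blob exists only to give findContours() something to find:

    #include <opencv2/opencv.hpp>
    #include <vector>
    using namespace cv;

    int main()
    {
        // a synthetic binary mask with a single blob
        Mat mask(240, 320, CV_8UC1, Scalar::all(0));
        circle(mask, Point(160, 120), 50, Scalar(255), FILLED);

        std::vector<std::vector<Point> > contours;
        std::vector<Vec4i> hierarchy;
        findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);

        Mat dst = Mat::zeros(mask.size(), CV_8UC3);
        if (!contours.empty())
            // FILLED and LINE_8 replace the old CV_FILLED and the plain 8
            drawContours(dst, contours, 0, Scalar(0, 0, 255), FILLED, LINE_8, hierarchy);

        imshow("filled contour", dst);
        waitKey();
        return 0;
    }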

View File

@ -212,11 +212,11 @@ static int select3DBox(const string& windowname, const string& selWinName, const
for(;;) for(;;)
{ {
float Z = 0.f; float Z = 0.f;
bool dragging = (mouse.buttonState & CV_EVENT_FLAG_LBUTTON) != 0; bool dragging = (mouse.buttonState & EVENT_FLAG_LBUTTON) != 0;
int npt = nobjpt; int npt = nobjpt;
if( (mouse.event == CV_EVENT_LBUTTONDOWN || if( (mouse.event == EVENT_LBUTTONDOWN ||
mouse.event == CV_EVENT_LBUTTONUP || mouse.event == EVENT_LBUTTONUP ||
dragging) && nobjpt < 4 ) dragging) && nobjpt < 4 )
{ {
Point2f m = mouse.pt; Point2f m = mouse.pt;
@ -259,9 +259,9 @@ static int select3DBox(const string& windowname, const string& selWinName, const
} }
box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 0 : Z); box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 0 : Z);
if( (npt == 0 && mouse.event == CV_EVENT_LBUTTONDOWN) || if( (npt == 0 && mouse.event == EVENT_LBUTTONDOWN) ||
(npt > 0 && norm(box[npt] - box[npt-1]) > eps && (npt > 0 && norm(box[npt] - box[npt-1]) > eps &&
mouse.event == CV_EVENT_LBUTTONUP) ) mouse.event == EVENT_LBUTTONUP) )
{ {
nobjpt++; nobjpt++;
if( nobjpt < 4 ) if( nobjpt < 4 )
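
A minimal sketch separating the two kinds of constants touched in this hunk: EVENT_LBUTTONDOWN and EVENT_LBUTTONUP are transition events, while EVENT_FLAG_LBUTTON is a state bit tested against the flags argument; the window name and messages are placeholders:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    static void on_mouse(int event, int x, int y, int flags, void*)
    {
        // 'event' reports transitions, 'flags' carries the current button-state bits
        bool dragging = (flags & EVENT_FLAG_LBUTTON) != 0;   // was CV_EVENT_FLAG_LBUTTON
        if (event == EVENT_LBUTTONDOWN)                      // was CV_EVENT_LBUTTONDOWN
            std::cout << "press at " << x << "," << y << std::endl;
        else if (event == EVENT_LBUTTONUP)                   // was CV_EVENT_LBUTTONUP
            std::cout << "release at " << x << "," << y << std::endl;
        else if (dragging)
            std::cout << "dragging through " << x << "," << y << std::endl;
    }

    int main()
    {
        Mat canvas(480, 640, CV_8UC3, Scalar::all(64));
        namedWindow("select");
        setMouseCallback("select", on_mouse);
        imshow("select", canvas);
        waitKey();
        return 0;
    }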

View File

@ -132,7 +132,7 @@ static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{ {
const Point* p = &squares[i][0]; const Point* p = &squares[i][0];
int n = (int)squares[i].size(); int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA); polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
} }
imshow(wndname, image); imshow(wndname, image);

View File

@ -46,15 +46,15 @@ bool readStringList(const string& filename, vector<string>& l)
int process(vector<string> images) int process(vector<string> images)
{ {
namedWindow("image",CV_WINDOW_KEEPRATIO); //resizable window; namedWindow("image", WINDOW_KEEPRATIO); //resizable window;
for (size_t i = 0; i < images.size(); i++) for (size_t i = 0; i < images.size(); i++)
{ {
Mat image = imread(images[i], CV_LOAD_IMAGE_GRAYSCALE); // do grayscale processing? Mat image = imread(images[i], IMREAD_GRAYSCALE); // do grayscale processing?
imshow("image",image); imshow("image",image);
cout << "Press a key to see the next image in the list." << endl; cout << "Press a key to see the next image in the list." << endl;
waitKey(); // wait indefinitely for a key to be pressed waitKey(); // wait indefinitely for a key to be pressed
} }
return 0; return 0;
} }
} }
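
A sketch of the renamed I/O flags from this hunk: IMREAD_GRAYSCALE for imread() and WINDOW_KEEPRATIO for namedWindow(); the default file name is hypothetical:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main(int argc, char** argv)
    {
        const char* name = argc > 1 ? argv[1] : "image.png";   // hypothetical default file
        namedWindow("image", WINDOW_KEEPRATIO);                // was CV_WINDOW_KEEPRATIO
        Mat image = imread(name, IMREAD_GRAYSCALE);            // was CV_LOAD_IMAGE_GRAYSCALE
        if (image.empty())
        {
            std::cerr << "could not read " << name << std::endl;
            return 1;
        }
        imshow("image", image);
        waitKey();
        return 0;
    }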

View File

@ -36,7 +36,7 @@ namespace {
char filename[200]; char filename[200];
string window_name = "video | q or esc to quit"; string window_name = "video | q or esc to quit";
cout << "press space to save a picture. q or esc to quit" << endl; cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window; namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame; Mat frame;
for (;;) { for (;;) {
capture >> frame; capture >> frame;

View File

@ -60,10 +60,10 @@ int main(int argc, char *argv[])
return -1; return -1;
} }
Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH), Size refS = Size((int) captRefrnc.get(CAP_PROP_FRAME_WIDTH),
(int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)), (int) captRefrnc.get(CAP_PROP_FRAME_HEIGHT)),
uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH), uTSi = Size((int) captUndTst.get(CAP_PROP_FRAME_WIDTH),
(int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT)); (int) captUndTst.get(CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi) if (refS != uTSi)
{ {
@ -75,13 +75,13 @@ int main(int argc, char *argv[])
const char* WIN_RF = "Reference"; const char* WIN_RF = "Reference";
// Windows // Windows
namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE); namedWindow(WIN_RF, WINDOW_AUTOSIZE);
namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE); namedWindow(WIN_UT, WINDOW_AUTOSIZE);
cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0) moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2 moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl; << " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3) cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3)
<< psnrTriggerValue << endl; << psnrTriggerValue << endl;
@ -125,7 +125,7 @@ int main(int argc, char *argv[])
imshow(WIN_RF, frameReference); imshow(WIN_RF, frameReference);
imshow(WIN_UT, frameUnderTest); imshow(WIN_UT, frameUnderTest);
c = (char)cvWaitKey(delay); c = (char)waitKey(delay);
if (c == 27) break; if (c == 27) break;
} }
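
A condensed sketch of the renamed capture properties and window calls used above (CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_COUNT, moveWindow() and waitKey()); the input file name and window position are placeholders:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main(int argc, char** argv)
    {
        VideoCapture cap(argc > 1 ? argv[1] : "reference.avi");   // placeholder file name
        if (!cap.isOpened())
            return 1;

        Size refS((int)cap.get(CAP_PROP_FRAME_WIDTH),             // was CV_CAP_PROP_FRAME_WIDTH
                  (int)cap.get(CAP_PROP_FRAME_HEIGHT));           // was CV_CAP_PROP_FRAME_HEIGHT
        std::cout << "resolution: " << refS.width << "x" << refS.height
                  << ", frames: " << cap.get(CAP_PROP_FRAME_COUNT) << std::endl;

        namedWindow("Reference", WINDOW_AUTOSIZE);                // was CV_WINDOW_AUTOSIZE
        moveWindow("Reference", 400, 0);                          // was cvMoveWindow

        Mat frame;
        for (;;)
        {
            cap >> frame;
            if (frame.empty())
                break;
            imshow("Reference", frame);
            if ((char)waitKey(30) == 27)                          // was cvWaitKey(delay)
                break;
        }
        return 0;
    }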

View File

@ -41,19 +41,19 @@ int main(int argc, char *argv[])
string::size_type pAt = source.find_last_of('.'); // Find extension point string::size_type pAt = source.find_last_of('.'); // Find extension point
const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form int ex = static_cast<int>(inputVideo.get(CAP_PROP_FOURCC)); // Get Codec Type- Int form
// Transform from int to char via Bitwise operators // Transform from int to char via Bitwise operators
char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0}; char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size Size S = Size((int) inputVideo.get(CAP_PROP_FRAME_WIDTH), // Acquire input size
(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); (int) inputVideo.get(CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo; // Open the output VideoWriter outputVideo; // Open the output
if (askOutputType) if (askOutputType)
outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true); outputVideo.open(NAME, ex=-1, inputVideo.get(CAP_PROP_FPS), S, true);
else else
outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true); outputVideo.open(NAME, ex, inputVideo.get(CAP_PROP_FPS), S, true);
if (!outputVideo.isOpened()) if (!outputVideo.isOpened())
{ {
@ -62,7 +62,7 @@ int main(int argc, char *argv[])
} }
cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
<< " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl; << " of nr#: " << inputVideo.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "Input codec type: " << EXT << endl; cout << "Input codec type: " << EXT << endl;
int channel = 2; // Select the channel to save int channel = 2; // Select the channel to save
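
A sketch of the renamed VideoWriter path from this hunk (CAP_PROP_FOURCC, CAP_PROP_FPS and the frame-size properties without the CV_ prefix) that simply re-encodes the input with its original codec; the file names are placeholders:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main(int argc, char** argv)
    {
        VideoCapture inputVideo(argc > 1 ? argv[1] : "input.avi");   // placeholder name
        if (!inputVideo.isOpened())
            return 1;

        int ex = static_cast<int>(inputVideo.get(CAP_PROP_FOURCC));  // was CV_CAP_PROP_FOURCC
        char EXT[] = { (char)(ex & 0xFF), (char)((ex >> 8) & 0xFF),
                       (char)((ex >> 16) & 0xFF), (char)((ex >> 24) & 0xFF), 0 };
        Size S((int)inputVideo.get(CAP_PROP_FRAME_WIDTH),
               (int)inputVideo.get(CAP_PROP_FRAME_HEIGHT));
        std::cout << "input codec: " << EXT << std::endl;

        VideoWriter outputVideo("copy.avi", ex, inputVideo.get(CAP_PROP_FPS), S, true);
        if (!outputVideo.isOpened())
            return 1;

        Mat frame;
        for (;;)
        {
            inputVideo >> frame;
            if (frame.empty())
                break;
            outputVideo << frame;   // re-encode with the original codec
        }
        return 0;
    }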

View File

@ -37,8 +37,8 @@ int main( int, char** argv )
equalizeHist( src, dst ); equalizeHist( src, dst );
/// Display results /// Display results
namedWindow( source_window, CV_WINDOW_AUTOSIZE ); namedWindow( source_window, WINDOW_AUTOSIZE );
namedWindow( equalized_window, CV_WINDOW_AUTOSIZE ); namedWindow( equalized_window, WINDOW_AUTOSIZE );
imshow( source_window, src ); imshow( source_window, src );
imshow( equalized_window, dst ); imshow( equalized_window, dst );

View File

@ -33,8 +33,8 @@ int main( int, char** argv )
templ = imread( argv[2], 1 ); templ = imread( argv[2], 1 );
/// Create windows /// Create windows
namedWindow( image_window, CV_WINDOW_AUTOSIZE ); namedWindow( image_window, WINDOW_AUTOSIZE );
namedWindow( result_window, CV_WINDOW_AUTOSIZE ); namedWindow( result_window, WINDOW_AUTOSIZE );
/// Create Trackbar /// Create Trackbar
const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED"; const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";

View File

@ -37,7 +37,7 @@ int main( int, char** argv )
/// Create Trackbar to enter the number of bins /// Create Trackbar to enter the number of bins
const char* window_image = "Source image"; const char* window_image = "Source image";
namedWindow( window_image, CV_WINDOW_AUTOSIZE ); namedWindow( window_image, WINDOW_AUTOSIZE );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj ); createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0); Hist_and_Backproj(0, 0);

View File

@ -34,7 +34,7 @@ int main( int, char** argv )
cvtColor( src, hsv, COLOR_BGR2HSV ); cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image /// Show the image
namedWindow( window_image, CV_WINDOW_AUTOSIZE ); namedWindow( window_image, WINDOW_AUTOSIZE );
imshow( window_image, src ); imshow( window_image, src );
/// Set Trackbars for floodfill thresholds /// Set Trackbars for floodfill thresholds
@ -52,7 +52,7 @@ int main( int, char** argv )
*/ */
void pickPoint (int event, int x, int y, int, void* ) void pickPoint (int event, int x, int y, int, void* )
{ {
if( event != CV_EVENT_LBUTTONDOWN ) if( event != EVENT_LBUTTONDOWN )
{ return; } { return; }
// Fill and get the mask // Fill and get the mask

Some files were not shown because too many files have changed in this diff