Merged the trunk r8589:8653 - all changes related to build warnings
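Most of the C++ changes in this diff repeat two warning fixes: helpers used in only one translation unit are marked static (for -Wmissing-declarations), and parameters that shadow members of the same name get a leading underscore. A minimal, hypothetical sketch of both patterns (these names are invented for illustration and do not appear in the commit):

    // Hypothetical example of the recurring warning fixes, not code from this commit.

    // A helper used only in this file: static gives it internal linkage, so the
    // compiler no longer warns that it lacks a declaration in a header.
    static int clampValue(int value, int lo, int hi)
    {
        return value < lo ? lo : (value > hi ? hi : value);
    }

    struct FrameSize
    {
        // Leading underscores keep the parameters from shadowing the members.
        FrameSize(int _width, int _height) : width(_width), height(_height) {}
        int width, height;
    };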
@@ -76,11 +76,20 @@ if(HAVE_QT)
endif()
include(${QT_USE_FILE})

if(QT_INCLUDE_DIR)
ocv_include_directories(${QT_INCLUDE_DIR})
endif()

QT4_ADD_RESOURCES(_RCC_OUTFILES src/window_QT.qrc)
QT4_WRAP_CPP(_MOC_OUTFILES src/window_QT.h)

list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES} )

ocv_check_flag_support(CXX -Wno-missing-declarations HAVE_CXX_WNO_MISSING_DECLARATIONS)
if(HAVE_CXX_WNO_MISSING_DECLARATIONS)
set_source_files_properties(${_RCC_OUTFILES} PROPERTIES COMPILE_FLAGS -Wno-missing-declarations)
endif()
elseif(WIN32)
list(APPEND highgui_srcs src/window_w32.cpp)
elseif(HAVE_GTK)
@@ -166,7 +175,6 @@ if(WITH_IMAGEIO)
if(IOS)
list(APPEND HIGHGUI_LIBRARIES "-framework ImageIO")
endif()
#TODO: check if need to link with some framework on OS X: -framework ApplicationServices ??
endif(WITH_IMAGEIO)

if(WITH_AVFOUNDATION)
@@ -186,7 +194,7 @@ endif()

if(WIN32)
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
include_directories(AFTER "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
endif()

if(UNIX)
@@ -220,10 +228,7 @@ endif()
set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")

ocv_add_precompiled_headers(${the_module})

if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
endif()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations)

if(WIN32 AND WITH_FFMPEG)
#copy ffmpeg dll to the output folder

@@ -79,7 +79,7 @@ CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms C
CVAPI(void) cvSaveWindowParameters(const char* name);
CVAPI(void) cvLoadWindowParameters(const char* name);
CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
CVAPI(void) cvStopLoop();
CVAPI(void) cvStopLoop( void );

typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);
enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};
@@ -90,7 +90,7 @@ CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCall
/* this function is used to set some external parameters in case of X Window */
CVAPI(int) cvInitSystem( int argc, char** argv );

CVAPI(int) cvStartWindowThread();
CVAPI(int) cvStartWindowThread( void );

// --------- YV ---------
enum
@@ -100,16 +100,16 @@ enum
CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property
CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support

//These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrainted by the image displayed
CV_WINDOW_OPENGL = 0x00001000, //window with opengl support

//Those flags are only for Qt
CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar
CV_GUI_NORMAL = 0x00000010, //old fashious way

//These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen
CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint)
@@ -303,10 +303,10 @@ enum
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)

CV_CAP_ANDROID =1000, // Android

CV_CAP_XIAPI =1100, // XIMEA Camera API

CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)

CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)
};

/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
@@ -367,15 +367,15 @@ enum
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,

CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
@@ -409,24 +409,24 @@ enum
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,

// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast

// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
@@ -436,7 +436,7 @@ enum
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds

// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
@@ -532,7 +532,7 @@ CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );

// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);

/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;
@@ -1,10 +1,14 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
#endif

#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__

#include "opencv2/ts/ts.hpp"
#include "opencv2/highgui/highgui.hpp"

#if GTEST_CREATE_SHARED_LIBRARY
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif

@@ -41,13 +41,9 @@

#include "precomp.hpp"

#if _MSC_VER >= 1200
#pragma warning( disable: 4711 )
#endif

#if defined _M_X64 && defined _MSC_VER && !defined CV_ICC
#pragma optimize("",off)
#pragma warning( disable: 4748 )
#pragma warning(disable: 4748)
#endif

namespace cv
@@ -282,7 +278,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif

#ifdef HAVE_PVAPI
case CV_CAP_PVAPI:
capture = cvCreateCameraCapture_PvAPI (index);
@@ -306,7 +302,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
return capture;
break;
#endif

#ifdef HAVE_XIMEA
case CV_CAP_XIAPI:
capture = cvCreateCameraCapture_XIMEA (index);
@@ -354,7 +350,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
if (! result)
result = cvCreateFileCapture_QT (filename);
#endif

#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateFileCapture_AVFoundation (filename);
@@ -364,7 +360,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
if (! result)
result = cvCreateFileCapture_OpenNI (filename);
#endif

if (! result)
result = cvCreateFileCapture_Images (filename);

@@ -378,29 +374,29 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color )
{
//CV_FUNCNAME( "cvCreateVideoWriter" );
//CV_FUNCNAME( "cvCreateVideoWriter" );

CvVideoWriter *result = 0;
CvVideoWriter *result = 0;

if(!fourcc || !fps)
result = cvCreateVideoWriter_Images(filename);
if(!fourcc || !fps)
result = cvCreateVideoWriter_Images(filename);

if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);

/* #ifdef HAVE_XINE
if(!result)
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
#endif
/* #ifdef HAVE_XINE
if(!result)
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
#endif
*/
#ifdef HAVE_AVFOUNDATION
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
#endif

#ifdef HAVE_QUICKTIME
if(!result)
result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
if(!result)
result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
#endif

#ifdef HAVE_GSTREAMER
@@ -408,10 +404,10 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
#endif

if(!result)
result = cvCreateVideoWriter_Images(filename);
if(!result)
result = cvCreateVideoWriter_Images(filename);

return result;
return result;
}

CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
@@ -434,12 +430,12 @@ namespace cv

VideoCapture::VideoCapture()
{}

VideoCapture::VideoCapture(const string& filename)
{
open(filename);
}

VideoCapture::VideoCapture(int device)
{
open(device);
@@ -449,21 +445,21 @@ VideoCapture::~VideoCapture()
{
cap.release();
}

bool VideoCapture::open(const string& filename)
{
cap = cvCreateFileCapture(filename.c_str());
return isOpened();
}

bool VideoCapture::open(int device)
{
cap = cvCreateCameraCapture(device);
return isOpened();
}

bool VideoCapture::isOpened() const { return !cap.empty(); }

void VideoCapture::release()
{
cap.release();
@@ -473,7 +469,7 @@ bool VideoCapture::grab()
{
return cvGrabFrame(cap) != 0;
}

bool VideoCapture::retrieve(Mat& image, int channel)
{
IplImage* _img = cvRetrieveFrame(cap, channel);
@@ -500,18 +496,18 @@ bool VideoCapture::read(Mat& image)
image.release();
return !image.empty();
}

VideoCapture& VideoCapture::operator >> (Mat& image)
{
read(image);
return *this;
}

bool VideoCapture::set(int propId, double value)
{
return cvSetCaptureProperty(cap, propId, value) != 0;
}

double VideoCapture::get(int propId)
{
return cvGetCaptureProperty(cap, propId);
@@ -519,7 +515,7 @@ double VideoCapture::get(int propId)

VideoWriter::VideoWriter()
{}

VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
open(filename, fourcc, fps, frameSize, isColor);
@@ -528,13 +524,13 @@ VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size fr
void VideoWriter::release()
{
writer.release();
}

}

VideoWriter::~VideoWriter()
{
release();
}

bool VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
@@ -544,18 +540,18 @@ bool VideoWriter::open(const string& filename, int fourcc, double fps, Size fram
bool VideoWriter::isOpened() const
{
return !writer.empty();
}
}

void VideoWriter::write(const Mat& image)
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
}

VideoWriter& VideoWriter::operator << (const Mat& image)
{
write(image);
return *this;
return *this;
}

}
@@ -45,7 +45,7 @@

/****************** Capturing video from camera via CMU lib *******************/

#if HAVE_CMU1394
#ifdef HAVE_CMU1394

// This firewire capability added by Philip Gruebele (pgruebele@cox.net).
// For this to work you need to install the CMU firewire DCAM drivers,

@@ -126,8 +126,6 @@ static dc1394error_t adaptBufferStereoLocal(dc1394video_frame_t *in, dc1394video
static dc1394error_t dc1394_deinterlace_stereo_frames_fixed(dc1394video_frame_t *in,
dc1394video_frame_t *out, dc1394stereo_method_t method)
{
dc1394error_t err;

if((in->color_coding == DC1394_COLOR_CODING_RAW16) ||
(in->color_coding == DC1394_COLOR_CODING_MONO16) ||
(in->color_coding == DC1394_COLOR_CODING_YUV422))
@@ -136,14 +134,14 @@ static dc1394error_t dc1394_deinterlace_stereo_frames_fixed(dc1394video_frame_t
{

case DC1394_STEREO_METHOD_INTERLACED:
err = adaptBufferStereoLocal(in, out);
adaptBufferStereoLocal(in, out);
//FIXED by AB:
// dc1394_deinterlace_stereo(in->image, out->image, in->size[0], in->size[1]);
dc1394_deinterlace_stereo(in->image, out->image, out->size[0], out->size[1]);
break;

case DC1394_STEREO_METHOD_FIELD:
err = adaptBufferStereoLocal(in, out);
adaptBufferStereoLocal(in, out);
memcpy(out->image, in->image, out->image_bytes);
break;
}

@@ -89,7 +89,8 @@ Thanks to:
|
||||
|
||||
#include "precomp.hpp"
|
||||
|
||||
#if _MSC_VER >= 100
|
||||
#if defined _MSC_VER && _MSC_VER >= 100
|
||||
//'sprintf': name was marked as #pragma deprecated
|
||||
#pragma warning(disable: 4995)
|
||||
#endif
|
||||
|
||||
@@ -103,17 +104,18 @@ Thanks to:
|
||||
#include <vector>
|
||||
|
||||
//Include Directshow stuff here so we don't worry about needing all the h files.
|
||||
#if _MSC_VER >= 1500
|
||||
#include "DShow.h"
|
||||
#include "strmif.h"
|
||||
#include "Aviriff.h"
|
||||
#include "dvdmedia.h"
|
||||
#include "bdaiface.h"
|
||||
#if defined _MSC_VER && _MSC_VER >= 1500
|
||||
# include "DShow.h"
|
||||
# include "strmif.h"
|
||||
# include "Aviriff.h"
|
||||
# include "dvdmedia.h"
|
||||
# include "bdaiface.h"
|
||||
#else
|
||||
#ifdef _MSC_VER
|
||||
#define __extension__
|
||||
typedef BOOL WINBOOL;
|
||||
# ifdef _MSC_VER
|
||||
# define __extension__
|
||||
typedef BOOL WINBOOL;
|
||||
#endif
|
||||
|
||||
#include "dshow/dshow.h"
|
||||
#include "dshow/dvdmedia.h"
|
||||
#include "dshow/bdatypes.h"
|
||||
@@ -133,6 +135,8 @@ public:
|
||||
|
||||
virtual HRESULT STDMETHODCALLTYPE Clone(
|
||||
/* [out] */ IEnumPIDMap **ppIEnumPIDMap) = 0;
|
||||
|
||||
virtual ~IEnumPIDMap() {}
|
||||
};
|
||||
|
||||
interface IMPEG2PIDMap : public IUnknown
|
||||
@@ -148,6 +152,8 @@ interface IMPEG2PIDMap : public IUnknown
|
||||
|
||||
virtual HRESULT STDMETHODCALLTYPE EnumPIDMap(
|
||||
/* [out] */ IEnumPIDMap **pIEnumPIDMap) = 0;
|
||||
|
||||
virtual ~IMPEG2PIDMap() {}
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -234,6 +240,7 @@ interface ISampleGrabberCB : public IUnknown
|
||||
BYTE *pBuffer,
|
||||
LONG BufferLen) = 0;
|
||||
|
||||
virtual ~ISampleGrabberCB() {}
|
||||
};
|
||||
|
||||
interface ISampleGrabber : public IUnknown
|
||||
@@ -261,6 +268,7 @@ interface ISampleGrabber : public IUnknown
|
||||
ISampleGrabberCB *pCallback,
|
||||
LONG WhichMethodToCallback) = 0;
|
||||
|
||||
virtual ~ISampleGrabber() {}
|
||||
};
|
||||
|
||||
#ifndef HEADER
|
||||
@@ -519,12 +527,12 @@ class videoInput{
|
||||
|
||||
//Manual control over settings thanks.....
|
||||
//These are experimental for now.
|
||||
bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
|
||||
bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = NULL);
|
||||
bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false);
|
||||
bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = 0);
|
||||
bool getVideoSettingFilter(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long ¤tValue, long &flags, long &defaultValue);
|
||||
|
||||
bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
|
||||
bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = NULL);
|
||||
bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false);
|
||||
bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = 0);
|
||||
bool getVideoSettingCamera(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long ¤tValue, long &flags, long &defaultValue);
|
||||
|
||||
//bool setVideoSettingCam(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
|
||||
@@ -597,7 +605,7 @@ class videoInput{
|
||||
|
||||
/////////////////////////// HANDY FUNCTIONS /////////////////////////////
|
||||
|
||||
void MyFreeMediaType(AM_MEDIA_TYPE& mt){
|
||||
static void MyFreeMediaType(AM_MEDIA_TYPE& mt){
|
||||
if (mt.cbFormat != 0)
|
||||
{
|
||||
CoTaskMemFree((PVOID)mt.pbFormat);
|
||||
@@ -612,7 +620,7 @@ void MyFreeMediaType(AM_MEDIA_TYPE& mt){
|
||||
}
|
||||
}
|
||||
|
||||
void MyDeleteMediaType(AM_MEDIA_TYPE *pmt)
|
||||
static void MyDeleteMediaType(AM_MEDIA_TYPE *pmt)
|
||||
{
|
||||
if (pmt != NULL)
|
||||
{
|
||||
@@ -642,7 +650,7 @@ public:
|
||||
|
||||
|
||||
//------------------------------------------------
|
||||
~SampleGrabberCallback(){
|
||||
virtual ~SampleGrabberCallback(){
|
||||
ptrBuffer = NULL;
|
||||
DeleteCriticalSection(&critSection);
|
||||
CloseHandle(hEvent);
|
||||
@@ -849,7 +857,7 @@ void videoDevice::NukeDownstream(IBaseFilter *pBF){
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
void videoDevice::destroyGraph(){
|
||||
HRESULT hr = NULL;
|
||||
HRESULT hr = 0;
|
||||
//int FuncRetval=0;
|
||||
//int NumFilters=0;
|
||||
|
||||
@@ -867,7 +875,7 @@ void videoDevice::destroyGraph(){
|
||||
IBaseFilter * pFilter = NULL;
|
||||
if (pEnum->Next(1, &pFilter, &cFetched) == S_OK)
|
||||
{
|
||||
FILTER_INFO FilterInfo={0};
|
||||
FILTER_INFO FilterInfo;
|
||||
memset(&FilterInfo, 0, sizeof(FilterInfo));
|
||||
hr = pFilter->QueryFilterInfo(&FilterInfo);
|
||||
FilterInfo.pGraph->Release();
|
||||
@@ -1163,10 +1171,10 @@ bool videoInput::setupDevice(int deviceNumber){
|
||||
//
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
bool videoInput::setupDevice(int deviceNumber, int connection){
|
||||
bool videoInput::setupDevice(int deviceNumber, int _connection){
|
||||
if(deviceNumber >= VI_MAX_CAMERAS || VDList[deviceNumber]->readyToCapture) return false;
|
||||
|
||||
setPhyCon(deviceNumber, connection);
|
||||
setPhyCon(deviceNumber, _connection);
|
||||
if(setup(deviceNumber))return true;
|
||||
return false;
|
||||
}
|
||||
@@ -1213,11 +1221,11 @@ bool videoInput::setupDeviceFourcc(int deviceNumber, int w, int h,int fourcc){
|
||||
//
|
||||
// ----------------------------------------------------------------------
|
||||
|
||||
bool videoInput::setupDevice(int deviceNumber, int w, int h, int connection){
|
||||
bool videoInput::setupDevice(int deviceNumber, int w, int h, int _connection){
|
||||
if(deviceNumber >= VI_MAX_CAMERAS || VDList[deviceNumber]->readyToCapture) return false;
|
||||
|
||||
setAttemptCaptureSize(deviceNumber,w,h);
|
||||
setPhyCon(deviceNumber, connection);
|
||||
setPhyCon(deviceNumber, _connection);
|
||||
if(setup(deviceNumber))return true;
|
||||
return false;
|
||||
}
|
||||
@@ -1620,14 +1628,15 @@ void __cdecl videoInput::basicThread(void * objPtr){
|
||||
void videoInput::showSettingsWindow(int id){
|
||||
|
||||
if(isDeviceSetup(id)){
|
||||
HANDLE myTempThread;
|
||||
//HANDLE myTempThread;
|
||||
|
||||
//we reconnect to the device as we have freed our reference to it
|
||||
//why have we freed our reference? because there seemed to be an issue
|
||||
//with some mpeg devices if we didn't
|
||||
HRESULT hr = getDevice(&VDList[id]->pVideoInputFilter, id, VDList[id]->wDeviceName, VDList[id]->nDeviceName);
|
||||
if(hr == S_OK){
|
||||
myTempThread = (HANDLE)_beginthread(basicThread, 0, (void *)&VDList[id]);
|
||||
//myTempThread = (HANDLE)
|
||||
_beginthread(basicThread, 0, (void *)&VDList[id]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1705,7 +1714,7 @@ bool videoInput::setVideoSettingFilterPct(int deviceID, long Property, float pct
|
||||
float halfStep = (float)stepAmnt * 0.5f;
|
||||
if( mod < halfStep ) rasterValue -= mod;
|
||||
else rasterValue += stepAmnt - mod;
|
||||
printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue);
|
||||
printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue);
|
||||
}
|
||||
|
||||
return setVideoSettingFilter(deviceID, Property, rasterValue, Flags, false);
|
||||
@@ -1795,7 +1804,7 @@ bool videoInput::setVideoSettingCameraPct(int deviceID, long Property, float pct
|
||||
float halfStep = (float)stepAmnt * 0.5f;
|
||||
if( mod < halfStep ) rasterValue -= mod;
|
||||
else rasterValue += stepAmnt - mod;
|
||||
printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue);
|
||||
printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue);
|
||||
}
|
||||
|
||||
return setVideoSettingCamera(deviceID, Property, rasterValue, Flags, false);
|
||||
@@ -1920,7 +1929,7 @@ bool videoInput::restartDevice(int id){
|
||||
stopDevice(id);
|
||||
|
||||
//set our fps if needed
|
||||
if( avgFrameTime != -1){
|
||||
if( avgFrameTime != (unsigned long)-1){
|
||||
VDList[id]->requestedFrameTime = avgFrameTime;
|
||||
}
|
||||
|
||||
@@ -2300,7 +2309,7 @@ static void findClosestSizeAndSubtype(videoDevice * VD, int widthIn, int heightI
|
||||
//find perfect match or closest size
|
||||
int nearW = 9999999;
|
||||
int nearH = 9999999;
|
||||
bool foundClosestMatch = true;
|
||||
//bool foundClosestMatch = true;
|
||||
|
||||
int iCount = 0;
|
||||
int iSize = 0;
|
||||
@@ -2360,7 +2369,7 @@ static void findClosestSizeAndSubtype(videoDevice * VD, int widthIn, int heightI
|
||||
|
||||
//see if we have an exact match!
|
||||
if(exactMatchX && exactMatchY){
|
||||
foundClosestMatch = false;
|
||||
//foundClosestMatch = false;
|
||||
exactMatch = true;
|
||||
|
||||
widthOut = widthIn;
|
||||
@@ -2937,7 +2946,7 @@ HRESULT videoInput::ShowFilterPropertyPages(IBaseFilter *pFilter){
|
||||
return hr;
|
||||
}
|
||||
|
||||
HRESULT videoInput::ShowStreamPropertyPages(IAMStreamConfig *pStream){
|
||||
HRESULT videoInput::ShowStreamPropertyPages(IAMStreamConfig * /*pStream*/){
|
||||
|
||||
HRESULT hr = NOERROR;
|
||||
return hr;
|
||||
@@ -3027,11 +3036,11 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
|
||||
LONG lInpin, lOutpin;
|
||||
hr = Crossbar->get_PinCounts(&lOutpin , &lInpin);
|
||||
|
||||
BOOL IPin=TRUE; LONG pIndex=0 , pRIndex=0 , pType=0;
|
||||
BOOL iPin=TRUE; LONG pIndex=0 , pRIndex=0 , pType=0;
|
||||
|
||||
while( pIndex < lInpin)
|
||||
{
|
||||
hr = Crossbar->get_CrossbarPinInfo( IPin , pIndex , &pRIndex , &pType);
|
||||
hr = Crossbar->get_CrossbarPinInfo( iPin , pIndex , &pRIndex , &pType);
|
||||
|
||||
if( pType == conType){
|
||||
if(verbose)printf("SETUP: Found Physical Interface");
|
||||
|
||||
@@ -123,7 +123,7 @@ icvInitFFMPEG(void)
|
||||
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
|
||||
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
|
||||
#endif
|
||||
|
||||
|
||||
ffmpegInitialized = 1;
|
||||
}
|
||||
}
|
||||
@@ -151,7 +151,7 @@ public:
|
||||
{
|
||||
unsigned char* data = 0;
|
||||
int step=0, width=0, height=0, cn=0;
|
||||
|
||||
|
||||
if(!ffmpegCapture ||
|
||||
!icvRetrieveFrame_FFMPEG_p(ffmpegCapture,&data,&step,&width,&height,&cn))
|
||||
return 0;
|
||||
@@ -193,7 +193,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
|
||||
return cvCreateFileCapture_VFW(filename);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@@ -247,5 +247,5 @@ CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourc
|
||||
return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ extern "C" {
|
||||
#ifndef HAVE_FFMPEG_SWSCALE
|
||||
#error "libswscale is necessary to build the newer OpenCV ffmpeg wrapper"
|
||||
#endif
|
||||
|
||||
|
||||
// if the header path is not specified explicitly, let's deduce it
|
||||
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
|
||||
|
||||
@@ -140,7 +140,7 @@ extern "C" {
|
||||
#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
|
||||
#endif
|
||||
|
||||
int get_number_of_cpus(void)
|
||||
static int get_number_of_cpus(void)
|
||||
{
|
||||
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(52, 111, 0)
|
||||
return 1;
|
||||
@@ -210,7 +210,7 @@ struct CvCapture_FFMPEG
|
||||
|
||||
void seek(int64_t frame_number);
|
||||
void seek(double sec);
|
||||
bool slowSeek( int framenumber );
|
||||
bool slowSeek( int framenumber );
|
||||
|
||||
int64_t get_total_frames();
|
||||
double get_duration_sec();
|
||||
@@ -225,8 +225,8 @@ struct CvCapture_FFMPEG
|
||||
AVCodec * avcodec;
|
||||
int video_stream;
|
||||
AVStream * video_st;
|
||||
AVFrame * picture;
|
||||
AVFrame rgb_picture;
|
||||
AVFrame * picture;
|
||||
AVFrame rgb_picture;
|
||||
int64_t picture_pts;
|
||||
|
||||
AVPacket packet;
|
||||
@@ -274,7 +274,7 @@ void CvCapture_FFMPEG::close()
|
||||
sws_freeContext(img_convert_ctx);
|
||||
img_convert_ctx = 0;
|
||||
}
|
||||
|
||||
|
||||
if( picture )
|
||||
av_free(picture);
|
||||
|
||||
@@ -293,9 +293,9 @@ void CvCapture_FFMPEG::close()
|
||||
if( ic )
|
||||
{
|
||||
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
|
||||
av_close_input_file(ic);
|
||||
av_close_input_file(ic);
|
||||
#else
|
||||
avformat_close_input(&ic);
|
||||
avformat_close_input(&ic);
|
||||
#endif
|
||||
|
||||
ic = NULL;
|
||||
@@ -337,7 +337,7 @@ static void icvInitFFMPEG_internal()
|
||||
av_register_all();
|
||||
|
||||
av_log_set_level(AV_LOG_ERROR);
|
||||
|
||||
|
||||
initialized = true;
|
||||
}
|
||||
}
|
||||
@@ -345,18 +345,18 @@ static void icvInitFFMPEG_internal()
|
||||
bool CvCapture_FFMPEG::open( const char* _filename )
|
||||
{
|
||||
icvInitFFMPEG_internal();
|
||||
|
||||
|
||||
unsigned i;
|
||||
bool valid = false;
|
||||
|
||||
close();
|
||||
|
||||
|
||||
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
|
||||
int err = avformat_open_input(&ic, _filename, NULL, NULL);
|
||||
#else
|
||||
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
if (err < 0) {
|
||||
CV_WARN("Error opening file");
|
||||
goto exit_func;
|
||||
@@ -438,13 +438,13 @@ bool CvCapture_FFMPEG::grabFrame()
|
||||
const int max_number_of_attempts = 1 << 16;
|
||||
|
||||
if( !ic || !video_st ) return false;
|
||||
|
||||
|
||||
if( ic->streams[video_stream]->nb_frames > 0 &&
|
||||
frame_number > ic->streams[video_stream]->nb_frames )
|
||||
return false;
|
||||
|
||||
av_free_packet (&packet);
|
||||
|
||||
|
||||
picture_pts = AV_NOPTS_VALUE_;
|
||||
|
||||
// get the next frame
|
||||
@@ -463,7 +463,7 @@ bool CvCapture_FFMPEG::grabFrame()
|
||||
break;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
// Decode video frame
|
||||
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
|
||||
avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
|
||||
@@ -498,7 +498,7 @@ bool CvCapture_FFMPEG::grabFrame()
|
||||
|
||||
if( valid && first_frame_number < 0 )
|
||||
first_frame_number = dts_to_frame_number(picture_pts);
|
||||
|
||||
|
||||
// return if we have a new picture or not
|
||||
return valid;
|
||||
}
|
||||
@@ -518,7 +518,7 @@ bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int*
|
||||
{
|
||||
if( img_convert_ctx )
|
||||
sws_freeContext(img_convert_ctx);
|
||||
|
||||
|
||||
frame.width = video_st->codec->width;
|
||||
frame.height = video_st->codec->height;
|
||||
|
||||
@@ -629,7 +629,7 @@ double CvCapture_FFMPEG::get_fps()
|
||||
{
|
||||
fps = r2d(ic->streams[video_stream]->avg_frame_rate);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
if (fps < eps_zero)
|
||||
{
|
||||
@@ -666,12 +666,12 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
|
||||
{
|
||||
_frame_number = std::min(_frame_number, get_total_frames());
|
||||
int delta = 16;
|
||||
|
||||
|
||||
// if we have not grabbed a single frame before first seek, let's read the first frame
|
||||
// and get some valuable information during the process
|
||||
if( first_frame_number < 0 )
|
||||
grabFrame();
|
||||
|
||||
|
||||
for(;;)
|
||||
{
|
||||
int64_t _frame_number_temp = std::max(_frame_number-delta, (int64_t)0);
|
||||
@@ -684,13 +684,13 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
|
||||
if( _frame_number > 0 )
|
||||
{
|
||||
grabFrame();
|
||||
|
||||
|
||||
if( _frame_number > 1 )
|
||||
{
|
||||
frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
|
||||
//printf("_frame_number = %d, frame_number = %d, delta = %d\n",
|
||||
// (int)_frame_number, (int)frame_number, delta);
|
||||
|
||||
|
||||
if( frame_number < 0 || frame_number > _frame_number-1 )
|
||||
{
|
||||
if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
|
||||
@@ -771,7 +771,7 @@ struct CvVideoWriter_FFMPEG
|
||||
|
||||
void init();
|
||||
|
||||
AVOutputFormat * fmt;
|
||||
AVOutputFormat * fmt;
|
||||
AVFormatContext * oc;
|
||||
uint8_t * outbuf;
|
||||
uint32_t outbuf_size;
|
||||
@@ -1010,7 +1010,7 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
|
||||
|
||||
static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
|
||||
|
||||
int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
|
||||
static int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
|
||||
{
|
||||
#if LIBAVFORMAT_BUILD > 4628
|
||||
AVCodecContext * c = video_st->codec;
|
||||
@@ -1046,7 +1046,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
|
||||
|
||||
#if LIBAVFORMAT_BUILD > 4752
|
||||
if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
|
||||
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
|
||||
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
|
||||
#else
|
||||
pkt.pts = c->coded_frame->pts;
|
||||
#endif
|
||||
@@ -1069,7 +1069,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
|
||||
bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
|
||||
if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
|
||||
return false;
|
||||
width = frame_width;
|
||||
@@ -1180,7 +1180,7 @@ void CvVideoWriter_FFMPEG::close()
|
||||
// nothing to do if already released
|
||||
if ( !picture )
|
||||
return;
|
||||
|
||||
|
||||
/* no more frame to compress. The codec has a latency of a few
|
||||
frames if using B frames, so we get the last frames by
|
||||
passing the same picture again */
|
||||
@@ -1200,7 +1200,7 @@ void CvVideoWriter_FFMPEG::close()
|
||||
}
|
||||
av_write_trailer(oc);
|
||||
}
|
||||
|
||||
|
||||
if( img_convert_ctx )
|
||||
{
|
||||
sws_freeContext(img_convert_ctx);
|
||||
@@ -1272,7 +1272,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
|
||||
double fps, int width, int height, bool is_color )
|
||||
{
|
||||
icvInitFFMPEG_internal();
|
||||
|
||||
|
||||
CodecID codec_id = CODEC_ID_NONE;
|
||||
int err, codec_pix_fmt;
|
||||
double bitrate_scale = 1;
|
||||
@@ -1284,7 +1284,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
|
||||
return false;
|
||||
if(fps <= 0)
|
||||
return false;
|
||||
|
||||
|
||||
// we allow frames of odd width or height, but in this case we truncate
|
||||
// the rightmost column/the bottom row. Probably, this should be handled more elegantly,
|
||||
// but some internal functions inside FFMPEG swscale require even width/height.
|
||||
@@ -1363,7 +1363,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
|
||||
codec_pix_fmt = PIX_FMT_YUV420P;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
|
||||
|
||||
// TODO -- safe to ignore output audio stream?
|
||||
@@ -1480,8 +1480,8 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
|
||||
err=avformat_write_header(oc, NULL);
|
||||
#else
|
||||
err=av_write_header( oc );
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
if(err < 0)
|
||||
{
|
||||
close();
|
||||
|
||||
@@ -283,7 +283,7 @@ void CvCapture_GStreamer::newPad(GstElement *uridecodebin,
|
||||
|
||||
|
||||
sinkpad = gst_element_get_static_pad (color, "sink");
|
||||
|
||||
|
||||
// printf("linking dynamic pad to colourconverter %p %p\n", uridecodebin, pad);
|
||||
|
||||
gst_pad_link (pad, sinkpad);
|
||||
@@ -357,13 +357,13 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
|
||||
if(manualpipeline) {
|
||||
GstIterator *it = gst_bin_iterate_sinks(GST_BIN(uridecodebin));
|
||||
if(gst_iterator_next(it, (gpointer *)&sink) != GST_ITERATOR_OK) {
|
||||
CV_ERROR(CV_StsError, "GStreamer: cannot find appsink in manual pipeline\n");
|
||||
return false;
|
||||
CV_ERROR(CV_StsError, "GStreamer: cannot find appsink in manual pipeline\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
pipeline = uridecodebin;
|
||||
pipeline = uridecodebin;
|
||||
} else {
|
||||
pipeline = gst_pipeline_new (NULL);
|
||||
pipeline = gst_pipeline_new (NULL);
|
||||
|
||||
color = gst_element_factory_make("ffmpegcolorspace", NULL);
|
||||
sink = gst_element_factory_make("appsink", NULL);
|
||||
@@ -381,16 +381,12 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
|
||||
gst_app_sink_set_max_buffers (GST_APP_SINK(sink), 1);
|
||||
gst_app_sink_set_drop (GST_APP_SINK(sink), stream);
|
||||
|
||||
{
|
||||
GstCaps* caps;
|
||||
caps = gst_caps_new_simple("video/x-raw-rgb",
|
||||
"red_mask", G_TYPE_INT, 0x0000FF,
|
||||
"green_mask", G_TYPE_INT, 0x00FF00,
|
||||
"blue_mask", G_TYPE_INT, 0xFF0000,
|
||||
NULL);
|
||||
gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
|
||||
gst_app_sink_set_caps(GST_APP_SINK(sink), gst_caps_new_simple("video/x-raw-rgb",
|
||||
"red_mask", G_TYPE_INT, 0x0000FF,
|
||||
"green_mask", G_TYPE_INT, 0x00FF00,
|
||||
"blue_mask", G_TYPE_INT, 0xFF0000,
|
||||
NULL));
|
||||
gst_caps_unref(caps);
|
||||
}
|
||||
|
||||
if(gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_READY) ==
|
||||
GST_STATE_CHANGE_FAILURE) {
|
||||
|
||||
@@ -779,9 +779,9 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
return -1;
|
||||
} else {
|
||||
buffer_number--;
|
||||
fprintf (stderr, "Insufficient buffer memory on %s -- decreaseing buffers\n", deviceName);
|
||||
fprintf (stderr, "Insufficient buffer memory on %s -- decreaseing buffers\n", deviceName);
|
||||
|
||||
goto try_again;
|
||||
goto try_again;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -824,8 +824,8 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
if (capture->buffers[MAX_V4L_BUFFERS].start) {
|
||||
free(capture->buffers[MAX_V4L_BUFFERS].start);
|
||||
capture->buffers[MAX_V4L_BUFFERS].start = NULL;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
capture->buffers[MAX_V4L_BUFFERS].start = malloc(buf.length);
|
||||
capture->buffers[MAX_V4L_BUFFERS].length = buf.length;
|
||||
};
|
||||
@@ -1080,11 +1080,11 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) {
|
||||
|
||||
#ifdef USE_TEMP_BUFFER
|
||||
memcpy(capture->buffers[MAX_V4L_BUFFERS].start,
|
||||
capture->buffers[buf.index].start,
|
||||
capture->buffers[MAX_V4L_BUFFERS].length );
|
||||
capture->buffers[buf.index].start,
|
||||
capture->buffers[MAX_V4L_BUFFERS].length );
|
||||
capture->bufferIndex = MAX_V4L_BUFFERS;
|
||||
//printf("got data in buff %d, len=%d, flags=0x%X, seq=%d, used=%d)\n",
|
||||
// buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
|
||||
// buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
|
||||
#else
|
||||
capture->bufferIndex = buf.index;
|
||||
#endif
|
||||
@@ -1211,9 +1211,9 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) {
|
||||
capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette;
|
||||
|
||||
if (v4l1_ioctl (capture->deviceHandle, VIDIOCMCAPTURE,
|
||||
&capture->mmaps[capture->bufferIndex]) == -1) {
|
||||
/* capture is on the way, so just exit */
|
||||
return 1;
|
||||
&capture->mmaps[capture->bufferIndex]) == -1) {
|
||||
/* capture is on the way, so just exit */
|
||||
return 1;
|
||||
}
|
||||
|
||||
++capture->bufferIndex;
|
||||
@@ -1273,11 +1273,11 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
|
||||
if (capture->is_v4l2_device == 1)
|
||||
{
|
||||
|
||||
if(capture->buffers[capture->bufferIndex].start){
|
||||
memcpy((char *)capture->frame.imageData,
|
||||
(char *)capture->buffers[capture->bufferIndex].start,
|
||||
capture->frame.imageSize);
|
||||
}
|
||||
if(capture->buffers[capture->bufferIndex].start){
|
||||
memcpy((char *)capture->frame.imageData,
|
||||
(char *)capture->buffers[capture->bufferIndex].start,
|
||||
capture->frame.imageSize);
|
||||
}
|
||||
|
||||
} else
|
||||
#endif /* HAVE_CAMV4L2 */
|
||||
@@ -1353,7 +1353,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
sprintf(name, "<unknown property string>");
|
||||
capture->control.id = property_id;
|
||||
}
|
||||
|
||||
|
||||
if(v4l2_ioctl(capture->deviceHandle, VIDIOC_G_CTRL, &capture->control) == 0) {
|
||||
/* all went well */
|
||||
is_v4l2_device = 1;
|
||||
@@ -1519,7 +1519,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
|
||||
|
||||
CLEAR (capture->control);
|
||||
CLEAR (capture->queryctrl);
|
||||
|
||||
|
||||
/* get current values */
|
||||
switch (property_id) {
|
||||
case CV_CAP_PROP_BRIGHTNESS:
|
||||
@@ -1688,8 +1688,8 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){
|
||||
if (xioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type) < 0) {
|
||||
perror ("Unable to stop the stream.");
|
||||
}
|
||||
for (unsigned int n_buffers = 0; n_buffers < capture->req.count; ++n_buffers) {
|
||||
if (-1 == v4l2_munmap (capture->buffers[n_buffers].start, capture->buffers[n_buffers].length)) {
|
||||
for (unsigned int n_buffers2 = 0; n_buffers2 < capture->req.count; ++n_buffers2) {
|
||||
if (-1 == v4l2_munmap (capture->buffers[n_buffers2].start, capture->buffers[n_buffers2].length)) {
|
||||
perror ("munmap");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,6 +50,20 @@
|
||||
|
||||
#include <iostream>
|
||||
#include <queue>
|
||||
|
||||
#ifndef i386
|
||||
# define i386 0
|
||||
#endif
|
||||
#ifndef __arm__
|
||||
# define __arm__ 0
|
||||
#endif
|
||||
#ifndef _ARC
|
||||
# define _ARC 0
|
||||
#endif
|
||||
#ifndef __APPLE__
|
||||
# define __APPLE__ 0
|
||||
#endif
|
||||
|
||||
#include "XnCppWrapper.h"
|
||||
|
||||
const std::string XMLConfig =
|
||||
@@ -85,12 +99,12 @@ const std::string XMLConfig =
|
||||
class ApproximateSyncGrabber
|
||||
{
|
||||
public:
|
||||
ApproximateSyncGrabber( xn::Context &context,
|
||||
xn::DepthGenerator &depthGenerator,
|
||||
xn::ImageGenerator &imageGenerator,
|
||||
int maxBufferSize, bool isCircleBuffer, int maxTimeDuration ) :
|
||||
context(context), depthGenerator(depthGenerator), imageGenerator(imageGenerator),
|
||||
maxBufferSize(maxBufferSize), isCircleBuffer(isCircleBuffer), maxTimeDuration(maxTimeDuration)
|
||||
ApproximateSyncGrabber( xn::Context &_context,
|
||||
xn::DepthGenerator &_depthGenerator,
|
||||
xn::ImageGenerator &_imageGenerator,
|
||||
int _maxBufferSize, bool _isCircleBuffer, int _maxTimeDuration ) :
|
||||
context(_context), depthGenerator(_depthGenerator), imageGenerator(_imageGenerator),
|
||||
maxBufferSize(_maxBufferSize), isCircleBuffer(_isCircleBuffer), maxTimeDuration(_maxTimeDuration)
|
||||
{
|
||||
task = 0;
|
||||
|
||||
@@ -165,10 +179,12 @@ private:
|
||||
class ApproximateSynchronizerBase
|
||||
{
|
||||
public:
|
||||
ApproximateSynchronizerBase( ApproximateSyncGrabber& approxSyncGrabber ) :
|
||||
approxSyncGrabber(approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
|
||||
ApproximateSynchronizerBase( ApproximateSyncGrabber& _approxSyncGrabber ) :
|
||||
approxSyncGrabber(_approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
|
||||
{}
|
||||
|
||||
virtual ~ApproximateSynchronizerBase() {}
|
||||
|
||||
virtual bool isSpinContinue() const = 0;
|
||||
virtual void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) = 0;
|
||||
virtual void pushImageMetaData( xn::ImageMetaData& imageMetaData ) = 0;
|
||||
@@ -183,8 +199,8 @@ private:
|
||||
if( status != XN_STATUS_OK )
|
||||
continue;
|
||||
|
||||
xn::DepthMetaData depth;
|
||||
xn::ImageMetaData image;
|
||||
//xn::DepthMetaData depth;
|
||||
//xn::ImageMetaData image;
|
||||
approxSyncGrabber.depthGenerator.GetMetaData(depth);
|
||||
approxSyncGrabber.imageGenerator.GetMetaData(image);
|
||||
|
||||
@@ -242,8 +258,8 @@ private:
|
||||
class ApproximateSynchronizer: public ApproximateSynchronizerBase
|
||||
{
|
||||
public:
|
||||
ApproximateSynchronizer( ApproximateSyncGrabber& approxSyncGrabber ) :
|
||||
ApproximateSynchronizerBase(approxSyncGrabber)
|
||||
ApproximateSynchronizer( ApproximateSyncGrabber& _approxSyncGrabber ) :
|
||||
ApproximateSynchronizerBase(_approxSyncGrabber)
|
||||
{}
|
||||
|
||||
virtual bool isSpinContinue() const
|
||||
@@ -410,7 +426,7 @@ class CvCapture_OpenNI : public CvCapture
|
||||
{
|
||||
public:
|
||||
enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
|
||||
|
||||
|
||||
static const int INVALID_PIXEL_VAL = 0;
|
||||
static const int INVALID_COORDINATE_VAL = 0;
|
||||
|
||||
@@ -445,6 +461,8 @@ protected:
|
||||
|
||||
static const int outputMapsTypesCount = 7;
|
||||
|
||||
static XnMapOutputMode defaultMapOutputMode();
|
||||
|
||||
IplImage* retrieveDepthMap();
|
||||
IplImage* retrievePointCloudMap();
|
||||
IplImage* retrieveDisparityMap();
|
||||
@@ -508,7 +526,7 @@ bool CvCapture_OpenNI::isOpened() const
|
||||
return isContextOpened;
|
||||
}
|
||||
|
||||
XnMapOutputMode defaultMapOutputMode()
|
||||
XnMapOutputMode CvCapture_OpenNI::defaultMapOutputMode()
|
||||
{
|
||||
XnMapOutputMode mode;
|
||||
mode.nXRes = XN_VGA_X_RES;
|
||||
@@ -517,17 +535,16 @@ XnMapOutputMode defaultMapOutputMode()
|
||||
return mode;
|
||||
}
|
||||
|
||||
|
||||
CvCapture_OpenNI::CvCapture_OpenNI( int index )
|
||||
{
|
||||
int deviceType = DEVICE_DEFAULT;
|
||||
XnStatus status;
|
||||
|
||||
|
||||
isContextOpened = false;
|
||||
maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
|
||||
isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER;
|
||||
maxTimeDuration = DEFAULT_MAX_TIME_DURATION;
|
||||
|
||||
|
||||
if( index >= 10 )
|
||||
{
|
||||
deviceType = index / 10;
|
||||
@@ -1201,7 +1218,7 @@ IplImage* CvCapture_OpenNI::retrievePointCloudMap()
|
||||
return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
|
||||
}
|
||||
|
||||
void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
|
||||
static void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
|
||||
XnUInt64 noSampleValue, XnUInt64 shadowValue )
|
||||
{
|
||||
cv::Mat depth;
|
||||
|
||||
@@ -1736,7 +1736,7 @@ mjpeg_to_rgb24 (int width, int height,
|
||||
*
|
||||
*/
|
||||
|
||||
void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
|
||||
static void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
|
||||
{
|
||||
long int i;
|
||||
unsigned char *rawpt, *scanpt;
|
||||
@@ -1814,7 +1814,7 @@ void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned c
|
||||
// at least for 046d:092f Logitech, Inc. QuickCam Express Plus to work
|
||||
//see: http://www.siliconimaging.com/RGB%20Bayer.htm
|
||||
//and 4.6 at http://tldp.org/HOWTO/html_single/libdc1394-HOWTO/
|
||||
void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
|
||||
static void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
|
||||
{
|
||||
long int i;
|
||||
unsigned char *rawpt, *scanpt;
|
||||
@@ -1921,7 +1921,7 @@ static int init_done = 0;
|
||||
present at the MSB of byte x.
|
||||
|
||||
*/
|
||||
void sonix_decompress_init(void)
|
||||
static void sonix_decompress_init(void)
|
||||
{
|
||||
int i;
|
||||
int is_abs, val, len;
|
||||
@@ -1999,7 +1999,7 @@ void sonix_decompress_init(void)
|
||||
Returns <0 if operation failed.
|
||||
|
||||
*/
|
||||
int sonix_decompress(int width, int height, unsigned char *inp, unsigned char *outp)
|
||||
static int sonix_decompress(int width, int height, unsigned char *inp, unsigned char *outp)
|
||||
{
|
||||
int row, col;
|
||||
int val;
|
||||
@@ -2769,9 +2769,9 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){
|
||||
perror ("Unable to stop the stream.");
|
||||
}
|
||||
|
||||
for (unsigned int n_buffers = 0; n_buffers < capture->req.count; ++n_buffers)
|
||||
for (unsigned int n_buffers_ = 0; n_buffers_ < capture->req.count; ++n_buffers_)
|
||||
{
|
||||
if (-1 == munmap (capture->buffers[n_buffers].start, capture->buffers[n_buffers].length)) {
|
||||
if (-1 == munmap (capture->buffers[n_buffers_].start, capture->buffers[n_buffers_].length)) {
|
||||
perror ("munmap");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,16 +43,12 @@
|
||||
|
||||
#include <vfw.h>
|
||||
|
||||
#if _MSC_VER >= 1200
|
||||
#pragma warning( disable: 4711 )
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
#define WM_CAP_FIRSTA (WM_USER)
|
||||
#define capSendMessage(hwnd,m,w,l) (IsWindow(hwnd)?SendMessage(hwnd,m,w,l):0)
|
||||
#endif
|
||||
|
||||
#if defined _M_X64
|
||||
#if defined _M_X64 && defined _MSC_VER
|
||||
#pragma optimize("",off)
|
||||
#pragma warning(disable: 4748)
|
||||
#endif
|
||||
@@ -177,13 +173,13 @@ bool CvCaptureAVI_VFW::open( const char* filename )
|
||||
{
|
||||
size.width = aviinfo.rcFrame.right - aviinfo.rcFrame.left;
|
||||
size.height = aviinfo.rcFrame.bottom - aviinfo.rcFrame.top;
|
||||
BITMAPINFOHEADER bmih = icvBitmapHeader( size.width, size.height, 24 );
|
||||
BITMAPINFOHEADER bmihdr = icvBitmapHeader( size.width, size.height, 24 );
|
||||
|
||||
film_range.start_index = (int)aviinfo.dwStart;
|
||||
film_range.end_index = film_range.start_index + (int)aviinfo.dwLength;
|
||||
fps = (double)aviinfo.dwRate/aviinfo.dwScale;
|
||||
pos = film_range.start_index;
|
||||
getframe = AVIStreamGetFrameOpen( avistream, &bmih );
|
||||
getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
|
||||
if( getframe != 0 )
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -45,7 +45,8 @@
|
||||
#ifdef HAVE_JPEG
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning(disable: 4324 4611)
|
||||
//interaction between '_setjmp' and C++ object destruction is non-portable
|
||||
#pragma warning(disable: 4611)
|
||||
#endif
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -69,11 +70,18 @@ extern "C" {
|
||||
namespace cv
|
||||
{
|
||||
|
||||
#ifdef _MSC_VER
|
||||
# pragma warning(push)
|
||||
# pragma warning(disable:4324) //structure was padded due to __declspec(align())
|
||||
#endif
|
||||
struct JpegErrorMgr
|
||||
{
|
||||
struct jpeg_error_mgr pub;
|
||||
jmp_buf setjmp_buffer;
|
||||
};
|
||||
#ifdef _MSC_VER
|
||||
# pragma warning(pop)
|
||||
#endif
|
||||
|
||||
struct JpegSource
|
||||
{
|
||||
@@ -126,8 +134,7 @@ skip_input_data(j_decompress_ptr cinfo, long num_bytes)
|
||||
}
|
||||
|
||||
|
||||
GLOBAL(void)
|
||||
jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
|
||||
static void jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
|
||||
{
|
||||
cinfo->src = &source->pub;
|
||||
|
||||
@@ -498,8 +505,7 @@ empty_output_buffer (j_compress_ptr cinfo)
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
GLOBAL(void)
|
||||
jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
|
||||
static void jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
|
||||
{
|
||||
cinfo->dest = &destination->pub;
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
#include "grfmt_png.hpp"
|
||||
|
||||
#if defined _MSC_VER && _MSC_VER >= 1200
|
||||
// disable warnings related to _setjmp
|
||||
// interaction between '_setjmp' and C++ object destruction is non-portable
|
||||
#pragma warning( disable: 4611 )
|
||||
#endif
|
||||
|
||||
@@ -157,22 +157,22 @@ bool PngDecoder::readHeader()
|
||||
|
||||
if( !m_buf.empty() || m_f )
|
||||
{
|
||||
png_uint_32 width, height;
|
||||
png_uint_32 wdth, hght;
|
||||
int bit_depth, color_type;
|
||||
|
||||
png_read_info( png_ptr, info_ptr );
|
||||
|
||||
png_get_IHDR( png_ptr, info_ptr, &width, &height,
|
||||
png_get_IHDR( png_ptr, info_ptr, &wdth, &hght,
|
||||
&bit_depth, &color_type, 0, 0, 0 );
|
||||
|
||||
m_width = (int)width;
|
||||
m_height = (int)height;
|
||||
m_width = (int)wdth;
|
||||
m_height = (int)hght;
|
||||
m_color_type = color_type;
|
||||
m_bit_depth = bit_depth;
|
||||
|
||||
if( bit_depth <= 8 || bit_depth == 16 )
|
||||
{
|
||||
switch(color_type)
|
||||
switch(color_type)
|
||||
{
|
||||
case PNG_COLOR_TYPE_RGB:
|
||||
case PNG_COLOR_TYPE_PALETTE:
|
||||
@@ -224,7 +224,7 @@ bool PngDecoder::readData( Mat& img )
|
||||
else if( !isBigEndian() )
|
||||
png_set_swap( png_ptr );
|
||||
|
||||
if(img.channels() < 4)
|
||||
if(img.channels() < 4)
|
||||
{
|
||||
/* observation: png_read_image() writes 400 bytes beyond
|
||||
* end of data when reading a 400x118 color png
|
||||
@@ -247,7 +247,7 @@ bool PngDecoder::readData( Mat& img )
|
||||
#else
|
||||
png_set_gray_1_2_4_to_8( png_ptr );
|
||||
#endif
|
||||
|
||||
|
||||
if( CV_MAT_CN(m_type) > 1 && color )
|
||||
png_set_bgr( png_ptr ); // convert RGB to BGR
|
||||
else if( color )
|
||||
@@ -330,7 +330,7 @@ bool PngEncoder::write( const Mat& img, const vector<int>& params )
|
||||
if( params[i] == CV_IMWRITE_PNG_STRATEGY )
|
||||
{
|
||||
compression_strategy = params[i+1];
|
||||
compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
|
||||
compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -115,19 +115,19 @@ bool TiffDecoder::readHeader()

if( tif )
{
int width = 0, height = 0, photometric = 0;
int wdth = 0, hght = 0, photometric = 0;
m_tif = tif;

if( TIFFGetField( tif, TIFFTAG_IMAGEWIDTH, &width ) &&
TIFFGetField( tif, TIFFTAG_IMAGELENGTH, &height ) &&
if( TIFFGetField( tif, TIFFTAG_IMAGEWIDTH, &wdth ) &&
TIFFGetField( tif, TIFFTAG_IMAGELENGTH, &hght ) &&
TIFFGetField( tif, TIFFTAG_PHOTOMETRIC, &photometric ))
{
int bpp=8, ncn = photometric > 1 ? 3 : 1;
TIFFGetField( tif, TIFFTAG_BITSPERSAMPLE, &bpp );
TIFFGetField( tif, TIFFTAG_SAMPLESPERPIXEL, &ncn );

m_width = width;
m_height = height;

m_width = wdth;
m_height = hght;
if( bpp > 8 &&
((photometric != 2 && photometric != 1) ||
(ncn != 1 && ncn != 3 && ncn != 4)))
@@ -169,7 +169,7 @@ bool TiffDecoder::readData( Mat& img )
bool color = img.channels() > 1;
uchar* data = img.data;
int step = (int)img.step;

if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F )
return false;

@@ -422,9 +422,9 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector<int>& /*params*/)
default:
{
return false;
}
}
}

const int bitsPerByte = 8;
size_t fileStep = (width * channels * bitsPerChannel) / bitsPerByte;
int rowsPerStrip = (int)((1 << 13)/fileStep);
@@ -443,7 +443,7 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector<int>& /*params*/)
{
return false;
}

// defaults for now, maybe base them on params in the future
int compression = COMPRESSION_LZW;
int predictor = PREDICTOR_HORIZONTAL;
@@ -516,7 +516,7 @@ bool TiffEncoder::writeLibTiff( const Mat& img, const vector<int>& /*params*/)
return false;
}
}

TIFFClose(pTiffHandle);
return true;
}
@@ -546,7 +546,7 @@ bool TiffEncoder::write( const Mat& img, const vector<int>& /*params*/)
if( !strm.open(*m_buf) )
return false;
}
else
else
{
#ifdef HAVE_TIFF
return writeLibTiff(img, params);

@@ -57,7 +57,7 @@ namespace cv
static vector<ImageDecoder> decoders;
static vector<ImageEncoder> encoders;

ImageDecoder findDecoder( const string& filename )
static ImageDecoder findDecoder( const string& filename )
{
size_t i, maxlen = 0;
for( i = 0; i < decoders.size(); i++ )
@@ -83,7 +83,7 @@ ImageDecoder findDecoder( const string& filename )
return ImageDecoder();
}

ImageDecoder findDecoder( const Mat& buf )
static ImageDecoder findDecoder( const Mat& buf )
{
size_t i, maxlen = 0;

@@ -110,7 +110,7 @@ ImageDecoder findDecoder( const Mat& buf )
return ImageDecoder();
}

ImageEncoder findEncoder( const string& _ext )
static ImageEncoder findEncoder( const string& _ext )
{
if( _ext.size() <= 1 )
return ImageEncoder();
@@ -395,7 +395,7 @@ Mat imdecode( InputArray _buf, int flags )
imdecode_( buf, flags, LOAD_MAT, &img );
return img;
}

bool imencode( const string& ext, InputArray _image,
vector<uchar>& buf, const vector<int>& params )
{

@@ -42,10 +42,6 @@
#ifndef __HIGHGUI_H_
#define __HIGHGUI_H_

#if _MSC_VER >= 1200
#pragma warning( disable: 4251 )
#endif

#include "cvconfig.h"

#include "opencv2/highgui/highgui.hpp"

@@ -724,10 +724,10 @@ CV_IMPL void cvSaveWindowParameters(const char* name)
CV_NO_GUI_ERROR("cvSaveWindowParameters");
}

CV_IMPL void cvLoadWindowParameterss(const char* name)
{
CV_NO_GUI_ERROR("cvLoadWindowParameters");
}
// CV_IMPL void cvLoadWindowParameterss(const char* name)
// {
//     CV_NO_GUI_ERROR("cvLoadWindowParameters");
// }

CV_IMPL int cvCreateButton(const char*, void (*)(int, void*), void*, int, int)
{

File diff suppressed because it is too large
@@ -55,16 +55,6 @@
#include <GL/glu.h>
#endif

/*#if _MSC_VER >= 1200
#pragma warning( disable: 4505 )
#pragma comment(lib,"gtk-win32-2.0.lib")
#pragma comment(lib,"glib-2.0.lib")
#pragma comment(lib,"gobject-2.0.lib")
#pragma comment(lib,"gdk-win32-2.0.lib")
#pragma comment(lib,"gdk_pixbuf-2.0.lib")
#endif*/

// TODO Fix the initial window size when flags=0. Right now the initial window is by default
// 320x240 size. A better default would be actual size of the image. Problem
// is determining desired window size with trackbars while still allowing resizing.
@@ -1372,17 +1362,17 @@ cvDestroyAllWindows( void )
CV_UNLOCK_MUTEX();
}

CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
CvSize window_size;
GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
&window_size.width, &window_size.height );
// CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
// CvSize window_size;
// GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
// gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
// &window_size.width, &window_size.height );

window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
// window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
// window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;

return window_size;
}
// return window_size;
// }

CV_IMPL void
cvShowImage( const char* name, const CvArr* arr )

@@ -43,10 +43,6 @@

#if defined WIN32 || defined _WIN32

#if _MSC_VER >= 1200
#pragma warning( disable: 4710 )
#endif

#define COMPILE_MULTIMON_STUBS // Required for multi-monitor support
#ifndef _MULTIMON_USE_SECURE_CRT
#  define _MULTIMON_USE_SECURE_CRT 0 // some MinGW platforms have no strncpy_s
@@ -61,6 +57,10 @@
#ifndef __inout
#  define __inout
#endif

#ifdef __GNUC__
#  pragma GCC diagnostic ignored "-Wmissing-declarations"
#endif
#include <MultiMon.h>

#include <commctrl.h>
@@ -166,7 +166,7 @@ typedef struct CvWindow
HGDIOBJ image;
int last_key;
int flags;
int status;//0 normal, 1 fullscreen (YV)
int status;//0 normal, 1 fullscreen (YV)

CvMouseCallback on_mouse;
void* on_mouse_param;
@@ -360,7 +360,7 @@ icvSaveWindowPos( const char* name, CvRect rect )
char rootKey[1024];
strcpy( szKey, icvWindowPosRootKey );
strcat( szKey, name );

if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_READ,&hkey) != ERROR_SUCCESS )
{
HKEY hroot;
@@ -405,7 +405,7 @@ icvSaveWindowPos( const char* name, CvRect rect )
if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_WRITE,&hkey) != ERROR_SUCCESS )
return;
}

RegSetValueEx(hkey, "Left", 0, REG_DWORD, (BYTE*)&rect.x, sizeof(rect.x));
RegSetValueEx(hkey, "Top", 0, REG_DWORD, (BYTE*)&rect.y, sizeof(rect.y));
RegSetValueEx(hkey, "Width", 0, REG_DWORD, (BYTE*)&rect.width, sizeof(rect.width));
@@ -415,9 +415,9 @@ icvSaveWindowPos( const char* name, CvRect rect )

double cvGetModeWindow_W32(const char* name)//YV
{
double result = -1;

CV_FUNCNAME( "cvGetModeWindow_W32" );
double result = -1;

CV_FUNCNAME( "cvGetModeWindow_W32" );

__BEGIN__;

@@ -429,75 +429,75 @@ double cvGetModeWindow_W32(const char* name)//YV
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here

result = window->status;

__END__;
return result;
return result;
}

void cvSetModeWindow_W32( const char* name, double prop_value)//Yannick Verdie
{
CV_FUNCNAME( "cvSetModeWindow_W32" );
CV_FUNCNAME( "cvSetModeWindow_W32" );

__BEGIN__;
__BEGIN__;

CvWindow* window;
CvWindow* window;

if(!name)
CV_ERROR( CV_StsNullPtr, "NULL name string" );
if(!name)
CV_ERROR( CV_StsNullPtr, "NULL name string" );

window = icvFindWindowByName( name );
if( !window )
CV_ERROR( CV_StsNullPtr, "NULL window" );
window = icvFindWindowByName( name );
if( !window )
CV_ERROR( CV_StsNullPtr, "NULL window" );

if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set
EXIT;
if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set
EXIT;

{
DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE);
CvRect position;
{
DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE);
CvRect position;

if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL)
{
icvLoadWindowPos(window->name,position );
SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME);
if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL)
{
icvLoadWindowPos(window->name,position );
SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME);

SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
window->status=CV_WINDOW_NORMAL;
SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
window->status=CV_WINDOW_NORMAL;

EXIT;
}
EXIT;
}

if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN)
{
//save dimension
RECT rect;
GetWindowRect(window->frame, &rect);
CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top);
icvSaveWindowPos(window->name,RectCV );
if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN)
{
//save dimension
RECT rect;
GetWindowRect(window->frame, &rect);
CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top);
icvSaveWindowPos(window->name,RectCV );

//Look at coordinate for fullscreen
HMONITOR hMonitor;
MONITORINFO mi;
hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST);
//Look at coordinate for fullscreen
HMONITOR hMonitor;
MONITORINFO mi;
hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST);

mi.cbSize = sizeof(mi);
GetMonitorInfo(hMonitor, &mi);
mi.cbSize = sizeof(mi);
GetMonitorInfo(hMonitor, &mi);

//fullscreen
position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top;
position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top;
SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME);
//fullscreen
position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top;
position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top;
SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME);

SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
window->status=CV_WINDOW_FULLSCREEN;
SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
window->status=CV_WINDOW_FULLSCREEN;

EXIT;
}
}
EXIT;
}
}

__END__;
__END__;
}

double cvGetPropWindowAutoSize_W32(const char* name)

@@ -526,9 +526,9 @@ double cvGetPropWindowAutoSize_W32(const char* name)

double cvGetRatioWindow_W32(const char* name)
{
double result = -1;

CV_FUNCNAME( "cvGetRatioWindow_W32" );
double result = -1;

CV_FUNCNAME( "cvGetRatioWindow_W32" );

__BEGIN__;

@@ -540,20 +540,20 @@ double cvGetRatioWindow_W32(const char* name)
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here

result = static_cast<double>(window->width) / window->height;

__END__;

return result;
return result;
}

double cvGetOpenGlProp_W32(const char* name)
{
double result = -1;
double result = -1;

#ifdef HAVE_OPENGL
CV_FUNCNAME( "cvGetOpenGlProp_W32" );
#ifdef HAVE_OPENGL
CV_FUNCNAME( "cvGetOpenGlProp_W32" );

__BEGIN__;

@@ -565,14 +565,14 @@ double cvGetOpenGlProp_W32(const char* name)
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here

result = window->useGl;

__END__;
#endif
(void)name;
(void)name;

return result;
return result;
}

@@ -626,7 +626,7 @@ namespace
void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const;

bool isGlContextInitialized() const;

PFNGLGENBUFFERSPROC glGenBuffersExt;
PFNGLDELETEBUFFERSPROC glDeleteBuffersExt;

@@ -787,8 +787,8 @@ namespace
weight, // font weight
italic ? TRUE : FALSE, // Italic
underline ? TRUE : FALSE, // Underline
FALSE, // StrikeOut
ANSI_CHARSET, // CharSet
FALSE, // StrikeOut
ANSI_CHARSET, // CharSet
OUT_TT_PRECIS, // OutPrecision
CLIP_DEFAULT_PRECIS, // ClipPrecision
ANTIALIASED_QUALITY, // Quality
@@ -870,12 +870,12 @@ namespace
0, // Shift Bit Ignored
0, // No Accumulation Buffer
0, 0, 0, 0, // Accumulation Bits Ignored
32, // 32 Bit Z-Buffer (Depth Buffer)
32, // 32 Bit Z-Buffer (Depth Buffer)
0, // No Stencil Buffer
0, // No Auxiliary Buffer
PFD_MAIN_PLANE, // Main Drawing Layer
0, // Reserved
0, 0, 0 // Layer Masks Ignored
0, 0, 0 // Layer Masks Ignored
};

hGLDC = GetDC(hWnd);
@@ -903,7 +903,7 @@ namespace

void releaseGlContext(CvWindow* window)
{
CV_FUNCNAME( "releaseGlContext" );
//CV_FUNCNAME( "releaseGlContext" );

__BEGIN__;

@@ -915,7 +915,7 @@ namespace
window->hGLRC = NULL;
}

if (window->dc)
if (window->dc)
{
ReleaseDC(window->hwnd, window->dc);
window->dc = NULL;
@@ -935,7 +935,7 @@ namespace
if (!wglMakeCurrent(window->dc, window->hGLRC))
CV_ERROR( CV_OpenGlApiCallError, "Can't Activate The GL Rendering Context" );

glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

if (window->glDrawCallback)
window->glDrawCallback(window->glDrawData);
@@ -1009,7 +1009,7 @@ CV_IMPL int cvNamedWindow( const char* name, int flags )

ShowWindow(mainhWnd, SW_SHOW);

//YV- remove one border by changing the style
//YV- remove one border by changing the style
hWnd = CreateWindow("HighGUI class", "", (defStyle & ~WS_SIZEBOX) | WS_CHILD, CW_USEDEFAULT, 0, rect.width, rect.height, mainhWnd, 0, hg_hinstance, 0);
if( !hWnd )
CV_ERROR( CV_StsError, "Frame window can not be created" );
@@ -1400,16 +1400,16 @@ cvShowImage( const char* name, const CvArr* arr )
CV_ERROR( CV_StsNullPtr, "NULL name" );

window = icvFindWindowByName(name);
if(!window)
{
if(!window)
{
#ifndef HAVE_OPENGL
cvNamedWindow(name, CV_WINDOW_AUTOSIZE);
cvNamedWindow(name, CV_WINDOW_AUTOSIZE);
#else
cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL);
cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL);
#endif

window = icvFindWindowByName(name);
}
window = icvFindWindowByName(name);
}

if( !window || !arr )
EXIT; // keep silence here.
@@ -1467,6 +1467,7 @@ cvShowImage( const char* name, const CvArr* arr )
__END__;
}

#if 0
CV_IMPL void
cvShowImageHWND(HWND w_hWnd, const CvArr* arr)
{
@@ -1494,7 +1495,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr)
if( CV_IS_IMAGE_HDR( arr ) )
origin = ((IplImage*)arr)->origin;

CV_CALL( image = cvGetMat( arr, &stub ) );
CV_CALL( image = cvGetMat( arr, &stub ) );

if ( hdc )
{
@@ -1512,7 +1513,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr)
dst_ptr = bmp.bmBits;
}

if( size.cx != image->width || size.cy != image->height || channels != channels0 )
if( size.cx != image->width || size.cy != image->height || channels != channels0 )
{
changed_size = true;

@@ -1544,6 +1545,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr)

__END__;
}
#endif

CV_IMPL void cvResizeWindow(const char* name, int width, int height )
{
@@ -1666,7 +1668,7 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam )
{
// Snap window to screen edges with multi-monitor support. // Adi Shavit
LPWINDOWPOS pos = (LPWINDOWPOS)lParam;

RECT rect;
GetWindowRect(window->frame, &rect);

@@ -1679,15 +1681,15 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam )

const int SNAP_DISTANCE = 15;

if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE)
if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE)
pos->x = mi.rcMonitor.left; // snap to left edge
else
else
if (abs(pos->x + pos->cx - mi.rcMonitor.right) <= SNAP_DISTANCE)
pos->x = mi.rcMonitor.right - pos->cx; // snap to right edge

if (abs(pos->y - mi.rcMonitor.top) <= SNAP_DISTANCE)
pos->y = mi.rcMonitor.top; // snap to top edge
else
else
if (abs(pos->y + pos->cy - mi.rcMonitor.bottom) <= SNAP_DISTANCE)
pos->y = mi.rcMonitor.bottom - pos->cy; // snap to bottom edge
}
@@ -1848,9 +1850,9 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM
EndPaint(hwnd, &paint);
}
#ifdef HAVE_OPENGL
else if(window->useGl)
else if(window->useGl)
{
drawGl(window);
drawGl(window);
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
#endif
@@ -1901,18 +1903,18 @@ static LRESULT CALLBACK WindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM
if( hg_on_preprocess )
{
int was_processed = 0;
int ret = hg_on_preprocess(hwnd, uMsg, wParam, lParam, &was_processed);
int rethg = hg_on_preprocess(hwnd, uMsg, wParam, lParam, &was_processed);
if( was_processed )
return ret;
return rethg;
}
ret = HighGUIProc(hwnd, uMsg, wParam, lParam);

if(hg_on_postprocess)
{
int was_processed = 0;
int ret = hg_on_postprocess(hwnd, uMsg, wParam, lParam, &was_processed);
int rethg = hg_on_postprocess(hwnd, uMsg, wParam, lParam, &was_processed);
if( was_processed )
return ret;
return rethg;
}

return ret;

@@ -60,9 +60,9 @@ protected:
void CV_DrawingTest::run( int )
{
Mat testImg, valImg;
const string name = "drawing/image.jpg";
const string fname = "drawing/image.jpg";
string path = ts->get_data_path(), filename;
filename = path + name;
filename = path + fname;

draw( testImg );

@@ -415,30 +415,30 @@ class CV_FillConvexPolyTest : public cvtest::BaseTest
{
public:
CV_FillConvexPolyTest() {}
~CV_FillConvexPolyTest() {}
~CV_FillConvexPolyTest() {}
protected:
void run(int)
{
vector<Point> line1;
vector<Point> line2;

line1.push_back(Point(1, 1));
line1.push_back(Point(5, 1));
line1.push_back(Point(5, 8));
line1.push_back(Point(1, 8));

line2.push_back(Point(2, 2));
line2.push_back(Point(10, 2));
line2.push_back(Point(10, 16));
line2.push_back(Point(2, 16));

Mat gray0(10,10,CV_8U, Scalar(0));
fillConvexPoly(gray0, line1, Scalar(255), 8, 0);
int nz1 = countNonZero(gray0);

fillConvexPoly(gray0, line2, Scalar(0), 8, 1);
int nz2 = countNonZero(gray0)/255;

CV_Assert( nz1 == 40 && nz2 == 0 );
}
};

@@ -43,7 +43,7 @@
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui.hpp"

#if defined HAVE_GTK || defined HAVE_QT || defined WIN32 || defined _WIN32 || HAVE_CARBON || HAVE_COCOA
#if defined HAVE_GTK || defined HAVE_QT || defined WIN32 || defined _WIN32 || defined HAVE_CARBON || defined HAVE_COCOA

using namespace cv;
using namespace std;

@@ -1,3 +1,7 @@
#ifdef __GNUC__
#  pragma GCC diagnostic ignored "-Wmissing-declarations"
#endif

#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

@@ -40,7 +44,7 @@
/*defined(HAVE_OPENNI) || too specialized */ \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */

#  define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
#  define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
@@ -61,19 +65,19 @@ namespace cvtest
{

string fourccToString(int fourcc);

struct VideoFormat
{
VideoFormat() { fourcc = -1; }
VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
bool empty() const { return ext.empty(); }

string ext;
int fourcc;
};

extern const VideoFormat g_specific_fmt_list[];

}

#endif

@@ -53,7 +53,7 @@ string fourccToString(int fourcc)
{
return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
}

const VideoFormat g_specific_fmt_list[] =
{
VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
@@ -63,11 +63,11 @@ const VideoFormat g_specific_fmt_list[] =
VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),

VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
VideoFormat()
};

}

class CV_HighGuiTest : public cvtest::BaseTest
@@ -246,7 +246,7 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt

if (!img)
break;

frames.push_back(Mat(img).clone());

if (writer == 0)
@@ -393,7 +393,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
{
string ext = fmt.ext;
int fourcc = fmt.fourcc;

string fourcc_str = cvtest::fourccToString(fourcc);
const string video_file = "video_" + fourcc_str + "." + ext;

@@ -403,7 +403,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
if (!writer.isOpened())
{
// call it repeatedly for easier debugging
VideoWriter writer(video_file, fourcc, 25, frame_size, true);
VideoWriter writer2(video_file, fourcc, 25, frame_size, true);
ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
ts->printf(ts->LOG, "Cannot create VideoWriter object with codec %s.\n", fourcc_str.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
@@ -412,7 +412,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor

const size_t IMAGE_COUNT = 30;
vector<Mat> images;

for( size_t i = 0; i < IMAGE_COUNT; ++i )
{
string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), i);
@@ -432,7 +432,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor
if (img.at<Vec3b>(k, l) == Vec3b::all(0))
img.at<Vec3b>(k, l) = Vec3b(0, 255, 0);
else img.at<Vec3b>(k, l) = Vec3b(0, 0, 255);

resize(img, img, frame_size, 0.0, 0.0, INTER_CUBIC);

images.push_back(img);

@@ -1,180 +1,180 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;
using namespace std;

class CV_PositioningTest : public cvtest::BaseTest
{
public:
CV_PositioningTest()
{
framesize = Size(640, 480);
}

Mat drawFrame(int i)
{
Mat mat = Mat::zeros(framesize, CV_8UC3);

mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);
putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);
return mat;
}

string getFilename(const cvtest::VideoFormat& fmt)
{
return format("test_video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());
}

bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount)
{
string filename = getFilename(fmt);

VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);
if( !writer.isOpened() )
return false;

for (int i = 0; i < framecount; ++i)
{
Mat img = drawFrame(i);
writer << img;
}
return true;
}

void run(int)
{
int n_frames = 100;

for( int testcase = 0; ; testcase++ )
{
const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];
if( fmt.empty() )
break;
string filename = getFilename(fmt);
ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());

if( !CreateTestVideo(fmt, n_frames) )
{
ts->printf(ts->LOG, "\nError: cannot create video file");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

VideoCapture cap(filename);

if (!cap.isOpened())
{
ts->printf(ts->LOG, "\nError: cannot read video file.");
ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
return;
}

int N0 = cap.get(CV_CAP_PROP_FRAME_COUNT);
cap.set(CV_CAP_PROP_POS_FRAMES, 0);
int N = cap.get(CV_CAP_PROP_FRAME_COUNT);

if (N != n_frames || N != N0)
{
ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

for (int k = 0; k < N; ++k)
{
int idx = theRNG().uniform(0, N);

if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
{
ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);

Mat img; cap >> img;
Mat img0 = drawFrame(idx);

if( idx != idx1 )
{
ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",
idx1, idx);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
imwrite("opencv_test_highgui_postest_actual.png", img);
imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}

if (img.empty())
{
ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

double err = PSNR(img, img0);

if( err < 20 )
{
ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
imwrite("opencv_test_highgui_postest_actual.png", img);
imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}
}
}
}

Size framesize;
};

#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
#endif
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;
using namespace std;

class CV_PositioningTest : public cvtest::BaseTest
{
public:
CV_PositioningTest()
{
framesize = Size(640, 480);
}

Mat drawFrame(int i)
{
Mat mat = Mat::zeros(framesize, CV_8UC3);

mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);
putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);
return mat;
}

string getFilename(const cvtest::VideoFormat& fmt)
{
return format("test_video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());
}

bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount)
{
string filename = getFilename(fmt);

VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);
if( !writer.isOpened() )
return false;

for (int i = 0; i < framecount; ++i)
{
Mat img = drawFrame(i);
writer << img;
}
return true;
}

void run(int)
{
int n_frames = 100;

for( int testcase = 0; ; testcase++ )
{
const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];
if( fmt.empty() )
break;
string filename = getFilename(fmt);
ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());

if( !CreateTestVideo(fmt, n_frames) )
{
ts->printf(ts->LOG, "\nError: cannot create video file");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

VideoCapture cap(filename);

if (!cap.isOpened())
{
ts->printf(ts->LOG, "\nError: cannot read video file.");
ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
return;
}

int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
cap.set(CV_CAP_PROP_POS_FRAMES, 0);
int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);

if (N != n_frames || N != N0)
{
ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

for (int k = 0; k < N; ++k)
{
int idx = theRNG().uniform(0, N);

if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
{
ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);

Mat img; cap >> img;
Mat img0 = drawFrame(idx);

if( idx != idx1 )
{
ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",
idx1, idx);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
imwrite("opencv_test_highgui_postest_actual.png", img);
imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}

if (img.empty())
{
ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}

double err = PSNR(img, img0);

if( err < 20 )
{
ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
imwrite("opencv_test_highgui_postest_actual.png", img);
imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}
}
}
}

Size framesize;
};

#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
#endif