Split highgui module into videoio and highgui
@@ -2981,7 +2981,7 @@ The class provides the following features for all derived classes:

* so-called "virtual constructor". That is, each Algorithm derivative is registered at program start and you can get the list of registered algorithms and create an instance of a particular algorithm by its name (see ``Algorithm::create``). If you plan to add your own algorithms, it is good practice to add a unique prefix to your algorithms to distinguish them from other algorithms.

* setting/retrieving algorithm parameters by name. If you used the video capturing functionality from the OpenCV highgui module, you are probably familiar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides similar methods where instead of integer ids you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details.
* setting/retrieving algorithm parameters by name. If you used the video capturing functionality from the OpenCV videoio module, you are probably familiar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides similar methods where instead of integer ids you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details.

* reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time.
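A minimal sketch of the two access styles mentioned above (a hedged example; the ``VideoCapture`` property constants are the ones declared by videoio, and the ``Algorithm`` parameter name is purely illustrative): ::

    cv::VideoCapture cap(0);
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 640);     // access by integer property id
    double fps = cap.get(cv::CAP_PROP_FPS);     // read a property back

    // Algorithm uses text names instead of integer ids, e.g.
    //   algo->set("nFeatures", 500);
    //   int n = algo->getInt("nFeatures");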
@@ -14,7 +14,8 @@ OpenCV has a modular structure, which means that the package includes several sh

* **calib3d** - basic multiple-view geometry algorithms, single and stereo camera calibration, object pose estimation, stereo correspondence algorithms, and elements of 3D reconstruction.
* **features2d** - salient feature detectors, descriptors, and descriptor matchers.
* **objdetect** - detection of objects and instances of the predefined classes (for example, faces, eyes, mugs, people, cars, and so on).
* **highgui** - an easy-to-use interface to video capturing, image and video codecs, as well as simple UI capabilities.
* **highgui** - an easy-to-use interface to simple UI capabilities.
* **videoio** - an easy-to-use interface to video capturing and video codecs (a short usage sketch follows this list).
* **gpu** - GPU-accelerated algorithms from different OpenCV modules.
* ... some other helper modules, such as FLANN and Google test wrappers, Python bindings, and others.
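A minimal sketch of the split in practice (a hedged example assuming the new ``opencv2/videoio.hpp`` and ``opencv2/highgui.hpp`` headers): frame capture comes from **videoio**, while the window and keyboard handling come from **highgui**. ::

    #include <opencv2/core.hpp>
    #include <opencv2/videoio.hpp>   // cv::VideoCapture
    #include <opencv2/highgui.hpp>   // cv::imshow, cv::waitKey

    int main()
    {
        cv::VideoCapture cap(0);                  // open the default camera
        if (!cap.isOpened()) return -1;
        cv::Mat frame;
        while (cap.read(frame))                   // grab and decode one frame
        {
            cv::imshow("preview", frame);         // highgui window
            if (cv::waitKey(30) >= 0) break;      // stop on any key press
        }
        return 0;
    }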
@@ -6,7 +6,7 @@ set(the_description "CUDA-accelerated Video Encoding/Decoding")

ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef)

ocv_add_module(cudacodec opencv_highgui OPTIONAL opencv_cudev)
ocv_add_module(cudacodec OPTIONAL opencv_cudev)

ocv_module_include_directories()
ocv_glob_module_sources()

@@ -18,13 +18,9 @@ endif()

set(highgui_hdrs
    src/precomp.hpp
    src/cap_ffmpeg_impl.hpp
)

set(highgui_srcs
    src/cap.cpp
    src/cap_images.cpp
    src/cap_ffmpeg.cpp
    src/window.cpp
)
@@ -76,128 +72,6 @@ elseif(HAVE_COCOA)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
|
||||
endif()
|
||||
|
||||
if(WIN32 AND NOT ARM)
|
||||
list(APPEND highgui_srcs src/cap_cmu.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_DSHOW)
|
||||
list(APPEND highgui_srcs src/cap_dshow.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_MSMF)
|
||||
list(APPEND highgui_srcs src/cap_msmf.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_VFW)
|
||||
list(APPEND highgui_srcs src/cap_vfw.cpp)
|
||||
endif()
|
||||
|
||||
if(HAVE_XINE)
|
||||
list(APPEND highgui_srcs src/cap_xine.cpp)
|
||||
endif(HAVE_XINE)
|
||||
|
||||
if(HAVE_DC1394_2)
|
||||
list(APPEND highgui_srcs src/cap_dc1394_v2.cpp)
|
||||
endif(HAVE_DC1394_2)
|
||||
|
||||
if(HAVE_DC1394)
|
||||
list(APPEND highgui_srcs src/cap_dc1394.cpp)
|
||||
endif(HAVE_DC1394)
|
||||
|
||||
if(HAVE_GSTREAMER)
|
||||
list(APPEND highgui_srcs src/cap_gstreamer.cpp)
|
||||
endif(HAVE_GSTREAMER)
|
||||
|
||||
if(HAVE_UNICAP)
|
||||
list(APPEND highgui_srcs src/cap_unicap.cpp)
|
||||
endif(HAVE_UNICAP)
|
||||
|
||||
if(HAVE_LIBV4L)
|
||||
list(APPEND highgui_srcs src/cap_libv4l.cpp)
|
||||
elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO)
|
||||
list(APPEND highgui_srcs src/cap_v4l.cpp)
|
||||
endif()
|
||||
|
||||
if(HAVE_OPENNI)
|
||||
list(APPEND highgui_srcs src/cap_openni.cpp)
|
||||
ocv_include_directories(${OPENNI_INCLUDE_DIR})
|
||||
list(APPEND HIGHGUI_LIBRARIES ${OPENNI_LIBRARY})
|
||||
endif(HAVE_OPENNI)
|
||||
|
||||
if(HAVE_opencv_androidcamera)
|
||||
list(APPEND highgui_srcs src/cap_android.cpp)
|
||||
add_definitions(-DHAVE_ANDROID_NATIVE_CAMERA)#TODO: remove this line
|
||||
endif(HAVE_opencv_androidcamera)
|
||||
|
||||
if(HAVE_XIMEA)
|
||||
list(APPEND highgui_srcs src/cap_ximea.cpp)
|
||||
ocv_include_directories(${XIMEA_PATH})
|
||||
if(XIMEA_LIBRARY_DIR)
|
||||
link_directories("${XIMEA_LIBRARY_DIR}")
|
||||
endif()
|
||||
if(X86_64)
|
||||
list(APPEND HIGHGUI_LIBRARIES m3apiX64)
|
||||
else()
|
||||
list(APPEND HIGHGUI_LIBRARIES m3api)
|
||||
endif()
|
||||
endif(HAVE_XIMEA)
|
||||
|
||||
if(HAVE_FFMPEG)
|
||||
if(UNIX AND BZIP2_LIBRARIES)
|
||||
list(APPEND HIGHGUI_LIBRARIES ${BZIP2_LIBRARIES})
|
||||
endif()
|
||||
if(APPLE)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework VideoDecodeAcceleration" bz2)
|
||||
endif()
|
||||
endif(HAVE_FFMPEG)
|
||||
|
||||
if(HAVE_PVAPI)
|
||||
add_definitions(-DHAVE_PVAPI)
|
||||
add_definitions(${PVAPI_DEFINITIONS})
|
||||
ocv_include_directories(${PVAPI_INCLUDE_PATH})
|
||||
set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs})
|
||||
list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY})
|
||||
endif()
|
||||
|
||||
if(HAVE_GIGE_API)
|
||||
add_definitions(-DHAVE_GIGE_API)
|
||||
ocv_include_directories(${GIGEAPI_INCLUDE_PATH})
|
||||
set(highgui_srcs src/cap_giganetix.cpp ${highgui_srcs})
|
||||
list(APPEND HIGHGUI_LIBRARIES ${GIGEAPI_LIBRARIES})
|
||||
list(APPEND highgui_srcs src/cap_giganetix.cpp)
|
||||
endif(HAVE_GIGE_API)
|
||||
|
||||
if(HAVE_AVFOUNDATION)
|
||||
list(APPEND highgui_srcs src/cap_avfoundation.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
|
||||
endif()
|
||||
|
||||
if(HAVE_QUICKTIME)
|
||||
list(APPEND highgui_srcs src/cap_qt.cpp)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
|
||||
elseif(HAVE_QTKIT)
|
||||
list(APPEND highgui_srcs src/cap_qtkit.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
|
||||
endif()
|
||||
|
||||
if(HAVE_INTELPERC)
|
||||
list(APPEND highgui_srcs src/cap_intelperc.cpp)
|
||||
ocv_include_directories(${INTELPERC_INCLUDE_DIR})
|
||||
list(APPEND HIGHGUI_LIBRARIES ${INTELPERC_LIBRARIES})
|
||||
endif(HAVE_INTELPERC)
|
||||
|
||||
if(IOS)
|
||||
add_definitions(-DHAVE_IOS=1)
|
||||
list(APPEND highgui_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm)
|
||||
list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary")
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
|
||||
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
|
||||
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests
|
||||
endif()
|
||||
|
||||
if(UNIX)
|
||||
#these variables are set by CHECK_MODULE macro
|
||||
foreach(P ${HIGHGUI_INCLUDE_DIRS})
|
||||
@@ -248,33 +122,5 @@ set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")
|
||||
ocv_add_precompiled_headers(${the_module})
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations)
|
||||
|
||||
if(WIN32 AND WITH_FFMPEG)
|
||||
#copy ffmpeg dll to the output folder
|
||||
if(MSVC64 OR MINGW64)
|
||||
set(FFMPEG_SUFFIX _64)
|
||||
endif()
|
||||
|
||||
set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll")
|
||||
set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
|
||||
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
|
||||
|
||||
if(MSVC_IDE)
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual"))
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
else()
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
endif()
|
||||
|
||||
install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}")
|
||||
endif()
|
||||
|
||||
ocv_add_accuracy_tests()
|
||||
ocv_add_perf_tests()
|
||||
|
@@ -9,11 +9,9 @@ It provides easy interface to:

* Create and manipulate windows that can display images and "remember" their content (no need to handle repaint events from the OS).
* Add trackbars to the windows, handle simple mouse events as well as keyboard commands.
* Read video from camera or file and write video to a file.

.. toctree::
    :maxdepth: 2

    user_interface
    reading_and_writing_video
    qt_new_functions
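A minimal sketch of the UI features listed above (a hedged example; the window and trackbar names are illustrative only): ::

    #include <opencv2/highgui.hpp>

    int threshold_value = 128;

    static void on_trackbar(int, void*)
    {
        // react to the new slider value here
    }

    int main()
    {
        cv::Mat image(240, 320, CV_8UC3, cv::Scalar::all(64));  // placeholder image
        cv::namedWindow("demo");                                 // window that "remembers" its content
        cv::createTrackbar("threshold", "demo",
                           &threshold_value, 255, on_trackbar);  // simple slider control
        cv::imshow("demo", image);
        cv::waitKey(0);                                          // wait for a key press
        return 0;
    }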
@@ -202,344 +202,4 @@ CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,
|
||||
|
||||
} // cv
|
||||
|
||||
////////////////////////////////// video io /////////////////////////////////
|
||||
|
||||
typedef struct CvCapture CvCapture;
|
||||
typedef struct CvVideoWriter CvVideoWriter;
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// Camera API
|
||||
enum { CAP_ANY = 0, // autodetect
|
||||
CAP_VFW = 200, // platform native
|
||||
CAP_V4L = 200,
|
||||
CAP_V4L2 = CAP_V4L,
|
||||
CAP_FIREWARE = 300, // IEEE 1394 drivers
|
||||
CAP_FIREWIRE = CAP_FIREWARE,
|
||||
CAP_IEEE1394 = CAP_FIREWARE,
|
||||
CAP_DC1394 = CAP_FIREWARE,
|
||||
CAP_CMU1394 = CAP_FIREWARE,
|
||||
CAP_QT = 500, // QuickTime
|
||||
CAP_UNICAP = 600, // Unicap drivers
|
||||
CAP_DSHOW = 700, // DirectShow (via videoInput)
|
||||
CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
|
||||
CAP_OPENNI = 900, // OpenNI (for Kinect)
|
||||
CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
|
||||
CAP_ANDROID = 1000, // Android
|
||||
CAP_XIAPI = 1100, // XIMEA Camera API
|
||||
CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
|
||||
CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
|
||||
CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput)
|
||||
CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
|
||||
};
|
||||
|
||||
// generic properties (based on DC1394 properties)
|
||||
enum { CAP_PROP_POS_MSEC =0,
|
||||
CAP_PROP_POS_FRAMES =1,
|
||||
CAP_PROP_POS_AVI_RATIO =2,
|
||||
CAP_PROP_FRAME_WIDTH =3,
|
||||
CAP_PROP_FRAME_HEIGHT =4,
|
||||
CAP_PROP_FPS =5,
|
||||
CAP_PROP_FOURCC =6,
|
||||
CAP_PROP_FRAME_COUNT =7,
|
||||
CAP_PROP_FORMAT =8,
|
||||
CAP_PROP_MODE =9,
|
||||
CAP_PROP_BRIGHTNESS =10,
|
||||
CAP_PROP_CONTRAST =11,
|
||||
CAP_PROP_SATURATION =12,
|
||||
CAP_PROP_HUE =13,
|
||||
CAP_PROP_GAIN =14,
|
||||
CAP_PROP_EXPOSURE =15,
|
||||
CAP_PROP_CONVERT_RGB =16,
|
||||
CAP_PROP_WHITE_BALANCE_BLUE_U =17,
|
||||
CAP_PROP_RECTIFICATION =18,
|
||||
CAP_PROP_MONOCROME =19,
|
||||
CAP_PROP_SHARPNESS =20,
|
||||
CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust reference level using this feature
|
||||
CAP_PROP_GAMMA =22,
|
||||
CAP_PROP_TEMPERATURE =23,
|
||||
CAP_PROP_TRIGGER =24,
|
||||
CAP_PROP_TRIGGER_DELAY =25,
|
||||
CAP_PROP_WHITE_BALANCE_RED_V =26,
|
||||
CAP_PROP_ZOOM =27,
|
||||
CAP_PROP_FOCUS =28,
|
||||
CAP_PROP_GUID =29,
|
||||
CAP_PROP_ISO_SPEED =30,
|
||||
CAP_PROP_BACKLIGHT =32,
|
||||
CAP_PROP_PAN =33,
|
||||
CAP_PROP_TILT =34,
|
||||
CAP_PROP_ROLL =35,
|
||||
CAP_PROP_IRIS =36,
|
||||
CAP_PROP_SETTINGS =37
|
||||
};
|
||||
|
||||
|
||||
// DC1394 only
|
||||
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
|
||||
// every feature can have only one mode turned on at a time
|
||||
enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
|
||||
CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
|
||||
CAP_PROP_DC1394_MODE_AUTO = -2,
|
||||
CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
|
||||
CAP_PROP_DC1394_MAX = 31
|
||||
};
|
||||
|
||||
|
||||
// OpenNI map generators
|
||||
enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
|
||||
CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
|
||||
CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
// Properties of cameras available through OpenNI interfaces
|
||||
enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
|
||||
CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
|
||||
CAP_PROP_OPENNI_BASELINE = 102, // in mm
|
||||
CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
|
||||
CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map
|
||||
// by changing depth generator's view point (if the flag is "on") or
|
||||
// sets this view point to its normal one (if the flag is "off").
|
||||
CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
|
||||
CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
|
||||
CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
|
||||
CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
|
||||
CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
|
||||
CAP_PROP_OPENNI_GENERATOR_PRESENT = 109
|
||||
};
|
||||
|
||||
// OpenNI shortcuts
|
||||
enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
|
||||
CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
|
||||
};
|
||||
|
||||
// OpenNI data given from depth generator
|
||||
enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
|
||||
CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
|
||||
CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
|
||||
CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
|
||||
CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
|
||||
|
||||
// Data given from RGB image generator
|
||||
CAP_OPENNI_BGR_IMAGE = 5,
|
||||
CAP_OPENNI_GRAY_IMAGE = 6
|
||||
};
|
||||
|
||||
// Supported output modes of OpenNI image generator
|
||||
enum { CAP_OPENNI_VGA_30HZ = 0,
|
||||
CAP_OPENNI_SXGA_15HZ = 1,
|
||||
CAP_OPENNI_SXGA_30HZ = 2,
|
||||
CAP_OPENNI_QVGA_30HZ = 3,
|
||||
CAP_OPENNI_QVGA_60HZ = 4
|
||||
};
|
||||
|
||||
|
||||
// GStreamer
|
||||
enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
|
||||
};
|
||||
|
||||
|
||||
// PVAPI
|
||||
enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // IP for enabling multicast master mode. 0 to disable multicast
|
||||
CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301 // FrameStartTriggerMode: Determines how a frame is initiated
|
||||
};
|
||||
|
||||
// PVAPI: FrameStartTriggerMode
|
||||
enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun
|
||||
CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1
|
||||
CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2
|
||||
CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate
|
||||
CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software
|
||||
};
|
||||
|
||||
// Properties of cameras available through XIMEA SDK interface
|
||||
enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
|
||||
CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
|
||||
CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
|
||||
CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
|
||||
CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
|
||||
CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
|
||||
CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
|
||||
CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
|
||||
CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
|
||||
CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
|
||||
CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
|
||||
CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
|
||||
CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
|
||||
CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
|
||||
CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
|
||||
CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
|
||||
CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
|
||||
CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
|
||||
CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
|
||||
CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
|
||||
CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
|
||||
};
|
||||
|
||||
|
||||
// Properties for Android cameras
|
||||
enum { CAP_PROP_ANDROID_AUTOGRAB = 1024,
|
||||
CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed
|
||||
CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed
|
||||
CAP_PROP_ANDROID_FLASH_MODE = 8001,
|
||||
CAP_PROP_ANDROID_FOCUS_MODE = 8002,
|
||||
CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
|
||||
CAP_PROP_ANDROID_ANTIBANDING = 8004,
|
||||
CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008
|
||||
};
|
||||
|
||||
|
||||
// Android camera output formats
|
||||
enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
|
||||
CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR,
|
||||
CAP_ANDROID_GREY_FRAME = 1, //Y
|
||||
CAP_ANDROID_COLOR_FRAME_RGB = 2,
|
||||
CAP_ANDROID_COLOR_FRAME_BGRA = 3,
|
||||
CAP_ANDROID_COLOR_FRAME_RGBA = 4
|
||||
};
|
||||
|
||||
|
||||
// Android camera flash modes
|
||||
enum { CAP_ANDROID_FLASH_MODE_AUTO = 0,
|
||||
CAP_ANDROID_FLASH_MODE_OFF = 1,
|
||||
CAP_ANDROID_FLASH_MODE_ON = 2,
|
||||
CAP_ANDROID_FLASH_MODE_RED_EYE = 3,
|
||||
CAP_ANDROID_FLASH_MODE_TORCH = 4
|
||||
};
|
||||
|
||||
|
||||
// Android camera focus modes
|
||||
enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0,
|
||||
CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1,
|
||||
CAP_ANDROID_FOCUS_MODE_EDOF = 2,
|
||||
CAP_ANDROID_FOCUS_MODE_FIXED = 3,
|
||||
CAP_ANDROID_FOCUS_MODE_INFINITY = 4,
|
||||
CAP_ANDROID_FOCUS_MODE_MACRO = 5
|
||||
};
|
||||
|
||||
|
||||
// Android camera white balance modes
|
||||
enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
|
||||
CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1,
|
||||
CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2,
|
||||
CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3,
|
||||
CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4,
|
||||
CAP_ANDROID_WHITE_BALANCE_SHADE = 5,
|
||||
CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6,
|
||||
CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7
|
||||
};
|
||||
|
||||
|
||||
// Android camera antibanding modes
|
||||
enum { CAP_ANDROID_ANTIBANDING_50HZ = 0,
|
||||
CAP_ANDROID_ANTIBANDING_60HZ = 1,
|
||||
CAP_ANDROID_ANTIBANDING_AUTO = 2,
|
||||
CAP_ANDROID_ANTIBANDING_OFF = 3
|
||||
};
|
||||
|
||||
|
||||
// Properties of cameras available through AVFOUNDATION interface
|
||||
enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
|
||||
CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
|
||||
CAP_PROP_IOS_DEVICE_FLASH = 9003,
|
||||
CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
|
||||
CAP_PROP_IOS_DEVICE_TORCH = 9005
|
||||
};
|
||||
|
||||
|
||||
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
|
||||
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
|
||||
enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
|
||||
CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
|
||||
CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
|
||||
CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
|
||||
CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
|
||||
CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
|
||||
};
|
||||
|
||||
enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
|
||||
CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
|
||||
CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
|
||||
CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
|
||||
CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
|
||||
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
|
||||
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007
|
||||
};
|
||||
|
||||
// Intel PerC streams
|
||||
enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
|
||||
CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
|
||||
CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
|
||||
CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
|
||||
CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
|
||||
CAP_INTELPERC_IMAGE = 3
|
||||
};
|
||||
|
||||
|
||||
class IVideoCapture;
class CV_EXPORTS_W VideoCapture
{
public:
    CV_WRAP VideoCapture();
    CV_WRAP VideoCapture(const String& filename);
    CV_WRAP VideoCapture(int device);

    virtual ~VideoCapture();
    CV_WRAP virtual bool open(const String& filename);
    CV_WRAP virtual bool open(int device);
    CV_WRAP virtual bool isOpened() const;
    CV_WRAP virtual void release();

    CV_WRAP virtual bool grab();
    CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);
    virtual VideoCapture& operator >> (CV_OUT Mat& image);
    virtual VideoCapture& operator >> (CV_OUT UMat& image);
    CV_WRAP virtual bool read(OutputArray image);

    CV_WRAP virtual bool set(int propId, double value);
    CV_WRAP virtual double get(int propId);

protected:
    Ptr<CvCapture> cap;
    Ptr<IVideoCapture> icap;
private:
    static Ptr<IVideoCapture> createCameraCapture(int index);
};
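// A minimal usage sketch of the VideoCapture class declared above (kept as a
// comment here; "video.avi" is a placeholder file name):
//
//     cv::VideoCapture cap("video.avi");
//     cv::Mat frame;
//     while (cap.isOpened() && cap.read(frame))
//     {
//         // process `frame`; read() returns false at the end of the stream
//     }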

class CV_EXPORTS_W VideoWriter
{
public:
    CV_WRAP VideoWriter();
    CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
                        Size frameSize, bool isColor = true);

    virtual ~VideoWriter();
    CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
                              Size frameSize, bool isColor = true);
    CV_WRAP virtual bool isOpened() const;
    CV_WRAP virtual void release();
    virtual VideoWriter& operator << (const Mat& image);
    CV_WRAP virtual void write(const Mat& image);

    CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);

protected:
    Ptr<CvVideoWriter> writer;
};
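// A minimal usage sketch of the VideoWriter class declared above (kept as a
// comment here; the file name, codec and frame rate are placeholders):
//
//     cv::VideoWriter out("out.avi", cv::VideoWriter::fourcc('M','J','P','G'),
//                         30.0, cv::Size(640, 480));
//     if (out.isOpened())
//         out.write(frame);   // `frame` must be a 640x480 BGR cv::Mat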
|
||||
|
||||
template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;
|
||||
template<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const;
|
||||
|
||||
} // cv
|
||||
|
||||
#endif
|
||||
|
@@ -206,364 +206,10 @@ CVAPI(void) cvSetOpenGlContext(const char* window_name);
|
||||
CVAPI(void) cvUpdateWindow(const char* window_name);
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Working with Video Files and Cameras *
|
||||
\****************************************************************************************/
|
||||
|
||||
/* "black box" capture structure */
|
||||
typedef struct CvCapture CvCapture;
|
||||
|
||||
/* start capturing frames from video file */
|
||||
CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
|
||||
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANY =0, // autodetect
|
||||
|
||||
CV_CAP_MIL =100, // MIL proprietary drivers
|
||||
|
||||
CV_CAP_VFW =200, // platform native
|
||||
CV_CAP_V4L =200,
|
||||
CV_CAP_V4L2 =200,
|
||||
|
||||
CV_CAP_FIREWARE =300, // IEEE 1394 drivers
|
||||
CV_CAP_FIREWIRE =300,
|
||||
CV_CAP_IEEE1394 =300,
|
||||
CV_CAP_DC1394 =300,
|
||||
CV_CAP_CMU1394 =300,
|
||||
|
||||
CV_CAP_STEREO =400, // TYZX proprietary drivers
|
||||
CV_CAP_TYZX =400,
|
||||
CV_TYZX_LEFT =400,
|
||||
CV_TYZX_RIGHT =401,
|
||||
CV_TYZX_COLOR =402,
|
||||
CV_TYZX_Z =403,
|
||||
|
||||
CV_CAP_QT =500, // QuickTime
|
||||
|
||||
CV_CAP_UNICAP =600, // Unicap drivers
|
||||
|
||||
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
|
||||
CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
|
||||
|
||||
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
|
||||
|
||||
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
|
||||
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
|
||||
|
||||
CV_CAP_ANDROID =1000, // Android
|
||||
CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera
|
||||
CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera
|
||||
|
||||
CV_CAP_XIAPI =1100, // XIMEA Camera API
|
||||
|
||||
CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
|
||||
|
||||
CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
|
||||
|
||||
CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
|
||||
};
|
||||
|
||||
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
|
||||
CVAPI(CvCapture*) cvCreateCameraCapture( int index );
|
||||
|
||||
/* grab a frame, return 1 on success, 0 on fail.
|
||||
this function is thought to be fast */
|
||||
CVAPI(int) cvGrabFrame( CvCapture* capture );
|
||||
|
||||
/* get the frame grabbed with cvGrabFrame(..)
|
||||
This function may apply some frame processing like
|
||||
frame decompression, flipping etc.
|
||||
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
|
||||
CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
|
||||
|
||||
/* Just a combination of cvGrabFrame and cvRetrieveFrame
|
||||
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
|
||||
CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
|
||||
|
||||
/* stop capturing/reading and free resources */
|
||||
CVAPI(void) cvReleaseCapture( CvCapture** capture );
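/* A minimal usage sketch of the C capture API above (kept as a comment here;
   the index can also be camera_index + domain_offset, e.g. CV_CAP_DSHOW + 0):

       CvCapture* cap = cvCreateCameraCapture(0);
       if (cap)
       {
           if (cvGrabFrame(cap))
           {
               IplImage* frame = cvRetrieveFrame(cap, 0); // do NOT release or modify
               // ... use frame ...
           }
           cvReleaseCapture(&cap);
       }
*/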
|
||||
|
||||
enum
|
||||
{
|
||||
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
|
||||
// every feature can have only one mode turned on at a time
|
||||
CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
|
||||
CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
|
||||
CV_CAP_PROP_DC1394_MODE_AUTO = -2,
|
||||
CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
|
||||
CV_CAP_PROP_POS_MSEC =0,
|
||||
CV_CAP_PROP_POS_FRAMES =1,
|
||||
CV_CAP_PROP_POS_AVI_RATIO =2,
|
||||
CV_CAP_PROP_FRAME_WIDTH =3,
|
||||
CV_CAP_PROP_FRAME_HEIGHT =4,
|
||||
CV_CAP_PROP_FPS =5,
|
||||
CV_CAP_PROP_FOURCC =6,
|
||||
CV_CAP_PROP_FRAME_COUNT =7,
|
||||
CV_CAP_PROP_FORMAT =8,
|
||||
CV_CAP_PROP_MODE =9,
|
||||
CV_CAP_PROP_BRIGHTNESS =10,
|
||||
CV_CAP_PROP_CONTRAST =11,
|
||||
CV_CAP_PROP_SATURATION =12,
|
||||
CV_CAP_PROP_HUE =13,
|
||||
CV_CAP_PROP_GAIN =14,
|
||||
CV_CAP_PROP_EXPOSURE =15,
|
||||
CV_CAP_PROP_CONVERT_RGB =16,
|
||||
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
|
||||
CV_CAP_PROP_RECTIFICATION =18,
|
||||
CV_CAP_PROP_MONOCROME =19,
|
||||
CV_CAP_PROP_SHARPNESS =20,
|
||||
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
|
||||
// user can adjust reference level
|
||||
// using this feature
|
||||
CV_CAP_PROP_GAMMA =22,
|
||||
CV_CAP_PROP_TEMPERATURE =23,
|
||||
CV_CAP_PROP_TRIGGER =24,
|
||||
CV_CAP_PROP_TRIGGER_DELAY =25,
|
||||
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
|
||||
CV_CAP_PROP_ZOOM =27,
|
||||
CV_CAP_PROP_FOCUS =28,
|
||||
CV_CAP_PROP_GUID =29,
|
||||
CV_CAP_PROP_ISO_SPEED =30,
|
||||
CV_CAP_PROP_MAX_DC1394 =31,
|
||||
CV_CAP_PROP_BACKLIGHT =32,
|
||||
CV_CAP_PROP_PAN =33,
|
||||
CV_CAP_PROP_TILT =34,
|
||||
CV_CAP_PROP_ROLL =35,
|
||||
CV_CAP_PROP_IRIS =36,
|
||||
CV_CAP_PROP_SETTINGS =37,
|
||||
|
||||
CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
|
||||
CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns const char* indeed
CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns const char* indeed
|
||||
|
||||
// OpenNI map generators
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
|
||||
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,
|
||||
|
||||
// Properties of cameras available through OpenNI interfaces
|
||||
CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
|
||||
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
|
||||
CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
|
||||
CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
|
||||
CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
|
||||
CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map
|
||||
// by changing depth generator's view point (if the flag is "on") or
|
||||
// sets this view point to its normal one (if the flag is "off").
|
||||
CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
|
||||
CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
|
||||
CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
|
||||
CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
|
||||
|
||||
CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
|
||||
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
|
||||
|
||||
// Properties of cameras available through GStreamer interface
|
||||
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
|
||||
|
||||
// PVAPI
|
||||
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // IP for enabling multicast master mode. 0 to disable multicast
|
||||
CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
|
||||
|
||||
// Properties of cameras available through XIMEA SDK interface
|
||||
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
|
||||
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
|
||||
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
|
||||
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
|
||||
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
|
||||
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
|
||||
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
|
||||
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
|
||||
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
|
||||
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
|
||||
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
|
||||
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
|
||||
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
|
||||
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
|
||||
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
|
||||
CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
|
||||
CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
|
||||
CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
|
||||
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
|
||||
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
|
||||
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
|
||||
|
||||
// Properties for Android cameras
|
||||
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
|
||||
CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
|
||||
CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
|
||||
CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
|
||||
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
|
||||
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
|
||||
|
||||
// Properties of cameras available through AVFOUNDATION interface
|
||||
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
|
||||
CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
|
||||
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
|
||||
CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
|
||||
CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
|
||||
|
||||
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
|
||||
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
|
||||
CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
|
||||
CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
|
||||
CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
|
||||
CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
|
||||
CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
|
||||
CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
|
||||
|
||||
CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
|
||||
CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
|
||||
|
||||
// Intel PerC streams
|
||||
CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
|
||||
CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
|
||||
CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
// Data given from depth generator.
|
||||
CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
|
||||
CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
|
||||
CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
|
||||
CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
|
||||
CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
|
||||
|
||||
// Data given from RGB image generator.
|
||||
CV_CAP_OPENNI_BGR_IMAGE = 5,
|
||||
CV_CAP_OPENNI_GRAY_IMAGE = 6
|
||||
};
|
||||
|
||||
// Supported output modes of OpenNI image generator
|
||||
enum
|
||||
{
|
||||
CV_CAP_OPENNI_VGA_30HZ = 0,
|
||||
CV_CAP_OPENNI_SXGA_15HZ = 1,
|
||||
CV_CAP_OPENNI_SXGA_30HZ = 2,
|
||||
CV_CAP_OPENNI_QVGA_30HZ = 3,
|
||||
CV_CAP_OPENNI_QVGA_60HZ = 4
|
||||
};
|
||||
|
||||
//supported by Android camera output formats
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
|
||||
CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR,
|
||||
CV_CAP_ANDROID_GREY_FRAME = 1, //Y
|
||||
CV_CAP_ANDROID_COLOR_FRAME_RGB = 2,
|
||||
CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3,
|
||||
CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4
|
||||
};
|
||||
|
||||
// supported Android camera flash modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_FLASH_MODE_AUTO = 0,
|
||||
CV_CAP_ANDROID_FLASH_MODE_OFF,
|
||||
CV_CAP_ANDROID_FLASH_MODE_ON,
|
||||
CV_CAP_ANDROID_FLASH_MODE_RED_EYE,
|
||||
CV_CAP_ANDROID_FLASH_MODE_TORCH
|
||||
};
|
||||
|
||||
// supported Android camera focus modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_FIXED,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_INFINITY,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_MACRO
|
||||
};
|
||||
|
||||
// supported Android camera white balance modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_SHADE,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT
|
||||
};
|
||||
|
||||
// supported Android camera antibanding modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_ANTIBANDING_50HZ = 0,
|
||||
CV_CAP_ANDROID_ANTIBANDING_60HZ,
|
||||
CV_CAP_ANDROID_ANTIBANDING_AUTO,
|
||||
CV_CAP_ANDROID_ANTIBANDING_OFF
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
|
||||
CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
|
||||
CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
|
||||
CV_CAP_INTELPERC_IMAGE = 3
|
||||
};
|
||||
|
||||
/* retrieve or set capture properties */
|
||||
CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
|
||||
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
|
||||
|
||||
// Return the type of the capturer (e.g. CV_CAP_VFW, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
|
||||
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
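/* Example of the property accessors above (kept as a comment here;
   `cap` is an already opened CvCapture*):

       cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 640);
       double fps = cvGetCaptureProperty(cap, CV_CAP_PROP_FPS);
*/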
|
||||
|
||||
/* "black box" video file writer structure */
|
||||
typedef struct CvVideoWriter CvVideoWriter;
|
||||
|
||||
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
|
||||
|
||||
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
|
||||
{
|
||||
return CV_FOURCC_MACRO(c1, c2, c3, c4);
|
||||
}
|
||||
|
||||
#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
|
||||
#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */
|
||||
|
||||
/* initialize video file writer */
|
||||
CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
|
||||
double fps, CvSize frame_size,
|
||||
int is_color CV_DEFAULT(1));
|
||||
|
||||
/* write frame to video file */
|
||||
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
|
||||
|
||||
/* close video file writer */
|
||||
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
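/* A minimal usage sketch of the C writer API above (kept as a comment here;
   the file name, codec, frame rate and size are placeholders):

       CvVideoWriter* writer = cvCreateVideoWriter("out.avi",
                                                   CV_FOURCC('M','J','P','G'),
                                                   30.0, cvSize(640, 480), 1);
       if (writer)
       {
           cvWriteFrame(writer, frame);   // `frame` is a 640x480 BGR IplImage*
           cvReleaseVideoWriter(&writer);
       }
*/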
|
||||
|
||||
/****************************************************************************************\
|
||||
* Obsolete functions/synonyms *
|
||||
\****************************************************************************************/
|
||||
|
||||
#define cvCaptureFromFile cvCreateFileCapture
|
||||
#define cvCaptureFromCAM cvCreateCameraCapture
|
||||
#define cvCaptureFromAVI cvCaptureFromFile
|
||||
#define cvCreateAVIWriter cvCreateVideoWriter
|
||||
#define cvWriteToAVI cvWriteFrame
|
||||
#define cvAddSearchPath(path)
|
||||
#define cvvInitSystem cvInitSystem
|
||||
#define cvvNamedWindow cvNamedWindow
|
||||
@@ -571,12 +217,9 @@ CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
|
||||
#define cvvResizeWindow cvResizeWindow
|
||||
#define cvvDestroyWindow cvDestroyWindow
|
||||
#define cvvCreateTrackbar cvCreateTrackbar
|
||||
#define cvvLoadImage(name) cvLoadImage((name),1)
|
||||
#define cvvSaveImage cvSaveImage
|
||||
#define cvvAddSearchPath cvAddSearchPath
|
||||
#define cvvWaitKey(name) cvWaitKey(0)
|
||||
#define cvvWaitKeyEx(name,delay) cvWaitKey(delay)
|
||||
#define cvvConvertImage cvConvertImage
|
||||
#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
|
||||
#define set_preprocess_func cvSetPreprocessFuncWin32
|
||||
#define set_postprocess_func cvSetPostprocessFuncWin32
|
||||
|
@@ -95,90 +95,6 @@
|
||||
#define CV_WINDOW_MAGIC_VAL 0x00420042
|
||||
#define CV_TRACKBAR_MAGIC_VAL 0x00420043
|
||||
|
||||
/***************************** CvCapture structure ******************************/
|
||||
|
||||
struct CvCapture
|
||||
{
|
||||
virtual ~CvCapture() {}
|
||||
virtual double getProperty(int) { return 0; }
|
||||
virtual bool setProperty(int, double) { return 0; }
|
||||
virtual bool grabFrame() { return true; }
|
||||
virtual IplImage* retrieveFrame(int) { return 0; }
|
||||
virtual int getCaptureDomain() { return CV_CAP_ANY; } // Return the type of the capture object: CV_CAP_VFW, etc...
|
||||
};
|
||||
|
||||
/*************************** CvVideoWriter structure ****************************/
|
||||
|
||||
struct CvVideoWriter
|
||||
{
|
||||
virtual ~CvVideoWriter() {}
|
||||
virtual bool writeFrame(const IplImage*) { return false; }
|
||||
};
|
||||
|
||||
CvCapture * cvCreateCameraCapture_V4L( int index );
|
||||
CvCapture * cvCreateCameraCapture_DC1394( int index );
|
||||
CvCapture * cvCreateCameraCapture_DC1394_2( int index );
|
||||
CvCapture* cvCreateCameraCapture_MIL( int index );
|
||||
CvCapture* cvCreateCameraCapture_Giganetix( int index );
|
||||
CvCapture * cvCreateCameraCapture_CMU( int index );
|
||||
CV_IMPL CvCapture * cvCreateCameraCapture_TYZX( int index );
|
||||
CvCapture* cvCreateFileCapture_Win32( const char* filename );
|
||||
CvCapture* cvCreateCameraCapture_VFW( int index );
|
||||
CvCapture* cvCreateFileCapture_VFW( const char* filename );
|
||||
CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
CvCapture* cvCreateCameraCapture_DShow( int index );
|
||||
CvCapture* cvCreateCameraCapture_MSMF( int index );
|
||||
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
|
||||
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
CvCapture* cvCreateCameraCapture_OpenNI( int index );
|
||||
CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
|
||||
CvCapture* cvCreateCameraCapture_Android( int index );
|
||||
CvCapture* cvCreateCameraCapture_XIMEA( int index );
|
||||
CvCapture* cvCreateCameraCapture_AVFoundation(int index);
|
||||
|
||||
CVAPI(int) cvHaveImageReader(const char* filename);
|
||||
CVAPI(int) cvHaveImageWriter(const char* filename);
|
||||
|
||||
CvCapture* cvCreateFileCapture_Images(const char* filename);
|
||||
CvVideoWriter* cvCreateVideoWriter_Images(const char* filename);
|
||||
|
||||
CvCapture* cvCreateFileCapture_XINE (const char* filename);
|
||||
|
||||
|
||||
|
||||
|
||||
#define CV_CAP_GSTREAMER_1394 0
|
||||
#define CV_CAP_GSTREAMER_V4L 1
|
||||
#define CV_CAP_GSTREAMER_V4L2 2
|
||||
#define CV_CAP_GSTREAMER_FILE 3
|
||||
|
||||
CvCapture* cvCreateCapture_GStreamer(int type, const char *filename);
|
||||
CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename);
|
||||
|
||||
|
||||
CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
|
||||
CvCapture * cvCreateFileCapture_QT (const char * filename);
|
||||
CvCapture * cvCreateCameraCapture_QT (const int index);
|
||||
|
||||
CvVideoWriter* cvCreateVideoWriter_QT ( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
|
||||
CvCapture* cvCreateFileCapture_AVFoundation (const char * filename);
|
||||
CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
|
||||
|
||||
CvCapture * cvCreateCameraCapture_Unicap (const int index);
|
||||
CvCapture * cvCreateCameraCapture_PvAPI (const int index);
|
||||
CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc,
|
||||
double fps, CvSize frameSize, int is_color );
|
||||
|
||||
//Yannick Verdie 2010
|
||||
void cvSetModeWindow_W32(const char* name, double prop_value);
|
||||
void cvSetModeWindow_GTK(const char* name, double prop_value);
|
||||
@@ -199,20 +115,6 @@ double cvGetRatioWindow_GTK(const char* name);
|
||||
double cvGetOpenGlProp_W32(const char* name);
|
||||
double cvGetOpenGlProp_GTK(const char* name);
|
||||
|
||||
namespace cv
{

class IVideoCapture
{
public:
    virtual ~IVideoCapture() {}
    virtual double getProperty(int) { return 0; }
    virtual bool setProperty(int, double) { return 0; }
    virtual bool grabFrame() = 0;
    virtual bool retrieveFrame(int, cv::OutputArray) = 0;
    virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc...
};

};
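// A hedged sketch of how a backend can plug into the new C++ capture path
// (kept as a comment here; "MyCapture" is a hypothetical backend):
//
//     class MyCapture : public cv::IVideoCapture
//     {
//     public:
//         bool grabFrame()                             { /* acquire a frame */ return true; }
//         bool retrieveFrame(int, cv::OutputArray dst) { dst.release(); return false; }
//         int getCaptureDomain()                       { return cv::CAP_ANY; }
//     };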
|
||||
|
||||
//for QT
|
||||
#if defined (HAVE_QT)
|
||||
double cvGetModeWindow_QT(const char* name);
|
||||
|
@@ -11,81 +11,11 @@
|
||||
|
||||
#include <iostream>
|
||||
#include "opencv2/ts.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
//#include "opencv2/imgproc.hpp"
|
||||
//#include "opencv2/imgcodecs.hpp"
|
||||
//#include "opencv2/highgui.hpp"
|
||||
//#include "opencv2/imgproc/imgproc_c.h"
|
||||
|
||||
#include "opencv2/core/private.hpp"
|
||||
|
||||
#if defined(HAVE_DSHOW) || \
|
||||
defined(HAVE_TYZX) || \
|
||||
defined(HAVE_VFW) || \
|
||||
defined(HAVE_LIBV4L) || \
|
||||
(defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \
|
||||
defined(HAVE_GSTREAMER) || \
|
||||
defined(HAVE_DC1394_2) || \
|
||||
defined(HAVE_DC1394) || \
|
||||
defined(HAVE_CMU1394) || \
|
||||
defined(HAVE_MIL) || \
|
||||
defined(HAVE_QUICKTIME) || \
|
||||
defined(HAVE_QTKIT) || \
|
||||
defined(HAVE_UNICAP) || \
|
||||
defined(HAVE_PVAPI) || \
|
||||
defined(HAVE_OPENNI) || \
|
||||
defined(HAVE_XIMEA) || \
|
||||
defined(HAVE_AVFOUNDATION) || \
|
||||
defined(HAVE_GIGE_API) || \
|
||||
defined(HAVE_INTELPERC) || \
|
||||
(0)
|
||||
//defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193
|
||||
# define BUILD_WITH_CAMERA_SUPPORT 1
|
||||
#else
|
||||
# define BUILD_WITH_CAMERA_SUPPORT 0
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_XINE) || \
|
||||
defined(HAVE_GSTREAMER) || \
|
||||
defined(HAVE_QUICKTIME) || \
|
||||
defined(HAVE_QTKIT) || \
|
||||
defined(HAVE_AVFOUNDATION) || \
|
||||
/*defined(HAVE_OPENNI) || too specialized */ \
|
||||
defined(HAVE_FFMPEG) || \
|
||||
defined(HAVE_MSMF)
|
||||
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
|
||||
#else
|
||||
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
|
||||
#endif
|
||||
|
||||
#if /*defined(HAVE_XINE) || */\
|
||||
defined(HAVE_GSTREAMER) || \
|
||||
defined(HAVE_QUICKTIME) || \
|
||||
defined(HAVE_QTKIT) || \
|
||||
defined(HAVE_AVFOUNDATION) || \
|
||||
defined(HAVE_FFMPEG) || \
|
||||
defined(HAVE_MSMF)
|
||||
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
|
||||
#else
|
||||
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0
|
||||
#endif
|
||||
|
||||
namespace cvtest
|
||||
{
|
||||
|
||||
string fourccToString(int fourcc);
|
||||
|
||||
struct VideoFormat
|
||||
{
|
||||
VideoFormat() { fourcc = -1; }
|
||||
VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
|
||||
bool empty() const { return ext.empty(); }
|
||||
|
||||
string ext;
|
||||
int fourcc;
|
||||
};
|
||||
|
||||
extern const VideoFormat g_specific_fmt_list[];
|
||||
|
||||
}
|
||||
//#include "opencv2/core/private.hpp"
|
||||
|
||||
#endif
|
||||
|
@@ -109,6 +109,18 @@ enum
|
||||
/* utility function: convert one image to another with optional vertical flip */
|
||||
CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0));
|
||||
|
||||
CVAPI(int) cvHaveImageReader(const char* filename);
|
||||
CVAPI(int) cvHaveImageWriter(const char* filename);
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Obsolete functions/synonyms *
|
||||
\****************************************************************************************/
|
||||
|
||||
#define cvvLoadImage(name) cvLoadImage((name),1)
|
||||
#define cvvSaveImage cvSaveImage
|
||||
#define cvvConvertImage cvConvertImage
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
@@ -41,6 +41,10 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
#import <UIKit/UIKit.h>
|
||||
#import <Accelerate/Accelerate.h>
|
||||
#import <AVFoundation/AVFoundation.h>
|
||||
#import <ImageIO/ImageIO.h>
|
||||
#include "opencv2/core/core.hpp"
|
||||
|
||||
UIImage* MatToUIImage(const cv::Mat& image);
|
||||
|
@@ -1,3 +1,3 @@
#include "perf_precomp.hpp"

CV_PERF_TEST_MAIN(highgui)
CV_PERF_TEST_MAIN(imgcodecs)
@@ -40,7 +40,11 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
#import "opencv2/highgui/cap_ios.h"
|
||||
#import <UIKit/UIKit.h>
|
||||
#import <Accelerate/Accelerate.h>
|
||||
#import <AVFoundation/AVFoundation.h>
|
||||
#import <ImageIO/ImageIO.h>
|
||||
#include "opencv2/core.hpp"
|
||||
#include "precomp.hpp"
|
||||
|
||||
UIImage* MatToUIImage(const cv::Mat& image) {
|
||||
|
@@ -454,7 +454,7 @@ bool imencode( const String& ext, InputArray _image,
|
||||
}
|
||||
|
||||
/****************************************************************************************\
|
||||
* HighGUI loading & saving function implementation *
|
||||
* Imgcodecs loading & saving function implementation *
|
||||
\****************************************************************************************/
|
||||
|
||||
CV_IMPL int
|
||||
|
@@ -81,7 +81,4 @@
|
||||
#define __END__ __CV_END__
|
||||
#define EXIT __CV_EXIT__
|
||||
|
||||
CVAPI(int) cvHaveImageReader(const char* filename);
|
||||
CVAPI(int) cvHaveImageWriter(const char* filename);
|
||||
|
||||
#endif /* __HIGHGUI_H_ */
|
||||
#endif /* __IMGCODECS_H_ */
|
||||
|
@@ -407,8 +407,8 @@ int CV_DrawingTest_C::checkLineIterator( Mat& _img )
|
||||
}
|
||||
|
||||
#ifdef HAVE_JPEG
|
||||
TEST(Highgui_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); }
|
||||
TEST(Highgui_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); }
|
||||
#endif
|
||||
|
||||
class CV_FillConvexPolyTest : public cvtest::BaseTest
|
||||
@@ -443,4 +443,4 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Highgui_Drawing, fillconvexpoly_clipping) { CV_FillConvexPolyTest test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Drawing, fillconvexpoly_clipping) { CV_FillConvexPolyTest test; test.safe_run(); }
|
||||
|
@@ -222,12 +222,12 @@ public:
|
||||
|
||||
|
||||
#ifdef HAVE_PNG
|
||||
TEST(Highgui_Image, write_big) { CV_GrfmtWriteBigImageTest test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Image, write_big) { CV_GrfmtWriteBigImageTest test; test.safe_run(); }
|
||||
#endif
|
||||
|
||||
TEST(Highgui_Image, write_imageseq) { CV_GrfmtWriteSequenceImageTest test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Image, write_imageseq) { CV_GrfmtWriteSequenceImageTest test; test.safe_run(); }
|
||||
|
||||
TEST(Highgui_Image, read_bmp_rle8) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Image, read_bmp_rle8) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); }
|
||||
|
||||
#ifdef HAVE_PNG
|
||||
class CV_GrfmtPNGEncodeTest : public cvtest::BaseTest
|
||||
@@ -256,9 +256,9 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Highgui_Image, encode_png) { CV_GrfmtPNGEncodeTest test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Image, encode_png) { CV_GrfmtPNGEncodeTest test; test.safe_run(); }
|
||||
|
||||
TEST(Highgui_ImreadVSCvtColor, regression)
|
||||
TEST(Imgcodecs_ImreadVSCvtColor, regression)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
|
||||
@@ -374,11 +374,11 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Highgui_Image, read_png_color_palette_with_alpha) { CV_GrfmtReadPNGColorPaletteWithAlphaTest test; test.safe_run(); }
|
||||
TEST(Imgcodecs_Image, read_png_color_palette_with_alpha) { CV_GrfmtReadPNGColorPaletteWithAlphaTest test; test.safe_run(); }
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_JPEG
|
||||
TEST(Highgui_Jpeg, encode_empty)
|
||||
TEST(Imgcodecs_Jpeg, encode_empty)
|
||||
{
|
||||
cv::Mat img;
|
||||
std::vector<uchar> jpegImg;
|
||||
@@ -386,7 +386,7 @@ TEST(Highgui_Jpeg, encode_empty)
|
||||
ASSERT_THROW(cv::imencode(".jpg", img, jpegImg), cv::Exception);
|
||||
}
|
||||
|
||||
TEST(Highgui_Jpeg, encode_decode_progressive_jpeg)
|
||||
TEST(Imgcodecs_Jpeg, encode_decode_progressive_jpeg)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
string input = string(ts.get_data_path()) + "../cv/shared/lena.png";
|
||||
@@ -410,7 +410,7 @@ TEST(Highgui_Jpeg, encode_decode_progressive_jpeg)
|
||||
remove(output_progressive.c_str());
|
||||
}
|
||||
|
||||
TEST(Highgui_Jpeg, encode_decode_optimize_jpeg)
|
||||
TEST(Imgcodecs_Jpeg, encode_decode_optimize_jpeg)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
string input = string(ts.get_data_path()) + "../cv/shared/lena.png";
|
||||
@@ -446,9 +446,9 @@ TEST(Highgui_Jpeg, encode_decode_optimize_jpeg)
|
||||
#ifdef ANDROID
|
||||
// Test disabled as it uses a lot of memory.
|
||||
// It is killed with SIGKILL by out of memory killer.
|
||||
TEST(Highgui_Tiff, DISABLED_decode_tile16384x16384)
|
||||
TEST(Imgcodecs_Tiff, DISABLED_decode_tile16384x16384)
|
||||
#else
|
||||
TEST(Highgui_Tiff, decode_tile16384x16384)
|
||||
TEST(Imgcodecs_Tiff, decode_tile16384x16384)
|
||||
#endif
|
||||
{
|
||||
// see issue #2161
|
||||
@@ -477,7 +477,7 @@ TEST(Highgui_Tiff, decode_tile16384x16384)
|
||||
remove(file4.c_str());
|
||||
}
|
||||
|
||||
TEST(Highgui_Tiff, write_read_16bit_big_little_endian)
|
||||
TEST(Imgcodecs_Tiff, write_read_16bit_big_little_endian)
|
||||
{
|
||||
// see issue #2601 "16-bit Grayscale TIFF Load Failures Due to Buffer Underflow and Endianness"
|
||||
|
||||
@@ -560,7 +560,7 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
TEST(Highgui_Tiff, decode_tile_remainder)
|
||||
TEST(Imgcodecs_Tiff, decode_tile_remainder)
|
||||
{
|
||||
CV_GrfmtReadTifTiledWithNotFullTiles test; test.safe_run();
|
||||
}
|
||||
@@ -569,7 +569,7 @@ TEST(Highgui_Tiff, decode_tile_remainder)
|
||||
|
||||
#ifdef HAVE_WEBP
|
||||
|
||||
TEST(Highgui_WebP, encode_decode_lossless_webp)
|
||||
TEST(Imgcodecs_WebP, encode_decode_lossless_webp)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
string input = string(ts.get_data_path()) + "../cv/shared/lena.png";
|
||||
@@ -618,7 +618,7 @@ TEST(Highgui_WebP, encode_decode_lossless_webp)
|
||||
EXPECT_TRUE(cvtest::norm(img, img_webp, NORM_INF) == 0);
|
||||
}
|
||||
|
||||
TEST(Highgui_WebP, encode_decode_lossy_webp)
|
||||
TEST(Imgcodecs_WebP, encode_decode_lossy_webp)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
std::string input = std::string(ts.get_data_path()) + "../cv/shared/lena.png";
|
||||
@@ -642,7 +642,7 @@ TEST(Highgui_WebP, encode_decode_lossy_webp)
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Highgui_WebP, encode_decode_with_alpha_webp)
|
||||
TEST(Imgcodecs_WebP, encode_decode_with_alpha_webp)
|
||||
{
|
||||
cvtest::TS& ts = *cvtest::TS::ptr();
|
||||
std::string input = std::string(ts.get_data_path()) + "../cv/shared/lena.png";
|
||||
@@ -668,7 +668,7 @@ TEST(Highgui_WebP, encode_decode_with_alpha_webp)
|
||||
|
||||
#endif
|
||||
|
||||
TEST(Highgui_Hdr, regression)
|
||||
TEST(Imgcodecs_Hdr, regression)
|
||||
{
|
||||
string folder = string(cvtest::TS::ptr()->get_data_path()) + "/readwrite/";
|
||||
string name_rle = folder + "rle.hdr";
|
||||
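The renamed tests above keep their bodies unchanged; only the Highgui_* prefixes become Imgcodecs_*. For orientation, a minimal sketch of the kind of roundtrip such a test exercises (assuming the ts/gtest harness these test files already use; the image contents and test name here are illustrative, not taken from the commit):

#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/ts.hpp"   // assumed: provides the TEST macro and cvtest helpers

TEST(Imgcodecs_Image, encode_decode_roundtrip_sketch)
{
    cv::Mat img(64, 64, CV_8UC3, cv::Scalar(0, 128, 255));

    std::vector<uchar> buf;
    ASSERT_TRUE(cv::imencode(".png", img, buf));            // PNG is lossless

    cv::Mat decoded = cv::imdecode(buf, cv::IMREAD_COLOR);
    ASSERT_FALSE(decoded.empty());
    EXPECT_TRUE(cvtest::norm(img, decoded, cv::NORM_INF) == 0);
}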
|
@@ -6,7 +6,7 @@ if(IOS OR NOT PYTHON_EXECUTABLE OR NOT ANT_EXECUTABLE OR NOT (JNI_FOUND OR (ANDR
|
||||
endif()
|
||||
|
||||
set(the_description "The java bindings")
|
||||
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_highgui opencv_ml opencv_calib3d opencv_photo opencv_nonfree opencv_contrib)
|
||||
ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_imgcodecs opencv_videoio opencv_ml opencv_calib3d opencv_photo opencv_nonfree opencv_contrib)
|
||||
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/generator/src/cpp")
|
||||
|
||||
if(NOT ANDROID)
|
||||
|
@@ -21,7 +21,7 @@ import org.opencv.core.Scalar;
|
||||
import org.opencv.core.Size;
|
||||
import org.opencv.core.DMatch;
|
||||
import org.opencv.core.KeyPoint;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.imgcodecs.Imgcodecs;
|
||||
|
||||
import android.util.Log;
|
||||
|
||||
@@ -134,8 +134,8 @@ public class OpenCVTestCase extends TestCase {
|
||||
rgba0 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(0));
|
||||
rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128));
|
||||
|
||||
rgbLena = Highgui.imread(OpenCVTestRunner.LENA_PATH);
|
||||
grayChess = Highgui.imread(OpenCVTestRunner.CHESS_PATH, 0);
|
||||
rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
|
||||
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0);
|
||||
|
||||
v1 = new Mat(1, 3, CvType.CV_32F);
|
||||
v1.put(0, 0, 1.0, 3.0, 2.0);
|
||||
|
@@ -5,7 +5,7 @@ import org.opencv.core.Core;
|
||||
import org.opencv.core.CvType;
|
||||
import org.opencv.core.Mat;
|
||||
import org.opencv.core.Scalar;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.imgcodecs.Imgcodecs;
|
||||
import org.opencv.imgproc.Imgproc;
|
||||
import org.opencv.test.OpenCVTestCase;
|
||||
import org.opencv.test.OpenCVTestRunner;
|
||||
@@ -57,7 +57,7 @@ public class UtilsTest extends OpenCVTestCase {
|
||||
}
|
||||
|
||||
public void testMatToBitmap() {
|
||||
Mat imgBGR = Highgui.imread( OpenCVTestRunner.LENA_PATH );
|
||||
Mat imgBGR = Imgcodecs.imread( OpenCVTestRunner.LENA_PATH );
|
||||
assertTrue(imgBGR != null && !imgBGR.empty() && imgBGR.channels() == 3);
|
||||
|
||||
Mat m16 = new Mat(imgBGR.rows(), imgBGR.cols(), CvType.CV_8UC4);
|
||||
|
@@ -18,7 +18,7 @@ import org.opencv.features2d.DescriptorMatcher;
|
||||
import org.opencv.features2d.FeatureDetector;
|
||||
import org.opencv.features2d.Features2d;
|
||||
import org.opencv.core.KeyPoint;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.imgcodecs.Imgcodecs;
|
||||
import org.opencv.test.OpenCVTestCase;
|
||||
import org.opencv.test.OpenCVTestRunner;
|
||||
|
||||
@@ -93,7 +93,7 @@ public class Features2dTest extends OpenCVTestCase {
|
||||
writeFile(extractorCfgFile, extractorCfg);
|
||||
extractor.read(extractorCfgFile);
|
||||
|
||||
Mat imgTrain = Highgui.imread(OpenCVTestRunner.LENA_PATH, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
|
||||
Mat imgTrain = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
|
||||
Mat imgQuery = imgTrain.submat(new Range(0, imgTrain.rows() - 100), Range.all());
|
||||
|
||||
MatOfKeyPoint trainKeypoints = new MatOfKeyPoint();
|
||||
@@ -139,7 +139,7 @@ public class Features2dTest extends OpenCVTestCase {
|
||||
Mat outimg = new Mat();
|
||||
Features2d.drawMatches(imgQuery, queryKeypoints, imgTrain, trainKeypoints, matches, outimg);
|
||||
String outputPath = OpenCVTestRunner.getOutputFileName("PTODresult.png");
|
||||
Highgui.imwrite(outputPath, outimg);
|
||||
Imgcodecs.imwrite(outputPath, outimg);
|
||||
// OpenCVTestRunner.Log("Output image is saved to: " + outputPath);
|
||||
}
|
||||
}
|
||||
|
@@ -2,7 +2,7 @@ package org.opencv.test.highgui;
|
||||
|
||||
import org.opencv.core.MatOfByte;
|
||||
import org.opencv.core.MatOfInt;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.imgcodecs.Imgcodecs;
|
||||
import org.opencv.test.OpenCVTestCase;
|
||||
import org.opencv.test.OpenCVTestRunner;
|
||||
|
||||
@@ -15,29 +15,29 @@ public class HighguiTest extends OpenCVTestCase {
|
||||
public void testImencodeStringMatListOfByte() {
|
||||
MatOfByte buff = new MatOfByte();
|
||||
assertEquals(0, buff.total());
|
||||
assertTrue( Highgui.imencode(".jpg", gray127, buff) );
|
||||
assertTrue( Imgcodecs.imencode(".jpg", gray127, buff) );
|
||||
assertFalse(0 == buff.total());
|
||||
}
|
||||
|
||||
public void testImencodeStringMatListOfByteListOfInteger() {
|
||||
MatOfInt params40 = new MatOfInt(Highgui.IMWRITE_JPEG_QUALITY, 40);
|
||||
MatOfInt params90 = new MatOfInt(Highgui.IMWRITE_JPEG_QUALITY, 90);
|
||||
MatOfInt params40 = new MatOfInt(Imgcodecs.IMWRITE_JPEG_QUALITY, 40);
|
||||
MatOfInt params90 = new MatOfInt(Imgcodecs.IMWRITE_JPEG_QUALITY, 90);
|
||||
/* or
|
||||
MatOfInt params = new MatOfInt();
|
||||
params.fromArray(Highgui.IMWRITE_JPEG_QUALITY, 40);
|
||||
params.fromArray(Imgcodecs.IMWRITE_JPEG_QUALITY, 40);
|
||||
*/
|
||||
MatOfByte buff40 = new MatOfByte();
|
||||
MatOfByte buff90 = new MatOfByte();
|
||||
|
||||
assertTrue( Highgui.imencode(".jpg", rgbLena, buff40, params40) );
|
||||
assertTrue( Highgui.imencode(".jpg", rgbLena, buff90, params90) );
|
||||
assertTrue( Imgcodecs.imencode(".jpg", rgbLena, buff40, params40) );
|
||||
assertTrue( Imgcodecs.imencode(".jpg", rgbLena, buff90, params90) );
|
||||
|
||||
assertTrue(buff40.total() > 0);
|
||||
assertTrue(buff40.total() < buff90.total());
|
||||
}
|
||||
|
||||
public void testImreadString() {
|
||||
dst = Highgui.imread(OpenCVTestRunner.LENA_PATH);
|
||||
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
|
||||
assertTrue(!dst.empty());
|
||||
assertEquals(3, dst.channels());
|
||||
assertTrue(512 == dst.cols());
|
||||
@@ -45,7 +45,7 @@ public class HighguiTest extends OpenCVTestCase {
|
||||
}
|
||||
|
||||
public void testImreadStringInt() {
|
||||
dst = Highgui.imread(OpenCVTestRunner.LENA_PATH, 0);
|
||||
dst = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH, 0);
|
||||
assertTrue(!dst.empty());
|
||||
assertEquals(1, dst.channels());
|
||||
assertTrue(512 == dst.cols());
|
||||
|
@@ -3,8 +3,8 @@ package org.opencv.test.highgui;
|
||||
import java.util.List;
|
||||
|
||||
import org.opencv.core.Size;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.highgui.VideoCapture;
|
||||
import org.opencv.videoio.Videoio;
|
||||
import org.opencv.videoio.VideoCapture;
|
||||
|
||||
import org.opencv.test.OpenCVTestCase;
|
||||
|
||||
@@ -26,8 +26,8 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testGet() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
double frameWidth = capture.get(Highgui.CV_CAP_PROP_FRAME_WIDTH);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
double frameWidth = capture.get(Videoio.CV_CAP_PROP_FRAME_WIDTH);
|
||||
assertTrue(0 != frameWidth);
|
||||
} finally {
|
||||
if (capture != null) capture.release();
|
||||
@@ -36,7 +36,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testGetSupportedPreviewSizes() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
List<Size> sizes = capture.getSupportedPreviewSizes();
|
||||
assertNotNull(sizes);
|
||||
assertFalse(sizes.isEmpty());
|
||||
@@ -53,7 +53,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testGrabFromRealCamera() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
isSucceed = capture.grab();
|
||||
assertTrue(isSucceed);
|
||||
} finally {
|
||||
@@ -68,7 +68,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testIsOpenedRealCamera() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
isOpened = capture.isOpened();
|
||||
assertTrue(isOpened);
|
||||
} finally {
|
||||
@@ -79,7 +79,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
public void testOpen() {
|
||||
try {
|
||||
capture = new VideoCapture();
|
||||
capture.open(Highgui.CV_CAP_ANDROID);
|
||||
capture.open(Videoio.CV_CAP_ANDROID);
|
||||
isOpened = capture.isOpened();
|
||||
assertTrue(isOpened);
|
||||
} finally {
|
||||
@@ -89,7 +89,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testRead() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
isSucceed = capture.read(dst);
|
||||
assertTrue(isSucceed);
|
||||
assertFalse(dst.empty());
|
||||
@@ -101,7 +101,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testRelease() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
capture.release();
|
||||
assertFalse(capture.isOpened());
|
||||
capture = null;
|
||||
@@ -112,7 +112,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testRetrieveMat() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
capture.grab();
|
||||
isSucceed = capture.retrieve(dst);
|
||||
assertTrue(isSucceed);
|
||||
@@ -125,9 +125,9 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testRetrieveMatInt() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
capture.grab();
|
||||
isSucceed = capture.retrieve(dst, Highgui.CV_CAP_ANDROID_GREY_FRAME);
|
||||
isSucceed = capture.retrieve(dst, Videoio.CV_CAP_ANDROID_GREY_FRAME);
|
||||
assertTrue(isSucceed);
|
||||
assertFalse(dst.empty());
|
||||
assertEquals(1, dst.channels());
|
||||
@@ -138,10 +138,10 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testSet() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, 640);
|
||||
capture.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, 480);
|
||||
double frameWidth = capture.get(Highgui.CV_CAP_PROP_FRAME_WIDTH);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
capture.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, 640);
|
||||
capture.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, 480);
|
||||
double frameWidth = capture.get(Videoio.CV_CAP_PROP_FRAME_WIDTH);
|
||||
capture.read(dst);
|
||||
assertEquals(640.0, frameWidth);
|
||||
assertEquals(640, dst.cols());
|
||||
@@ -158,7 +158,7 @@ public class VideoCaptureTest extends OpenCVTestCase {
|
||||
|
||||
public void testVideoCaptureInt() {
|
||||
try {
|
||||
capture = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
capture = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
assertNotNull(capture);
|
||||
assertTrue(capture.isOpened());
|
||||
} finally {
|
||||
|
@@ -11,7 +11,7 @@ except:
|
||||
class_ignore_list = (
|
||||
#core
|
||||
"FileNode", "FileStorage", "KDTree", "KeyPoint", "DMatch",
|
||||
#highgui
|
||||
#videoio
|
||||
"VideoWriter",
|
||||
)
|
||||
|
||||
@@ -536,13 +536,13 @@ JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize
|
||||
"""\n private static native String getSupportedPreviewSizes_0(long nativeObj);\n""",
|
||||
'cpp_code' :
|
||||
"""
|
||||
JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10
|
||||
JNIEXPORT jstring JNICALL Java_org_opencv_videoio_VideoCapture_getSupportedPreviewSizes_10
|
||||
(JNIEnv *env, jclass, jlong self);
|
||||
|
||||
JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_getSupportedPreviewSizes_10
|
||||
JNIEXPORT jstring JNICALL Java_org_opencv_videoio_VideoCapture_getSupportedPreviewSizes_10
|
||||
(JNIEnv *env, jclass, jlong self)
|
||||
{
|
||||
static const char method_name[] = "highgui::VideoCapture_getSupportedPreviewSizes_10()";
|
||||
static const char method_name[] = "videoio::VideoCapture_getSupportedPreviewSizes_10()";
|
||||
try {
|
||||
LOGD("%s", method_name);
|
||||
VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
|
||||
|
@@ -2,7 +2,7 @@
|
||||
|
||||
from __future__ import print_function
|
||||
import os, sys, re, string, fnmatch
|
||||
allmodules = ["core", "flann", "imgproc", "ml", "imgcodecs", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
|
||||
allmodules = ["core", "flann", "imgproc", "ml", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
|
||||
verbose = False
|
||||
show_warnings = True
|
||||
show_errors = True
|
||||
|
@@ -6,7 +6,7 @@ import org.opencv.R;
|
||||
import org.opencv.android.Utils;
|
||||
import org.opencv.core.Mat;
|
||||
import org.opencv.core.Size;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.videoio.Videoio;
|
||||
|
||||
import android.app.Activity;
|
||||
import android.app.AlertDialog;
|
||||
@@ -46,7 +46,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
|
||||
protected int mMaxHeight;
|
||||
protected int mMaxWidth;
|
||||
protected float mScale = 0;
|
||||
protected int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
|
||||
protected int mPreviewFormat = Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
|
||||
protected int mCameraIndex = CAMERA_ID_ANY;
|
||||
protected boolean mEnabled;
|
||||
protected FpsMeter mFpsMeter = null;
|
||||
@@ -151,10 +151,10 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
|
||||
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
|
||||
Mat result = null;
|
||||
switch (mPreviewFormat) {
|
||||
case Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA:
|
||||
case Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA:
|
||||
result = mOldStyleListener.onCameraFrame(inputFrame.rgba());
|
||||
break;
|
||||
case Highgui.CV_CAP_ANDROID_GREY_FRAME:
|
||||
case Videoio.CV_CAP_ANDROID_GREY_FRAME:
|
||||
result = mOldStyleListener.onCameraFrame(inputFrame.gray());
|
||||
break;
|
||||
default:
|
||||
@@ -168,7 +168,7 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
|
||||
mPreviewFormat = format;
|
||||
}
|
||||
|
||||
private int mPreviewFormat = Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
|
||||
private int mPreviewFormat = Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA;
|
||||
private CvCameraViewListener mOldStyleListener;
|
||||
};
|
||||
|
||||
|
@@ -2,8 +2,8 @@ package org.opencv.android;
|
||||
|
||||
import org.opencv.core.Mat;
|
||||
import org.opencv.core.Size;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.highgui.VideoCapture;
|
||||
import org.opencv.videoio.Videoio;
|
||||
import org.opencv.videoio.VideoCapture;
|
||||
|
||||
import android.content.Context;
|
||||
import android.util.AttributeSet;
|
||||
@@ -88,9 +88,9 @@ public class NativeCameraView extends CameraBridgeViewBase {
|
||||
synchronized (this) {
|
||||
|
||||
if (mCameraIndex == -1)
|
||||
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
|
||||
mCamera = new VideoCapture(Videoio.CV_CAP_ANDROID);
|
||||
else
|
||||
mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID + mCameraIndex);
|
||||
mCamera = new VideoCapture(Videoio.CV_CAP_ANDROID + mCameraIndex);
|
||||
|
||||
if (mCamera == null)
|
||||
return false;
|
||||
@@ -119,8 +119,8 @@ public class NativeCameraView extends CameraBridgeViewBase {
|
||||
|
||||
AllocateCache();
|
||||
|
||||
mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, frameSize.width);
|
||||
mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, frameSize.height);
|
||||
mCamera.set(Videoio.CV_CAP_PROP_FRAME_WIDTH, frameSize.width);
|
||||
mCamera.set(Videoio.CV_CAP_PROP_FRAME_HEIGHT, frameSize.height);
|
||||
}
|
||||
|
||||
Log.i(TAG, "Selected camera frame size = (" + mFrameWidth + ", " + mFrameHeight + ")");
|
||||
@@ -139,13 +139,13 @@ public class NativeCameraView extends CameraBridgeViewBase {
|
||||
|
||||
@Override
|
||||
public Mat rgba() {
|
||||
mCapture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
|
||||
mCapture.retrieve(mRgba, Videoio.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
|
||||
return mRgba;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mat gray() {
|
||||
mCapture.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
|
||||
mCapture.retrieve(mGray, Videoio.CV_CAP_ANDROID_GREY_FRAME);
|
||||
return mGray;
|
||||
}
|
||||
|
||||
|
@@ -25,7 +25,7 @@ import org.opencv.core.Scalar;
|
||||
import org.opencv.core.Size;
|
||||
import org.opencv.core.DMatch;
|
||||
import org.opencv.core.KeyPoint;
|
||||
import org.opencv.highgui.Highgui;
|
||||
import org.opencv.imgcodecs.Imgcodecs;
|
||||
|
||||
public class OpenCVTestCase extends TestCase {
|
||||
//change to 'true' to unblock fail on fail("Not yet implemented")
|
||||
@@ -164,8 +164,8 @@ public class OpenCVTestCase extends TestCase {
|
||||
rgba0 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(0));
|
||||
rgba128 = new Mat(matSize, matSize, CvType.CV_8UC4, Scalar.all(128));
|
||||
|
||||
rgbLena = Highgui.imread(OpenCVTestRunner.LENA_PATH);
|
||||
grayChess = Highgui.imread(OpenCVTestRunner.CHESS_PATH, 0);
|
||||
rgbLena = Imgcodecs.imread(OpenCVTestRunner.LENA_PATH);
|
||||
grayChess = Imgcodecs.imread(OpenCVTestRunner.CHESS_PATH, 0);
|
||||
|
||||
v1 = new Mat(1, 3, CvType.CV_32F);
|
||||
v1.put(0, 0, 1.0, 3.0, 2.0);
|
||||
|
@@ -85,7 +85,8 @@ endif()
|
||||
set(the_description "The Matlab/Octave bindings")
|
||||
ocv_add_module(matlab BINDINGS
|
||||
OPTIONAL opencv_core
|
||||
opencv_imgproc opencv_ml opencv_highgui
|
||||
opencv_imgproc opencv_ml
|
||||
opencv_imgcodecs opencv_videoio opencv_highgui
|
||||
opencv_objdetect opencv_flann opencv_features2d
|
||||
opencv_photo opencv_video opencv_videostab
|
||||
opencv_calib opencv_calib3d
|
||||
|
@@ -11,7 +11,7 @@ if(ANDROID OR IOS OR NOT PYTHONLIBS_FOUND OR NOT PYTHON_NUMPY_INCLUDE_DIRS)
|
||||
endif()
|
||||
|
||||
set(the_description "The python bindings")
|
||||
ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_imgcodecs opencv_highgui opencv_calib3d opencv_photo opencv_objdetect OPTIONAL opencv_nonfree)
|
||||
ocv_add_module(python BINDINGS opencv_core opencv_flann opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_imgcodecs opencv_videoio opencv_highgui opencv_calib3d opencv_photo opencv_objdetect OPTIONAL opencv_nonfree)
|
||||
|
||||
ocv_module_include_directories(
|
||||
"${PYTHON_INCLUDE_PATH}"
|
||||
@@ -32,6 +32,7 @@ set(opencv_hdrs
|
||||
"${OPENCV_MODULE_opencv_video_LOCATION}/include/opencv2/video/tracking.hpp"
|
||||
"${OPENCV_MODULE_opencv_photo_LOCATION}/include/opencv2/photo.hpp"
|
||||
"${OPENCV_MODULE_opencv_imgcodecs_LOCATION}/include/opencv2/imgcodecs.hpp"
|
||||
"${OPENCV_MODULE_opencv_videoio_LOCATION}/include/opencv2/videoio.hpp"
|
||||
"${OPENCV_MODULE_opencv_highgui_LOCATION}/include/opencv2/highgui.hpp"
|
||||
"${OPENCV_MODULE_opencv_ml_LOCATION}/include/opencv2/ml.hpp"
|
||||
"${OPENCV_MODULE_opencv_features2d_LOCATION}/include/opencv2/features2d.hpp"
|
||||
|
@@ -16,6 +16,7 @@ opencv_hdr_list = [
|
||||
"../../video/include/opencv2/video/background_segm.hpp",
|
||||
"../../objdetect/include/opencv2/objdetect.hpp",
|
||||
"../../imgcodecs/include/opencv2/imgcodecs.hpp",
|
||||
"../../videoio/include/opencv2/videoio.hpp",
|
||||
"../../highgui/include/opencv2/highgui.hpp"
|
||||
]
|
||||
|
||||
|
@@ -5,4 +5,4 @@ endif()
|
||||
set(the_description "Super Resolution")
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 -Wundef -Wshadow)
|
||||
ocv_define_module(superres opencv_imgproc opencv_video
|
||||
OPTIONAL opencv_highgui opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec)
|
||||
OPTIONAL opencv_videoio opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec)
|
||||
|
@@ -80,7 +80,7 @@ Ptr<FrameSource> cv::superres::createFrameSource_Empty()
|
||||
//////////////////////////////////////////////////////
|
||||
// VideoFrameSource & CameraFrameSource
|
||||
|
||||
#ifndef HAVE_OPENCV_HIGHGUI
|
||||
#ifndef HAVE_OPENCV_VIDEOIO
|
||||
|
||||
Ptr<FrameSource> cv::superres::createFrameSource_Video(const String& fileName)
|
||||
{
|
||||
@@ -96,7 +96,7 @@ Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
|
||||
return Ptr<FrameSource>();
|
||||
}
|
||||
|
||||
#else // HAVE_OPENCV_HIGHGUI
|
||||
#else // HAVE_OPENCV_VIDEOIO
|
||||
|
||||
namespace
|
||||
{
|
||||
@@ -187,7 +187,7 @@ Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
|
||||
return makePtr<CameraFrameSource>(deviceId);
|
||||
}
|
||||
|
||||
#endif // HAVE_OPENCV_HIGHGUI
|
||||
#endif // HAVE_OPENCV_VIDEOIO
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// VideoFrameSource_CUDA
|
||||
|
@@ -82,8 +82,8 @@
|
||||
# include "opencv2/cudacodec.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_OPENCV_HIGHGUI
|
||||
#include "opencv2/highgui.hpp"
|
||||
#ifdef HAVE_OPENCV_VIDEOIO
|
||||
#include "opencv2/videoio.hpp"
|
||||
#endif
|
||||
|
||||
#include "opencv2/superres.hpp"
|
||||
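In superres, the change only renames the guard: frame sources that wrap VideoCapture are compiled when videoio is available and fall back to returning an empty pointer otherwise. A condensed, self-contained sketch of that pattern follows; FrameSource and VideoFrameSource here are simplified stand-ins for the actual superres classes, not verbatim library code.

#include <opencv2/core.hpp>
#ifdef HAVE_OPENCV_VIDEOIO
#  include <opencv2/videoio.hpp>
#endif

// Minimal stand-in for the superres frame-source interface, just to show the guard shape.
struct FrameSource
{
    virtual ~FrameSource() {}
    virtual void nextFrame(cv::Mat& frame) = 0;
};

#ifndef HAVE_OPENCV_VIDEOIO

cv::Ptr<FrameSource> createFrameSource_Video(const cv::String&)
{
    // Built without videoio: there is no VideoCapture to wrap, so callers get an empty Ptr.
    return cv::Ptr<FrameSource>();
}

#else // HAVE_OPENCV_VIDEOIO

struct VideoFrameSource : FrameSource        // illustrative name
{
    explicit VideoFrameSource(const cv::String& fileName) : cap_(fileName) {}
    void nextFrame(cv::Mat& frame) { cap_ >> frame; }
    cv::VideoCapture cap_;
};

cv::Ptr<FrameSource> createFrameSource_Video(const cv::String& fileName)
{
    return cv::makePtr<VideoFrameSource>(fileName);
}

#endif // HAVE_OPENCV_VIDEOIO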
|
@@ -9,7 +9,7 @@ set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
|
||||
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
|
||||
|
||||
ocv_add_module(ts opencv_core opencv_imgproc opencv_imgcodecs opencv_highgui)
|
||||
ocv_add_module(ts opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui)
|
||||
|
||||
ocv_glob_module_sources()
|
||||
ocv_module_include_directories()
|
||||
|
@@ -45,6 +45,7 @@
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/ts/ts_perf.hpp"
|
||||
#include "cvconfig.h"
|
||||
|
@@ -47,6 +47,7 @@
|
||||
#include "opencv2/ts.hpp"
|
||||
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/imgproc.hpp"
|
||||
#include "opencv2/imgproc/types_c.h"
|
||||
|
233
modules/videoio/CMakeLists.txt
Normal file
@@ -0,0 +1,233 @@
|
||||
set(the_description "Media I/O")
|
||||
ocv_add_module(videoio opencv_imgproc opencv_imgcodecs OPTIONAL opencv_androidcamera)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# CMake file for videoio. See root CMakeLists.txt
|
||||
# Some parts taken from version of Hartmut Seichter, HIT Lab NZ.
|
||||
# Jose Luis Blanco, 2008
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
if(HAVE_WINRT_CX)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
|
||||
endif()
|
||||
|
||||
if(APPLE)
|
||||
ocv_include_directories(${ZLIB_INCLUDE_DIRS})
|
||||
list(APPEND VIDEOIO_LIBRARIES ${ZLIB_LIBRARIES})
|
||||
endif()
|
||||
|
||||
set(videoio_hdrs
|
||||
src/precomp.hpp
|
||||
src/cap_ffmpeg_impl.hpp
|
||||
)
|
||||
|
||||
set(videoio_srcs
|
||||
src/cap.cpp
|
||||
src/cap_images.cpp
|
||||
src/cap_ffmpeg.cpp
|
||||
)
|
||||
|
||||
file(GLOB videoio_ext_hdrs "include/opencv2/*.hpp" "include/opencv2/${name}/*.hpp" "include/opencv2/${name}/*.h")
|
||||
|
||||
if(WIN32 AND NOT ARM)
|
||||
list(APPEND videoio_srcs src/cap_cmu.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_DSHOW)
|
||||
list(APPEND videoio_srcs src/cap_dshow.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_MSMF)
|
||||
list(APPEND videoio_srcs src/cap_msmf.cpp)
|
||||
endif()
|
||||
|
||||
if (WIN32 AND HAVE_VFW)
|
||||
list(APPEND videoio_srcs src/cap_vfw.cpp)
|
||||
endif()
|
||||
|
||||
if(HAVE_XINE)
|
||||
list(APPEND videoio_srcs src/cap_xine.cpp)
|
||||
endif(HAVE_XINE)
|
||||
|
||||
if(HAVE_DC1394_2)
|
||||
list(APPEND videoio_srcs src/cap_dc1394_v2.cpp)
|
||||
endif(HAVE_DC1394_2)
|
||||
|
||||
if(HAVE_DC1394)
|
||||
list(APPEND videoio_srcs src/cap_dc1394.cpp)
|
||||
endif(HAVE_DC1394)
|
||||
|
||||
if(HAVE_GSTREAMER)
|
||||
list(APPEND videoio_srcs src/cap_gstreamer.cpp)
|
||||
endif(HAVE_GSTREAMER)
|
||||
|
||||
if(HAVE_UNICAP)
|
||||
list(APPEND videoio_srcs src/cap_unicap.cpp)
|
||||
endif(HAVE_UNICAP)
|
||||
|
||||
if(HAVE_LIBV4L)
|
||||
list(APPEND videoio_srcs src/cap_libv4l.cpp)
|
||||
elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO)
|
||||
list(APPEND videoio_srcs src/cap_v4l.cpp)
|
||||
endif()
|
||||
|
||||
if(HAVE_OPENNI)
|
||||
list(APPEND videoio_srcs src/cap_openni.cpp)
|
||||
ocv_include_directories(${OPENNI_INCLUDE_DIR})
|
||||
list(APPEND VIDEOIO_LIBRARIES ${OPENNI_LIBRARY})
|
||||
endif(HAVE_OPENNI)
|
||||
|
||||
if(HAVE_opencv_androidcamera)
|
||||
list(APPEND videoio_srcs src/cap_android.cpp)
|
||||
add_definitions(-DHAVE_ANDROID_NATIVE_CAMERA)#TODO: remove this line
|
||||
endif(HAVE_opencv_androidcamera)
|
||||
|
||||
if(HAVE_XIMEA)
|
||||
list(APPEND videoio_srcs src/cap_ximea.cpp)
|
||||
ocv_include_directories(${XIMEA_PATH})
|
||||
if(XIMEA_LIBRARY_DIR)
|
||||
link_directories("${XIMEA_LIBRARY_DIR}")
|
||||
endif()
|
||||
if(X86_64)
|
||||
list(APPEND VIDEOIO_LIBRARIES m3apiX64)
|
||||
else()
|
||||
list(APPEND VIDEOIO_LIBRARIES m3api)
|
||||
endif()
|
||||
endif(HAVE_XIMEA)
|
||||
|
||||
if(HAVE_FFMPEG)
|
||||
if(UNIX AND BZIP2_LIBRARIES)
|
||||
list(APPEND VIDEOIO_LIBRARIES ${BZIP2_LIBRARIES})
|
||||
endif()
|
||||
if(APPLE)
|
||||
list(APPEND VIDEOIO_LIBRARIES "-framework VideoDecodeAcceleration" bz2)
|
||||
endif()
|
||||
endif(HAVE_FFMPEG)
|
||||
|
||||
if(HAVE_PVAPI)
|
||||
add_definitions(-DHAVE_PVAPI)
|
||||
add_definitions(${PVAPI_DEFINITIONS})
|
||||
ocv_include_directories(${PVAPI_INCLUDE_PATH})
|
||||
set(videoio_srcs src/cap_pvapi.cpp ${videoio_srcs})
|
||||
list(APPEND VIDEOIO_LIBRARIES ${PVAPI_LIBRARY})
|
||||
endif()
|
||||
|
||||
if(HAVE_GIGE_API)
|
||||
add_definitions(-DHAVE_GIGE_API)
|
||||
ocv_include_directories(${GIGEAPI_INCLUDE_PATH})
|
||||
set(videoio_srcs src/cap_giganetix.cpp ${videoio_srcs})
|
||||
list(APPEND VIDEOIO_LIBRARIES ${GIGEAPI_LIBRARIES})
|
||||
list(APPEND videoio_srcs src/cap_giganetix.cpp)
|
||||
endif(HAVE_GIGE_API)
|
||||
|
||||
if(HAVE_AVFOUNDATION)
|
||||
list(APPEND videoio_srcs src/cap_avfoundation.mm)
|
||||
list(APPEND VIDEOIO_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
|
||||
endif()
|
||||
|
||||
if(HAVE_QUICKTIME)
|
||||
list(APPEND videoio_srcs src/cap_qt.cpp)
|
||||
list(APPEND VIDEOIO_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
|
||||
elseif(HAVE_QTKIT)
|
||||
list(APPEND videoio_srcs src/cap_qtkit.mm)
|
||||
list(APPEND VIDEOIO_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
|
||||
endif()
|
||||
|
||||
if(HAVE_INTELPERC)
|
||||
list(APPEND videoio_srcs src/cap_intelperc.cpp)
|
||||
ocv_include_directories(${INTELPERC_INCLUDE_DIR})
|
||||
list(APPEND VIDEOIO_LIBRARIES ${INTELPERC_LIBRARIES})
|
||||
endif(HAVE_INTELPERC)
|
||||
|
||||
if(IOS)
|
||||
add_definitions(-DHAVE_IOS=1)
|
||||
list(APPEND videoio_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm)
|
||||
list(APPEND VIDEOIO_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary")
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
|
||||
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
|
||||
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests
|
||||
endif()
|
||||
|
||||
if(UNIX)
|
||||
#these variables are set by CHECK_MODULE macro
|
||||
foreach(P ${VIDEOIO_INCLUDE_DIRS})
|
||||
ocv_include_directories(${P})
|
||||
endforeach()
|
||||
|
||||
foreach(P ${VIDEOIO_LIBRARY_DIRS})
|
||||
link_directories(${P})
|
||||
endforeach()
|
||||
endif()
|
||||
|
||||
source_group("Src" FILES ${videoio_srcs} ${videoio_hdrs})
|
||||
source_group("Include" FILES ${videoio_ext_hdrs})
|
||||
ocv_set_module_sources(HEADERS ${videoio_ext_hdrs} SOURCES ${videoio_srcs} ${videoio_hdrs})
|
||||
ocv_module_include_directories()
|
||||
|
||||
ocv_create_module(${VIDEOIO_LIBRARIES})
|
||||
|
||||
if(APPLE)
|
||||
ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS)
|
||||
if(HAVE_OBJC_EXCEPTIONS)
|
||||
foreach(source ${OPENCV_MODULE_${the_module}_SOURCES})
|
||||
if("${source}" MATCHES "\\.mm$")
|
||||
get_source_file_property(flags "${source}" COMPILE_FLAGS)
|
||||
if(flags)
|
||||
set(flags "${flags} -fobjc-exceptions")
|
||||
else()
|
||||
set(flags "-fobjc-exceptions")
|
||||
endif()
|
||||
|
||||
set_source_files_properties("${source}" PROPERTIES COMPILE_FLAGS "${flags}")
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(BUILD_SHARED_LIBS)
|
||||
add_definitions(-DVIDEOIO_EXPORTS)
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /NODEFAULTLIB:libcmt.lib /DEBUG")
|
||||
endif()
|
||||
|
||||
#stop automatic dependencies propagation for this module
|
||||
set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")
|
||||
|
||||
ocv_add_precompiled_headers(${the_module})
|
||||
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations)
|
||||
|
||||
if(WIN32 AND WITH_FFMPEG)
|
||||
#copy ffmpeg dll to the output folder
|
||||
if(MSVC64 OR MINGW64)
|
||||
set(FFMPEG_SUFFIX _64)
|
||||
endif()
|
||||
|
||||
set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll")
|
||||
set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
|
||||
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
|
||||
|
||||
if(MSVC_IDE)
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual"))
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
else()
|
||||
add_custom_command(TARGET ${the_module} POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}"
|
||||
COMMENT "Copying ${ffmpeg_path} to the output directory")
|
||||
endif()
|
||||
|
||||
install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}")
|
||||
endif()
|
||||
|
||||
ocv_add_accuracy_tests()
|
||||
ocv_add_perf_tests()
|
12
modules/videoio/doc/videoio.rst
Normal file
@@ -0,0 +1,12 @@
|
||||
*******************
|
||||
videoio. Media I/O
|
||||
*******************
|
||||
|
||||
videoio provides an easy interface to:
|
||||
|
||||
* Read video from camera or file and write video to a file.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
reading_and_writing_video
|
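To make the one-line doc above concrete: reading a video file and re-encoding it needs nothing but the new module. A minimal sketch (file names and the MJPG codec are illustrative):

#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture in("input.avi");                    // open a video file
    if (!in.isOpened())
        return 1;

    double fps = in.get(cv::CAP_PROP_FPS);
    cv::Size size((int)in.get(cv::CAP_PROP_FRAME_WIDTH),
                  (int)in.get(cv::CAP_PROP_FRAME_HEIGHT));

    cv::VideoWriter out("copy.avi", cv::VideoWriter::fourcc('M','J','P','G'), fps, size);
    if (!out.isOpened())
        return 1;

    cv::Mat frame;
    while (in.read(frame))    // read() returns false at end of stream
        out.write(frame);
    return 0;
}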
389
modules/videoio/include/opencv2/videoio.hpp
Normal file
@@ -0,0 +1,389 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_VIDEOIO_HPP__
|
||||
#define __OPENCV_VIDEOIO_HPP__
|
||||
|
||||
#include "opencv2/core.hpp"
|
||||
|
||||
|
||||
////////////////////////////////// video io /////////////////////////////////
|
||||
|
||||
typedef struct CvCapture CvCapture;
|
||||
typedef struct CvVideoWriter CvVideoWriter;
|
||||
|
||||
namespace cv
|
||||
{
|
||||
|
||||
// Camera API
|
||||
enum { CAP_ANY = 0, // autodetect
|
||||
CAP_VFW = 200, // platform native
|
||||
CAP_V4L = 200,
|
||||
CAP_V4L2 = CAP_V4L,
|
||||
CAP_FIREWARE = 300, // IEEE 1394 drivers
|
||||
CAP_FIREWIRE = CAP_FIREWARE,
|
||||
CAP_IEEE1394 = CAP_FIREWARE,
|
||||
CAP_DC1394 = CAP_FIREWARE,
|
||||
CAP_CMU1394 = CAP_FIREWARE,
|
||||
CAP_QT = 500, // QuickTime
|
||||
CAP_UNICAP = 600, // Unicap drivers
|
||||
CAP_DSHOW = 700, // DirectShow (via videoInput)
|
||||
CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
|
||||
CAP_OPENNI = 900, // OpenNI (for Kinect)
|
||||
CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
|
||||
CAP_ANDROID = 1000, // Android
|
||||
CAP_XIAPI = 1100, // XIMEA Camera API
|
||||
CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
|
||||
CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
|
||||
CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput)
|
||||
CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
|
||||
};
|
||||
|
||||
// generic properties (based on DC1394 properties)
|
||||
enum { CAP_PROP_POS_MSEC =0,
|
||||
CAP_PROP_POS_FRAMES =1,
|
||||
CAP_PROP_POS_AVI_RATIO =2,
|
||||
CAP_PROP_FRAME_WIDTH =3,
|
||||
CAP_PROP_FRAME_HEIGHT =4,
|
||||
CAP_PROP_FPS =5,
|
||||
CAP_PROP_FOURCC =6,
|
||||
CAP_PROP_FRAME_COUNT =7,
|
||||
CAP_PROP_FORMAT =8,
|
||||
CAP_PROP_MODE =9,
|
||||
CAP_PROP_BRIGHTNESS =10,
|
||||
CAP_PROP_CONTRAST =11,
|
||||
CAP_PROP_SATURATION =12,
|
||||
CAP_PROP_HUE =13,
|
||||
CAP_PROP_GAIN =14,
|
||||
CAP_PROP_EXPOSURE =15,
|
||||
CAP_PROP_CONVERT_RGB =16,
|
||||
CAP_PROP_WHITE_BALANCE_BLUE_U =17,
|
||||
CAP_PROP_RECTIFICATION =18,
|
||||
CAP_PROP_MONOCROME =19,
|
||||
CAP_PROP_SHARPNESS =20,
|
||||
CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust reference level using this feature
|
||||
CAP_PROP_GAMMA =22,
|
||||
CAP_PROP_TEMPERATURE =23,
|
||||
CAP_PROP_TRIGGER =24,
|
||||
CAP_PROP_TRIGGER_DELAY =25,
|
||||
CAP_PROP_WHITE_BALANCE_RED_V =26,
|
||||
CAP_PROP_ZOOM =27,
|
||||
CAP_PROP_FOCUS =28,
|
||||
CAP_PROP_GUID =29,
|
||||
CAP_PROP_ISO_SPEED =30,
|
||||
CAP_PROP_BACKLIGHT =32,
|
||||
CAP_PROP_PAN =33,
|
||||
CAP_PROP_TILT =34,
|
||||
CAP_PROP_ROLL =35,
|
||||
CAP_PROP_IRIS =36,
|
||||
CAP_PROP_SETTINGS =37
|
||||
};
|
||||
|
||||
|
||||
// DC1394 only
|
||||
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
|
||||
// every feature can have only one mode turned on at a time
|
||||
enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
|
||||
CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
|
||||
CAP_PROP_DC1394_MODE_AUTO = -2,
|
||||
CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
|
||||
CAP_PROP_DC1394_MAX = 31
|
||||
};
|
||||
|
||||
|
||||
// OpenNI map generators
|
||||
enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
|
||||
CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
|
||||
CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
// Properties of cameras available through OpenNI interfaces
|
||||
enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
|
||||
CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
|
||||
CAP_PROP_OPENNI_BASELINE = 102, // in mm
|
||||
CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
|
||||
CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map
|
||||
// by changing depth generator's view point (if the flag is "on") or
|
||||
// sets this view point to its normal one (if the flag is "off").
|
||||
CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
|
||||
CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
|
||||
CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
|
||||
CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
|
||||
CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
|
||||
CAP_PROP_OPENNI_GENERATOR_PRESENT = 109
|
||||
};
|
||||
|
||||
// OpenNI shortcuts
|
||||
enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
|
||||
CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
|
||||
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
|
||||
};
|
||||
|
||||
// OpenNI data given from depth generator
|
||||
enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
|
||||
CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
|
||||
CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
|
||||
CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
|
||||
CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
|
||||
|
||||
// Data given from RGB image generator
|
||||
CAP_OPENNI_BGR_IMAGE = 5,
|
||||
CAP_OPENNI_GRAY_IMAGE = 6
|
||||
};
|
||||
|
||||
// Supported output modes of OpenNI image generator
|
||||
enum { CAP_OPENNI_VGA_30HZ = 0,
|
||||
CAP_OPENNI_SXGA_15HZ = 1,
|
||||
CAP_OPENNI_SXGA_30HZ = 2,
|
||||
CAP_OPENNI_QVGA_30HZ = 3,
|
||||
CAP_OPENNI_QVGA_60HZ = 4
|
||||
};
|
||||
|
||||
|
||||
// GStreamer
|
||||
enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
|
||||
};
|
||||
|
||||
|
||||
// PVAPI
|
||||
enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // IP for enabling multicast master mode; 0 disables multicast
|
||||
CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301 // FrameStartTriggerMode: Determines how a frame is initiated
|
||||
};
|
||||
|
||||
// PVAPI: FrameStartTriggerMode
|
||||
enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun
|
||||
CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1
|
||||
CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2
|
||||
CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate
|
||||
CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software
|
||||
};
|
||||
|
||||
// Properties of cameras available through XIMEA SDK interface
|
||||
enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
|
||||
CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
|
||||
CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
|
||||
CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
|
||||
CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
|
||||
CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
|
||||
CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
|
||||
CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
|
||||
CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
|
||||
CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
|
||||
CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
|
||||
CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
|
||||
CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
|
||||
CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
|
||||
CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
|
||||
CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
|
||||
CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
|
||||
CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
|
||||
CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
|
||||
CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
|
||||
CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
|
||||
};
|
||||
|
||||
|
||||
// Properties for Android cameras
|
||||
enum { CAP_PROP_ANDROID_AUTOGRAB = 1024,
|
||||
CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed
|
||||
CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed
|
||||
CAP_PROP_ANDROID_FLASH_MODE = 8001,
|
||||
CAP_PROP_ANDROID_FOCUS_MODE = 8002,
|
||||
CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
|
||||
CAP_PROP_ANDROID_ANTIBANDING = 8004,
|
||||
CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
|
||||
CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008
|
||||
};
|
||||
|
||||
|
||||
// Android camera output formats
|
||||
enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
|
||||
CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR,
|
||||
CAP_ANDROID_GREY_FRAME = 1, //Y
|
||||
CAP_ANDROID_COLOR_FRAME_RGB = 2,
|
||||
CAP_ANDROID_COLOR_FRAME_BGRA = 3,
|
||||
CAP_ANDROID_COLOR_FRAME_RGBA = 4
|
||||
};
|
||||
|
||||
|
||||
// Android camera flash modes
|
||||
enum { CAP_ANDROID_FLASH_MODE_AUTO = 0,
|
||||
CAP_ANDROID_FLASH_MODE_OFF = 1,
|
||||
CAP_ANDROID_FLASH_MODE_ON = 2,
|
||||
CAP_ANDROID_FLASH_MODE_RED_EYE = 3,
|
||||
CAP_ANDROID_FLASH_MODE_TORCH = 4
|
||||
};
|
||||
|
||||
|
||||
// Android camera focus modes
|
||||
enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0,
|
||||
CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1,
|
||||
CAP_ANDROID_FOCUS_MODE_EDOF = 2,
|
||||
CAP_ANDROID_FOCUS_MODE_FIXED = 3,
|
||||
CAP_ANDROID_FOCUS_MODE_INFINITY = 4,
|
||||
CAP_ANDROID_FOCUS_MODE_MACRO = 5
|
||||
};
|
||||
|
||||
|
||||
// Android camera white balance modes
|
||||
enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
|
||||
CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1,
|
||||
CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2,
|
||||
CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3,
|
||||
CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4,
|
||||
CAP_ANDROID_WHITE_BALANCE_SHADE = 5,
|
||||
CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6,
|
||||
CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7
|
||||
};
|
||||
|
||||
|
||||
// Android camera antibanding modes
|
||||
enum { CAP_ANDROID_ANTIBANDING_50HZ = 0,
|
||||
CAP_ANDROID_ANTIBANDING_60HZ = 1,
|
||||
CAP_ANDROID_ANTIBANDING_AUTO = 2,
|
||||
CAP_ANDROID_ANTIBANDING_OFF = 3
|
||||
};
|
||||
|
||||
|
||||
// Properties of cameras available through AVFOUNDATION interface
|
||||
enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
|
||||
CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
|
||||
CAP_PROP_IOS_DEVICE_FLASH = 9003,
|
||||
CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
|
||||
CAP_PROP_IOS_DEVICE_TORCH = 9005
|
||||
};
|
||||
|
||||
|
||||
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
|
||||
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
|
||||
enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
|
||||
CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
|
||||
CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
|
||||
CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
|
||||
CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
|
||||
CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
|
||||
};
|
||||
|
||||
enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
|
||||
CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
|
||||
CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
|
||||
CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
|
||||
CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
|
||||
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
|
||||
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007
|
||||
};
|
||||
|
||||
// Intel PerC streams
|
||||
enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
|
||||
CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
|
||||
CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
|
||||
CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
|
||||
CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
|
||||
CAP_INTELPERC_IMAGE = 3
|
||||
};
|
||||
|
||||
|
||||
class IVideoCapture;
|
||||
class CV_EXPORTS_W VideoCapture
|
||||
{
|
||||
public:
|
||||
CV_WRAP VideoCapture();
|
||||
CV_WRAP VideoCapture(const String& filename);
|
||||
CV_WRAP VideoCapture(int device);
|
||||
|
||||
virtual ~VideoCapture();
|
||||
CV_WRAP virtual bool open(const String& filename);
|
||||
CV_WRAP virtual bool open(int device);
|
||||
CV_WRAP virtual bool isOpened() const;
|
||||
CV_WRAP virtual void release();
|
||||
|
||||
CV_WRAP virtual bool grab();
|
||||
CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);
|
||||
virtual VideoCapture& operator >> (CV_OUT Mat& image);
|
||||
virtual VideoCapture& operator >> (CV_OUT UMat& image);
|
||||
CV_WRAP virtual bool read(OutputArray image);
|
||||
|
||||
CV_WRAP virtual bool set(int propId, double value);
|
||||
CV_WRAP virtual double get(int propId);
|
||||
|
||||
protected:
|
||||
Ptr<CvCapture> cap;
|
||||
Ptr<IVideoCapture> icap;
|
||||
private:
|
||||
static Ptr<IVideoCapture> createCameraCapture(int index);
|
||||
};
|
||||
|
||||
class CV_EXPORTS_W VideoWriter
|
||||
{
|
||||
public:
|
||||
CV_WRAP VideoWriter();
|
||||
CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
|
||||
Size frameSize, bool isColor = true);
|
||||
|
||||
virtual ~VideoWriter();
|
||||
CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
|
||||
Size frameSize, bool isColor = true);
|
||||
CV_WRAP virtual bool isOpened() const;
|
||||
CV_WRAP virtual void release();
|
||||
virtual VideoWriter& operator << (const Mat& image);
|
||||
CV_WRAP virtual void write(const Mat& image);
|
||||
|
||||
CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);
|
||||
|
||||
protected:
|
||||
Ptr<CvVideoWriter> writer;
|
||||
};
|
||||
|
||||
template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;
|
||||
template<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const;
|
||||
|
||||
} // cv
|
||||
|
||||
#endif //__OPENCV_VIDEOIO_HPP__
|
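With the header above in place, grabbing frames no longer pulls in any GUI code. A minimal capture loop against the new cv::CAP_* constants (device selection and frame count are illustrative):

#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(cv::CAP_ANY);                  // autodetect the first available camera
    if (!cap.isOpened())
        return 1;

    cap.set(cv::CAP_PROP_FRAME_WIDTH,  640);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

    cv::Mat frame;
    for (int i = 0; i < 100 && cap.read(frame); ++i)
    {
        // process `frame` here; highgui is only needed if you also want to display it
    }
    return 0;
}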
@@ -1,4 +1,3 @@
|
||||
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
@@ -13,6 +12,7 @@
|
||||
//
|
||||
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
||||
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
||||
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
@@ -41,9 +41,8 @@
|
||||
//
|
||||
//M*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#import "opencv2/highgui/cap_ios.h"
|
||||
#ifdef __OPENCV_BUILD
|
||||
#error this is a compatibility header which should not be used inside the OpenCV library
|
||||
#endif
|
||||
|
||||
UIImage* MatToUIImage(const cv::Mat& image);
|
||||
void UIImageToMat(const UIImage* image,
|
||||
cv::Mat& m, bool alphaExist = false);
|
||||
#include "opencv2/videoio.hpp"
|
416
modules/videoio/include/opencv2/videoio/videoio_c.h
Normal file
@@ -0,0 +1,416 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __OPENCV_VIDEOIO_H__
|
||||
#define __OPENCV_VIDEOIO_H__
|
||||
|
||||
#include "opencv2/core/core_c.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif /* __cplusplus */
|
||||
|
||||
|
||||
/****************************************************************************************\
|
||||
* Working with Video Files and Cameras *
|
||||
\****************************************************************************************/
|
||||
|
||||
/* "black box" capture structure */
|
||||
typedef struct CvCapture CvCapture;
|
||||
|
||||
/* start capturing frames from video file */
|
||||
CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
|
||||
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANY =0, // autodetect
|
||||
|
||||
CV_CAP_MIL =100, // MIL proprietary drivers
|
||||
|
||||
CV_CAP_VFW =200, // platform native
|
||||
CV_CAP_V4L =200,
|
||||
CV_CAP_V4L2 =200,
|
||||
|
||||
CV_CAP_FIREWARE =300, // IEEE 1394 drivers
|
||||
CV_CAP_FIREWIRE =300,
|
||||
CV_CAP_IEEE1394 =300,
|
||||
CV_CAP_DC1394 =300,
|
||||
CV_CAP_CMU1394 =300,
|
||||
|
||||
CV_CAP_STEREO =400, // TYZX proprietary drivers
|
||||
CV_CAP_TYZX =400,
|
||||
CV_TYZX_LEFT =400,
|
||||
CV_TYZX_RIGHT =401,
|
||||
CV_TYZX_COLOR =402,
|
||||
CV_TYZX_Z =403,
|
||||
|
||||
CV_CAP_QT =500, // QuickTime
|
||||
|
||||
CV_CAP_UNICAP =600, // Unicap drivers
|
||||
|
||||
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
|
||||
CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
|
||||
|
||||
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
|
||||
|
||||
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
|
||||
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
|
||||
|
||||
CV_CAP_ANDROID =1000, // Android
|
||||
CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera
|
||||
CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera
|
||||
|
||||
CV_CAP_XIAPI =1100, // XIMEA Camera API
|
||||
|
||||
CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
|
||||
|
||||
CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
|
||||
|
||||
CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
|
||||
};
|
||||
|
||||
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
|
||||
CVAPI(CvCapture*) cvCreateCameraCapture( int index );
|
||||
|
||||
/* grab a frame, return 1 on success, 0 on fail.
|
||||
this function is thought to be fast */
|
||||
CVAPI(int) cvGrabFrame( CvCapture* capture );
|
||||
|
||||
/* get the frame grabbed with cvGrabFrame(..)
|
||||
This function may apply some frame processing like
|
||||
frame decompression, flipping etc.
|
||||
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
|
||||
CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
|
||||
|
||||
/* Just a combination of cvGrabFrame and cvRetrieveFrame
|
||||
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
|
||||
CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
|
||||
|
||||
/* stop capturing/reading and free resources */
|
||||
CVAPI(void) cvReleaseCapture( CvCapture** capture );
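/* Editor's usage sketch (illustrative; assumes a camera at index 0 and a build with
 * at least one capture backend): a typical read loop pairs cvGrabFrame() with
 * cvRetrieveFrame(), or uses cvQueryFrame() which combines both. The returned
 * IplImage is owned by the capture structure and must not be released or modified.
 *
 *     CvCapture* cap = cvCreateCameraCapture(0);
 *     if (cap)
 *     {
 *         for (int i = 0; i < 100; i++)
 *         {
 *             IplImage* frame = cvQueryFrame(cap);   // grab + retrieve in one call
 *             if (!frame)
 *                 break;                             // end of stream or device error
 *             // process 'frame' here; do not release or modify it
 *         }
 *         cvReleaseCapture(&cap);                    // stop capturing and free resources
 *     }
 */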
|
||||
|
||||
enum
|
||||
{
|
||||
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
|
||||
// every feature can have only one mode turned on at a time
|
||||
CV_CAP_PROP_DC1394_OFF         = -4,  // turn the feature off (controlled neither manually nor automatically)
|
||||
CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
|
||||
CV_CAP_PROP_DC1394_MODE_AUTO = -2,
|
||||
CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
|
||||
CV_CAP_PROP_POS_MSEC =0,
|
||||
CV_CAP_PROP_POS_FRAMES =1,
|
||||
CV_CAP_PROP_POS_AVI_RATIO =2,
|
||||
CV_CAP_PROP_FRAME_WIDTH =3,
|
||||
CV_CAP_PROP_FRAME_HEIGHT =4,
|
||||
CV_CAP_PROP_FPS =5,
|
||||
CV_CAP_PROP_FOURCC =6,
|
||||
CV_CAP_PROP_FRAME_COUNT =7,
|
||||
CV_CAP_PROP_FORMAT =8,
|
||||
CV_CAP_PROP_MODE =9,
|
||||
CV_CAP_PROP_BRIGHTNESS =10,
|
||||
CV_CAP_PROP_CONTRAST =11,
|
||||
CV_CAP_PROP_SATURATION =12,
|
||||
CV_CAP_PROP_HUE =13,
|
||||
CV_CAP_PROP_GAIN =14,
|
||||
CV_CAP_PROP_EXPOSURE =15,
|
||||
CV_CAP_PROP_CONVERT_RGB =16,
|
||||
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
|
||||
CV_CAP_PROP_RECTIFICATION =18,
|
||||
CV_CAP_PROP_MONOCROME =19,
|
||||
CV_CAP_PROP_SHARPNESS =20,
|
||||
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
|
||||
// user can adjust reference level
|
||||
// using this feature
|
||||
CV_CAP_PROP_GAMMA =22,
|
||||
CV_CAP_PROP_TEMPERATURE =23,
|
||||
CV_CAP_PROP_TRIGGER =24,
|
||||
CV_CAP_PROP_TRIGGER_DELAY =25,
|
||||
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
|
||||
CV_CAP_PROP_ZOOM =27,
|
||||
CV_CAP_PROP_FOCUS =28,
|
||||
CV_CAP_PROP_GUID =29,
|
||||
CV_CAP_PROP_ISO_SPEED =30,
|
||||
CV_CAP_PROP_MAX_DC1394 =31,
|
||||
CV_CAP_PROP_BACKLIGHT =32,
|
||||
CV_CAP_PROP_PAN =33,
|
||||
CV_CAP_PROP_TILT =34,
|
||||
CV_CAP_PROP_ROLL =35,
|
||||
CV_CAP_PROP_IRIS =36,
|
||||
CV_CAP_PROP_SETTINGS =37,
|
||||
|
||||
CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only
|
||||
CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns const char* indeed
|
||||
CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns const char* indeed
|
||||
|
||||
// OpenNI map generators
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
|
||||
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,
|
||||
|
||||
// Properties of cameras available through OpenNI interfaces
|
||||
CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
|
||||
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
|
||||
CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
|
||||
CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
|
||||
CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
|
||||
CV_CAP_PROP_OPENNI_REGISTRATION_ON  = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping of the depth map to the image map
|
||||
// by changing the depth generator's viewpoint (if the flag is "on") or
|
||||
// resets this viewpoint to its normal one (if the flag is "off").
|
||||
CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
|
||||
CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
|
||||
CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
|
||||
CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
|
||||
|
||||
CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
|
||||
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
|
||||
CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
|
||||
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
|
||||
|
||||
// Properties of cameras available through GStreamer interface
|
||||
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
|
||||
|
||||
// PVAPI
|
||||
CV_CAP_PROP_PVAPI_MULTICASTIP           = 300, // IP for enabling multicast master mode; 0 to disable multicast
|
||||
CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
|
||||
|
||||
// Properties of cameras available through XIMEA SDK interface
|
||||
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
|
||||
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
|
||||
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
|
||||
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
|
||||
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
|
||||
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
|
||||
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
|
||||
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
|
||||
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
|
||||
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
|
||||
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
|
||||
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
|
||||
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
|
||||
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
|
||||
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
|
||||
CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
|
||||
CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
|
||||
CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
|
||||
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
|
||||
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
|
||||
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
|
||||
|
||||
// Properties for Android cameras
|
||||
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
|
||||
CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
|
||||
CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
|
||||
CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
|
||||
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
|
||||
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
|
||||
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
|
||||
|
||||
// Properties of cameras available through AVFOUNDATION interface
|
||||
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
|
||||
CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
|
||||
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
|
||||
CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
|
||||
CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
|
||||
|
||||
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
|
||||
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
|
||||
CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
|
||||
CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
|
||||
CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
|
||||
CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
|
||||
CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
|
||||
CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
|
||||
|
||||
CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
|
||||
CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
|
||||
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
|
||||
|
||||
// Intel PerC streams
|
||||
CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
|
||||
CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
|
||||
CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
// Data given from depth generator.
|
||||
CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
|
||||
CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
|
||||
CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
|
||||
CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
|
||||
CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
|
||||
|
||||
// Data given from RGB image generator.
|
||||
CV_CAP_OPENNI_BGR_IMAGE = 5,
|
||||
CV_CAP_OPENNI_GRAY_IMAGE = 6
|
||||
};
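/* Editor's usage sketch (illustrative; assumes an OpenNI-enabled build and a connected
 * depth sensor): after a single cvGrabFrame(), the individual streams are retrieved by
 * passing one of the constants above as the stream index.
 *
 *     CvCapture* sensor = cvCreateCameraCapture(CV_CAP_OPENNI);
 *     if (sensor && cvGrabFrame(sensor))
 *     {
 *         IplImage* depth = cvRetrieveFrame(sensor, CV_CAP_OPENNI_DEPTH_MAP); // CV_16UC1, depth in mm
 *         IplImage* bgr   = cvRetrieveFrame(sensor, CV_CAP_OPENNI_BGR_IMAGE); // CV_8UC3 color image
 *         // depth/bgr are owned by the capture structure; do not release them
 *     }
 *     cvReleaseCapture(&sensor);
 */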
|
||||
|
||||
// Supported output modes of OpenNI image generator
|
||||
enum
|
||||
{
|
||||
CV_CAP_OPENNI_VGA_30HZ = 0,
|
||||
CV_CAP_OPENNI_SXGA_15HZ = 1,
|
||||
CV_CAP_OPENNI_SXGA_30HZ = 2,
|
||||
CV_CAP_OPENNI_QVGA_30HZ = 3,
|
||||
CV_CAP_OPENNI_QVGA_60HZ = 4
|
||||
};
|
||||
|
||||
//supported by Android camera output formats
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
|
||||
CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR,
|
||||
CV_CAP_ANDROID_GREY_FRAME = 1, //Y
|
||||
CV_CAP_ANDROID_COLOR_FRAME_RGB = 2,
|
||||
CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3,
|
||||
CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4
|
||||
};
|
||||
|
||||
// supported Android camera flash modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_FLASH_MODE_AUTO = 0,
|
||||
CV_CAP_ANDROID_FLASH_MODE_OFF,
|
||||
CV_CAP_ANDROID_FLASH_MODE_ON,
|
||||
CV_CAP_ANDROID_FLASH_MODE_RED_EYE,
|
||||
CV_CAP_ANDROID_FLASH_MODE_TORCH
|
||||
};
|
||||
|
||||
// supported Android camera focus modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_FIXED,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_INFINITY,
|
||||
CV_CAP_ANDROID_FOCUS_MODE_MACRO
|
||||
};
|
||||
|
||||
// supported Android camera white balance modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_SHADE,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT,
|
||||
CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT
|
||||
};
|
||||
|
||||
// supported Android camera antibanding modes
|
||||
enum
|
||||
{
|
||||
CV_CAP_ANDROID_ANTIBANDING_50HZ = 0,
|
||||
CV_CAP_ANDROID_ANTIBANDING_60HZ,
|
||||
CV_CAP_ANDROID_ANTIBANDING_AUTO,
|
||||
CV_CAP_ANDROID_ANTIBANDING_OFF
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
|
||||
CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
|
||||
CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
|
||||
CV_CAP_INTELPERC_IMAGE = 3
|
||||
};
|
||||
|
||||
/* retrieve or set capture properties */
|
||||
CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
|
||||
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
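/* Editor's usage sketch (illustrative; whether a given property is honoured depends
 * entirely on the backend and the device):
 *
 *     CvCapture* cap = cvCreateCameraCapture(0);
 *     if (cap)
 *     {
 *         cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH,  640);   // request a frame size
 *         cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, 480);
 *         double fps = cvGetCaptureProperty(cap, CV_CAP_PROP_FPS);    // may return 0 if unknown
 *         (void)fps;
 *         cvReleaseCapture(&cap);
 *     }
 */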
|
||||
|
||||
// Return the type of the capturer (eg, CV_CAP_VFW, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
|
||||
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
|
||||
|
||||
/* "black box" video file writer structure */
|
||||
typedef struct CvVideoWriter CvVideoWriter;
|
||||
|
||||
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
|
||||
|
||||
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
|
||||
{
|
||||
return CV_FOURCC_MACRO(c1, c2, c3, c4);
|
||||
}
|
||||
|
||||
#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
|
||||
#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */
|
||||
|
||||
/* initialize video file writer */
|
||||
CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
|
||||
double fps, CvSize frame_size,
|
||||
int is_color CV_DEFAULT(1));
|
||||
|
||||
/* write frame to video file */
|
||||
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
|
||||
|
||||
/* close video file writer */
|
||||
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
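/* Editor's usage sketch (illustrative; the MJPG codec and the .avi container are
 * assumptions -- actual availability depends on the codecs installed on the system):
 *
 *     CvSize size = cvSize(640, 480);
 *     CvVideoWriter* writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M','J','P','G'), 30.0, size, 1);
 *     if (writer)
 *     {
 *         IplImage* frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
 *         cvZero(frame);
 *         cvWriteFrame(writer, frame);          // write one black frame
 *         cvReleaseImage(&frame);
 *         cvReleaseVideoWriter(&writer);        // finalize and close the file
 *     }
 */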
|
||||
|
||||
/****************************************************************************************\
|
||||
* Obsolete functions/synonyms *
|
||||
\****************************************************************************************/
|
||||
|
||||
#define cvCaptureFromFile cvCreateFileCapture
|
||||
#define cvCaptureFromCAM cvCreateCameraCapture
|
||||
#define cvCaptureFromAVI cvCaptureFromFile
|
||||
#define cvCreateAVIWriter cvCreateVideoWriter
|
||||
#define cvWriteToAVI cvWriteFrame
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //__OPENCV_VIDEOIO_H__
|
@@ -1,3 +1,3 @@
|
||||
#include "perf_precomp.hpp"
|
||||
|
||||
CV_PERF_TEST_MAIN(highgui)
|
||||
CV_PERF_TEST_MAIN(videoio)
|
@@ -11,7 +11,7 @@
|
||||
|
||||
#include "opencv2/ts.hpp"
|
||||
#include "opencv2/imgcodecs.hpp"
|
||||
#include "opencv2/highgui.hpp"
|
||||
#include "opencv2/videoio.hpp"
|
||||
|
||||
#ifdef GTEST_CREATE_SHARED_LIBRARY
|
||||
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
|
@@ -57,7 +57,7 @@
|
||||
#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
|
||||
#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
|
||||
|
||||
class HighguiAndroidCameraActivity;
|
||||
class VideoIOAndroidCameraActivity;
|
||||
|
||||
class CvCapture_Android : public CvCapture
|
||||
{
|
||||
@@ -133,14 +133,14 @@ private:
|
||||
bool convertYUV2Grey(int width, int height, const unsigned char* yuv, cv::Mat& resmat);
|
||||
bool convertYUV2BGR(int width, int height, const unsigned char* yuv, cv::Mat& resmat, bool inRGBorder, bool withAlpha);
|
||||
|
||||
friend class HighguiAndroidCameraActivity;
|
||||
friend class VideoIOAndroidCameraActivity;
|
||||
};
|
||||
|
||||
|
||||
class HighguiAndroidCameraActivity : public CameraActivity
|
||||
class VideoIOAndroidCameraActivity : public CameraActivity
|
||||
{
|
||||
public:
|
||||
HighguiAndroidCameraActivity(CvCapture_Android* capture)
|
||||
VideoIOAndroidCameraActivity(CvCapture_Android* capture)
|
||||
{
|
||||
m_capture = capture;
|
||||
m_framesReceived = 0;
|
||||
@@ -204,7 +204,7 @@ CvCapture_Android::CvCapture_Android(int cameraId)
|
||||
|
||||
//try connect to camera
|
||||
LOGD("CvCapture_Android::CvCapture_Android(%i)", cameraId);
|
||||
m_activity = new HighguiAndroidCameraActivity(this);
|
||||
m_activity = new VideoIOAndroidCameraActivity(this);
|
||||
|
||||
if (m_activity == 0) return;
|
||||
|
||||
@@ -232,7 +232,7 @@ CvCapture_Android::~CvCapture_Android()
|
||||
{
|
||||
if (m_activity)
|
||||
{
|
||||
((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate();
|
||||
((VideoIOAndroidCameraActivity*)m_activity)->LogFramesRate();
|
||||
|
||||
pthread_mutex_lock(&m_nextFrameMutex);
|
||||
|
||||
@@ -344,7 +344,7 @@ bool CvCapture_Android::setProperty( int propIdx, double propValue )
|
||||
|
||||
// Only changes in frame size require camera restart
|
||||
if ((propIdx == CV_CAP_PROP_FRAME_WIDTH) || (propIdx == CV_CAP_PROP_FRAME_HEIGHT))
|
||||
{ // property for highgui class CvCapture_Android only
|
||||
{ // property for videoio class CvCapture_Android only
|
||||
m_CameraParamsChanged = true;
|
||||
}
|
||||
|
||||
@@ -475,7 +475,7 @@ void CvCapture_Android::setFrame(const void* buffer, int bufferSize)
|
||||
cv::Mat m_frameYUV420next_ref = m_frameYUV420next;
|
||||
memcpy(m_frameYUV420next_ref.ptr(), buffer, bufferSize);
|
||||
// LOGD("CvCapture_Android::setFrame -- memcpy is done");
|
||||
// ((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate();
|
||||
// ((VideoIOAndroidCameraActivity*)m_activity)->LogFramesRate();
|
||||
|
||||
m_dataState = CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED;
|
||||
m_waitingNextFrame = false;//set flag that no more frames required at this moment
|
@@ -428,7 +428,7 @@ void CvCaptureCAM::setWidthHeight() {
|
||||
[localpool drain];
|
||||
}
|
||||
|
||||
//added macros into headers in highgui_c.h
|
||||
//added macros into headers in videoio_c.h
|
||||
/*
|
||||
#define CV_CAP_PROP_IOS_DEVICE_FOCUS 9001
|
||||
#define CV_CAP_PROP_IOS_DEVICE_EXPOSURE 9002
|
@@ -1,5 +1,5 @@
|
||||
/* This is the contributed code:
|
||||
Firewire and video4linux camera support for highgui
|
||||
Firewire and video4linux camera support for videoio
|
||||
|
||||
2003-03-12 Magnus Lundin
|
||||
lundin@mlu.mine.nu
|
||||
@@ -17,21 +17,21 @@ INSTALLATION
|
||||
Install OpenCV
|
||||
Install v4l
|
||||
Install dc1394 raw1394 - coriander should work with your camera
|
||||
Backup highgui folder
|
||||
Backup videoio folder
|
||||
Copy new files
|
||||
cd into highgui folder
|
||||
cd into videoio folder
|
||||
make clean (cvcap.cpp must be rebuilt)
|
||||
make
|
||||
make install
|
||||
|
||||
|
||||
The build is controlled by the following entries in the highgui Makefile:
|
||||
The build is controlled by the following entries in the videoio Makefile:
|
||||
|
||||
libhighgui_la_LIBADD = -L/usr/X11R6/lib -lXm -lMrm -lUil -lpng -ljpeg -lz -ltiff -lavcodec -lraw1394 -ldc1394_control
|
||||
libvideoio_la_LIBADD = -L/usr/X11R6/lib -lXm -lMrm -lUil -lpng -ljpeg -lz -ltiff -lavcodec -lraw1394 -ldc1394_control
|
||||
DEFS = -DHAVE_CONFIG_H -DHAVE_DC1394 -DHAVE_CAMV4L
|
||||
|
||||
|
||||
Now it should be possible to use highgui camera functions, works for me.
|
||||
Now it should be possible to use videoio camera functions, works for me.
|
||||
|
||||
|
||||
THINGS TO DO
|
@@ -30,7 +30,7 @@
|
||||
*/
|
||||
|
||||
|
||||
#import "opencv2/highgui/cap_ios.h"
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
|
||||
#pragma mark - Private Interface
|
@@ -29,7 +29,7 @@
|
||||
*/
|
||||
|
||||
|
||||
#import "opencv2/highgui/cap_ios.h"
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
|
||||
#pragma mark - Private Interface
|
@@ -29,7 +29,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#import "opencv2/highgui/cap_ios.h"
|
||||
#import "opencv2/videoio/cap_ios.h"
|
||||
#include "precomp.hpp"
|
||||
#import <AssetsLibrary/AssetsLibrary.h>
|
||||
|
@@ -1,7 +1,7 @@
|
||||
/* This is the contributed code:
|
||||
|
||||
File: cvcap_v4l.cpp
|
||||
Current Location: ../opencv-0.9.6/otherlibs/highgui
|
||||
Current Location: ../opencv-0.9.6/otherlibs/videoio
|
||||
|
||||
Original Version: 2003-03-12 Magnus Lundin lundin@mlu.mine.nu
|
||||
Original Comments:
|
||||
@@ -71,7 +71,7 @@ For Release: OpenCV-Linux Beta4 Opencv-0.9.6
|
||||
[FD] I modified the following:
|
||||
- handle YUV420P, YUV420, and YUV411P palettes (for many webcams) without using floating-point
|
||||
- cvGrabFrame should not wait for the end of the first frame, and should return quickly
|
||||
(see highgui doc)
|
||||
(see videoio doc)
|
||||
- cvRetrieveFrame should in turn wait for the end of frame capture, and should not
|
||||
trigger the capture of the next frame (the user chooses when to do it using GrabFrame)
|
||||
To get the old behavior, re-call cvRetrieveFrame just after cvGrabFrame.
|
||||
@@ -179,7 +179,7 @@ make & enjoy!
|
||||
Planning for future rewrite of this whole library (July/August 2010)
|
||||
|
||||
15th patch: May 12, 2010, Filipe Almeida filipe.almeida@ist.utl.pt
|
||||
- Broken compile of library (include "_highgui.h")
|
||||
- Broken compile of library (include "_videoio.h")
|
||||
*/
|
||||
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -654,7 +654,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
|
||||
if ((capture->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
|
||||
/* Nope. */
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName);
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName);
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -673,7 +673,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
/* V4L2 have a status field from selected video mode */
|
||||
if (-1 == xioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp))
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n");
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L2: Aren't able to set channel number\n");
|
||||
icvCloseCAM_V4L (capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -684,7 +684,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
|
||||
|
||||
if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -698,12 +698,12 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->form.fmt.pix.height = capture->height;
|
||||
|
||||
if (-1 == xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: libv4l unable to ioctl S_FMT\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: libv4l unable to ioctl S_FMT\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (V4L2_PIX_FMT_BGR24 != capture->form.fmt.pix.pixelformat) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: libv4l unable convert to requested pixfmt\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: libv4l unable convert to requested pixfmt\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -829,7 +829,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
|
||||
if (detect_v4l == -1)
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L"
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L"
|
||||
": device %s: Unable to open for READ ONLY\n", deviceName);
|
||||
|
||||
return -1;
|
||||
@@ -837,7 +837,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
|
||||
if (detect_v4l <= 0)
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L"
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L"
|
||||
": device %s: Unable to query number of channels\n", deviceName);
|
||||
|
||||
return -1;
|
||||
@@ -846,7 +846,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
{
|
||||
if ((capture->capability.type & VID_TYPE_CAPTURE) == 0) {
|
||||
/* Nope. */
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: "
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: "
|
||||
"device %s is unable to capture video memory.\n",deviceName);
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
@@ -884,7 +884,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
{
|
||||
|
||||
if(v4l1_ioctl(capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: "
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: "
|
||||
"Could not obtain specifics of capture window.\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
@@ -894,7 +894,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
|
||||
{
|
||||
if(v4l1_ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -902,17 +902,17 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->imageProperties.palette = VIDEO_PALETTE_RGB24;
|
||||
capture->imageProperties.depth = 24;
|
||||
if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: libv4l unable to ioctl VIDIOCSPICT\n\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: libv4l unable to ioctl VIDIOCSPICT\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
if (v4l1_ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: libv4l unable to ioctl VIDIOCGPICT\n\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: libv4l unable to ioctl VIDIOCGPICT\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
if (capture->imageProperties.palette != VIDEO_PALETTE_RGB24) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: libv4l unable convert to requested pixfmt\n\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: libv4l unable convert to requested pixfmt\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -929,7 +929,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->deviceHandle,
|
||||
0);
|
||||
if (capture->memoryMap == MAP_FAILED) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno));
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno));
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -939,7 +939,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->mmaps = (struct video_mmap *)
|
||||
(malloc(capture->memoryBuffer.frames * sizeof(struct video_mmap)));
|
||||
if (!capture->mmaps) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not memory map video frames.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not memory map video frames.\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -972,14 +972,14 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index)
|
||||
//search index in indexList
|
||||
if ( (index>-1) && ! ((1 << index) & indexList) )
|
||||
{
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: index %d is not correct!\n",index);
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: index %d is not correct!\n",index);
|
||||
return NULL; /* Did someone ask for not correct video source number? */
|
||||
}
|
||||
/* Allocate memory for this humongus CvCaptureCAM_V4L structure that contains ALL
|
||||
the handles for V4L processing */
|
||||
CvCaptureCAM_V4L * capture = (CvCaptureCAM_V4L*)cvAlloc(sizeof(CvCaptureCAM_V4L));
|
||||
if (!capture) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not allocate memory for capture process.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not allocate memory for capture process.\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1161,7 +1161,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) {
|
||||
capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette;
|
||||
|
||||
if (v4l1_ioctl(capture->deviceHandle, VIDIOCMCAPTURE, &capture->mmaps[capture->bufferIndex]) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -1208,7 +1208,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
|
||||
|
||||
/* [FD] this really belongs here */
|
||||
if (v4l1_ioctl(capture->deviceHandle, VIDIOCSYNC, &capture->mmaps[capture->bufferIndex].frame) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno));
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1266,7 +1266,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
|
||||
break;
|
||||
default:
|
||||
fprintf( stderr,
|
||||
"HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n",
|
||||
"VIDEOIO ERROR: V4L: Cannot convert from palette %d to RGB\n",
|
||||
capture->imageProperties.palette);
|
||||
return 0;
|
||||
}
|
||||
@@ -1291,7 +1291,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
/* display an error message, and return an error code */
|
||||
perror ("VIDIOC_G_FMT");
|
||||
if (v4l1_ioctl (capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) < 0) {
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
fprintf (stderr, " ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
} else {
|
||||
@@ -1333,7 +1333,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
/* all went well */
|
||||
is_v4l2_device = 1;
|
||||
} else {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno));
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno));
|
||||
}
|
||||
|
||||
if (is_v4l2_device == 1) {
|
||||
@@ -1342,7 +1342,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
int v4l2_max = v4l2_get_ctrl_max(capture, capture->control.id);
|
||||
|
||||
if ((v4l2_min == -1) && (v4l2_max == -1)) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L2: Property %s(%u) not supported by device\n", name, property_id);
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L2: Property %s(%u) not supported by device\n", name, property_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -1367,11 +1367,11 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
retval = capture->imageProperties.hue;
|
||||
break;
|
||||
case CV_CAP_PROP_GAIN:
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
return -1;
|
||||
break;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
return -1;
|
||||
break;
|
||||
}
|
||||
@@ -1440,7 +1440,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
|
||||
/* Get window info again, to get the real value */
|
||||
if (-1 == xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form))
|
||||
{
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
|
||||
icvCloseCAM_V4L(capture);
|
||||
|
||||
@@ -1530,14 +1530,14 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
|
||||
v4l2_max = v4l2_get_ctrl_max(capture, capture->control.id);
|
||||
|
||||
if ((v4l2_min == -1) && (v4l2_max == -1)) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Property %s(%u) not supported by device\n", name, property_id);
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Property %s(%u) not supported by device\n", name, property_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if(v4l2_ioctl(capture->deviceHandle, VIDIOC_G_CTRL, &capture->control) == 0) {
|
||||
/* all went well */
|
||||
} else {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno));
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L2: Unable to get property %s(%u) - %s\n", name, capture->control.id, strerror(errno));
|
||||
}
|
||||
|
||||
if (v4l2_max != 0) {
|
||||
@@ -1558,7 +1558,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
|
||||
if (v4l2_ioctl(capture->deviceHandle, VIDIOC_S_CTRL, &c) != 0) {
|
||||
/* The driver may clamp the value or return ERANGE, ignored here */
|
||||
if (errno != ERANGE) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L2: Failed to set control \"%d\": %s (value %d)\n", c.id, strerror(errno), c.value);
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L2: Failed to set control \"%d\": %s (value %d)\n", c.id, strerror(errno), c.value);
|
||||
is_v4l2 = 0;
|
||||
} else {
|
||||
return 0;
|
||||
@@ -1568,7 +1568,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
|
||||
}
|
||||
|
||||
if (is_v4l2 == 0) { /* use v4l1_ioctl */
|
||||
fprintf(stderr, "HIGHGUI WARNING: Setting property %u through v4l2 failed. Trying with v4l1.\n", c.id);
|
||||
fprintf(stderr, "VIDEOIO WARNING: Setting property %u through v4l2 failed. Trying with v4l1.\n", c.id);
|
||||
int v4l_value;
|
||||
/* scale the value to the wanted integer one */
|
||||
v4l_value = (int)(0xFFFF * value);
|
||||
@@ -1587,18 +1587,18 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
|
||||
capture->imageProperties.hue = v4l_value;
|
||||
break;
|
||||
case CV_CAP_PROP_GAIN:
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
return -1;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
return -1;
|
||||
default:
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: property #%d is not supported\n", property_id);
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: property #%d is not supported\n", property_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0){
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Unable to set video informations\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video informations\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -1643,7 +1643,7 @@ static int icvSetPropertyCAM_V4L(CvCaptureCAM_V4L* capture, int property_id, dou
|
||||
setfps.parm.capture.timeperframe.numerator = 1;
|
||||
setfps.parm.capture.timeperframe.denominator = value;
|
||||
if (xioctl (capture->deviceHandle, VIDIOC_S_PARM, &setfps) < 0){
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Unable to set camera FPS\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set camera FPS\n");
|
||||
retval=0;
|
||||
}
|
||||
break;
|
@@ -2277,7 +2277,7 @@ protected:
|
||||
|
||||
/* Be sure to declare webcam device capability in manifest
|
||||
For better media capture support, add the following snippet with the correct module name to the project manifest
|
||||
(highgui needs DLL activation class factory entry points):
|
||||
(videoio needs DLL activation class factory entry points):
|
||||
<Extensions>
|
||||
<Extension Category="windows.activatableClass.inProcessServer">
|
||||
<InProcessServer>
|
@@ -1,7 +1,7 @@
|
||||
/* This is the contributed code:
|
||||
|
||||
File: cvcap_v4l.cpp
|
||||
Current Location: ../opencv-0.9.6/otherlibs/highgui
|
||||
Current Location: ../opencv-0.9.6/otherlibs/videoio
|
||||
|
||||
Original Version: 2003-03-12 Magnus Lundin lundin@mlu.mine.nu
|
||||
Original Comments:
|
||||
@@ -71,7 +71,7 @@ For Release: OpenCV-Linux Beta4 Opencv-0.9.6
|
||||
[FD] I modified the following:
|
||||
- handle YUV420P, YUV420, and YUV411P palettes (for many webcams) without using floating-point
|
||||
- cvGrabFrame should not wait for the end of the first frame, and should return quickly
|
||||
(see highgui doc)
|
||||
(see videoio doc)
|
||||
- cvRetrieveFrame should in turn wait for the end of frame capture, and should not
|
||||
trigger the capture of the next frame (the user chooses when to do it using GrabFrame)
|
||||
To get the old behavior, re-call cvRetrieveFrame just after cvGrabFrame.
|
||||
@@ -590,7 +590,7 @@ static int autosetup_capture_mode_v4l2(CvCaptureCAM_V4L* capture)
|
||||
}
|
||||
else
|
||||
{
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L2: Pixel format of incoming image is unsupported by OpenCV\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L2: Pixel format of incoming image is unsupported by OpenCV\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -607,7 +607,7 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture)
|
||||
{
|
||||
|
||||
if(ioctl(capture->deviceHandle, VIDIOCGPICT, &capture->imageProperties) < 0) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Unable to determine size of incoming image\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -627,7 +627,7 @@ static int autosetup_capture_mode_v4l(CvCaptureCAM_V4L* capture)
|
||||
//printf("negotiated palette YUV420P\n");
|
||||
}
|
||||
else {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L: Pixel format of incoming image is unsupported by OpenCV\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L: Pixel format of incoming image is unsupported by OpenCV\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -829,7 +829,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
|
||||
if ((capture->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
|
||||
/* Nope. */
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName);
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L2: device %s is unable to capture video memory.\n",deviceName);
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -848,7 +848,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
/* V4L2 have a status field from selected video mode */
|
||||
if (-1 == ioctl (capture->deviceHandle, VIDIOC_ENUMINPUT, &capture->inp))
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L2: Aren't able to set channel number\n");
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L2: Aren't able to set channel number\n");
|
||||
icvCloseCAM_V4L (capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -859,7 +859,7 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
|
||||
|
||||
if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form)) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -990,7 +990,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
if ((detect_v4l == -1)
|
||||
)
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L"
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L"
|
||||
": device %s: Unable to open for READ ONLY\n", deviceName);
|
||||
|
||||
return -1;
|
||||
@@ -999,7 +999,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
if ((detect_v4l <= 0)
|
||||
)
|
||||
{
|
||||
fprintf (stderr, "HIGHGUI ERROR: V4L"
|
||||
fprintf (stderr, "VIDEOIO ERROR: V4L"
|
||||
": device %s: Unable to query number of channels\n", deviceName);
|
||||
|
||||
return -1;
|
||||
@@ -1008,7 +1008,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
{
|
||||
if ((capture->capability.type & VID_TYPE_CAPTURE) == 0) {
|
||||
/* Nope. */
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: "
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: "
|
||||
"device %s is unable to capture video memory.\n",deviceName);
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
@@ -1047,7 +1047,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
{
|
||||
|
||||
if(ioctl(capture->deviceHandle, VIDIOCGWIN, &capture->captureWindow) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: "
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: "
|
||||
"Could not obtain specifics of capture window.\n\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
@@ -1072,7 +1072,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->deviceHandle,
|
||||
0);
|
||||
if (capture->memoryMap == MAP_FAILED) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno));
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Mapping Memmory from video source error: %s\n", strerror(errno));
|
||||
icvCloseCAM_V4L(capture);
|
||||
}
|
||||
|
||||
@@ -1081,7 +1081,7 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
|
||||
capture->mmaps = (struct video_mmap *)
|
||||
(malloc(capture->memoryBuffer.frames * sizeof(struct video_mmap)));
|
||||
if (!capture->mmaps) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not memory map video frames.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not memory map video frames.\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -1116,14 +1116,14 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index)
|
||||
//search index in indexList
|
||||
if ( (index>-1) && ! ((1 << index) & indexList) )
|
||||
{
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: index %d is not correct!\n",index);
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: index %d is not correct!\n",index);
|
||||
return NULL; /* Did someone ask for not correct video source number? */
|
||||
}
|
||||
/* Allocate memory for this humongus CvCaptureCAM_V4L structure that contains ALL
|
||||
the handles for V4L processing */
|
||||
CvCaptureCAM_V4L * capture = (CvCaptureCAM_V4L*)cvAlloc(sizeof(CvCaptureCAM_V4L));
|
||||
if (!capture) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not allocate memory for capture process.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not allocate memory for capture process.\n");
|
||||
return NULL;
|
||||
}
|
||||
/* Select camera, or rather, V4L video source */
|
||||
@@ -1317,7 +1317,7 @@ static int icvGrabFrameCAM_V4L(CvCaptureCAM_V4L* capture) {
|
||||
capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette;
|
||||
|
||||
if (ioctl(capture->deviceHandle, VIDIOCMCAPTURE, &capture->mmaps[capture->bufferIndex]) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Initial Capture Error: Unable to load initial memory buffers.\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -2098,7 +2098,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
|
||||
|
||||
/* [FD] this really belongs here */
|
||||
if (ioctl(capture->deviceHandle, VIDIOCSYNC, &capture->mmaps[capture->bufferIndex].frame) == -1) {
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno));
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L: Could not SYNC to video stream. %s\n", strerror(errno));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2255,7 +2255,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
|
||||
break;
|
||||
default:
|
||||
fprintf( stderr,
|
||||
"HIGHGUI ERROR: V4L: Cannot convert from palette %d to RGB\n",
|
||||
"VIDEOIO ERROR: V4L: Cannot convert from palette %d to RGB\n",
|
||||
capture->imageProperties.palette);
|
||||
|
||||
return 0;
|
||||
@@ -2326,7 +2326,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L2: getting property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L2: getting property #%d is not supported\n",
|
||||
property_id);
|
||||
return -1;
|
||||
}
|
||||
@@ -2334,7 +2334,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL,
|
||||
&capture->control)) {
|
||||
|
||||
fprintf( stderr, "HIGHGUI ERROR: V4L2: ");
|
||||
fprintf( stderr, "VIDEOIO ERROR: V4L2: ");
|
||||
switch (property_id) {
|
||||
case CV_CAP_PROP_BRIGHTNESS:
|
||||
fprintf (stderr, "Brightness");
|
||||
@@ -2405,7 +2405,7 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
if (ioctl (capture->deviceHandle,
|
||||
VIDIOCGWIN, &capture->captureWindow) < 0) {
|
||||
fprintf (stderr,
|
||||
"HIGHGUI ERROR: V4L: "
|
||||
"VIDEOIO ERROR: V4L: "
|
||||
"Unable to determine size of incoming image\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
@@ -2432,17 +2432,17 @@ static double icvGetPropertyCAM_V4L (CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
case CV_CAP_PROP_GAIN:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
"VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
return -1;
|
||||
break;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
"VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
return -1;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: getting property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L: getting property #%d is not supported\n",
|
||||
property_id);
|
||||
}
|
||||
|
||||
@@ -2470,7 +2470,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
|
||||
capture->cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
|
||||
|
||||
if (ioctl (capture->deviceHandle, VIDIOC_CROPCAP, &capture->cropcap) < 0) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_CROPCAP\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: VIDIOC_CROPCAP\n");
|
||||
} else {
|
||||
|
||||
CLEAR (capture->crop);
|
||||
@@ -2479,7 +2479,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
|
||||
|
||||
/* set the crop area, but don't exit if the device don't support croping */
|
||||
if (ioctl (capture->deviceHandle, VIDIOC_S_CROP, &capture->crop) < 0) {
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: VIDIOC_S_CROP\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: VIDIOC_S_CROP\n");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2519,7 +2519,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
|
||||
/* Get window info again, to get the real value */
|
||||
if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form))
|
||||
{
|
||||
fprintf(stderr, "HIGHGUI ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
fprintf(stderr, "VIDEOIO ERROR: V4L/V4L2: Could not obtain specifics of capture window.\n\n");
|
||||
|
||||
icvCloseCAM_V4L(capture);
|
||||
|
||||
@@ -2611,7 +2611,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L2: setting property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L2: setting property #%d is not supported\n",
|
||||
property_id);
|
||||
return -1;
|
||||
}
|
||||
@@ -2678,7 +2678,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L2: setting property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L2: setting property #%d is not supported\n",
|
||||
property_id);
|
||||
return -1;
|
||||
}
|
||||
@@ -2720,15 +2720,15 @@ static int icvSetControl (CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
case CV_CAP_PROP_GAIN:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
"VIDEOIO ERROR: V4L: Gain control in V4L is not supported\n");
|
||||
return -1;
|
||||
case CV_CAP_PROP_EXPOSURE:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
"VIDEOIO ERROR: V4L: Exposure control in V4L is not supported\n");
|
||||
return -1;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L: property #%d is not supported\n",
|
||||
property_id);
|
||||
return -1;
|
||||
}
|
||||
@@ -2737,7 +2737,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture,
|
||||
< 0)
|
||||
{
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: Unable to set video informations\n");
|
||||
"VIDEOIO ERROR: V4L: Unable to set video informations\n");
|
||||
icvCloseCAM_V4L(capture);
|
||||
return -1;
|
||||
}
|
||||
@@ -2786,7 +2786,7 @@ static int icvSetPropertyCAM_V4L( CvCaptureCAM_V4L* capture,
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"HIGHGUI ERROR: V4L: setting property #%d is not supported\n",
|
||||
"VIDEOIO ERROR: V4L: setting property #%d is not supported\n",
|
||||
property_id);
|
||||
}
|
||||
|
178
modules/videoio/src/precomp.hpp
Normal file
@@ -0,0 +1,178 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// Intel License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2000, Intel Corporation, all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
//
|
||||
// * The name of Intel Corporation may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors "as is" and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#ifndef __VIDEOIO_H_
|
||||
#define __VIDEOIO_H_
|
||||
|
||||
#include "opencv2/videoio.hpp"
|
||||
|
||||
#include "opencv2/core/utility.hpp"
|
||||
#include "opencv2/core/private.hpp"
|
||||
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/imgcodecs/imgcodecs_c.h"
|
||||
#include "opencv2/videoio/videoio_c.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <limits.h>
|
||||
#include <ctype.h>
|
||||
#include <assert.h>
|
||||
|
||||
#if defined WIN32 || defined WINCE
|
||||
#if !defined _WIN32_WINNT
|
||||
#ifdef HAVE_MSMF
|
||||
#define _WIN32_WINNT 0x0600 // Windows Vista
|
||||
#else
|
||||
#define _WIN32_WINNT 0x0500 // Windows 2000
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <windows.h>
|
||||
#undef small
|
||||
#undef min
|
||||
#undef max
|
||||
#undef abs
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_TEGRA_OPTIMIZATION
|
||||
#include "opencv2/videoio/videoio_tegra.hpp"
|
||||
#endif
|
||||
|
||||
#define __BEGIN__ __CV_BEGIN__
|
||||
#define __END__ __CV_END__
|
||||
#define EXIT __CV_EXIT__
|
||||
|
||||
/***************************** CvCapture structure ******************************/
|
||||
|
||||
struct CvCapture
{
    virtual ~CvCapture() {}
    virtual double getProperty(int) { return 0; }
    virtual bool setProperty(int, double) { return 0; }
    virtual bool grabFrame() { return true; }
    virtual IplImage* retrieveFrame(int) { return 0; }
    virtual int getCaptureDomain() { return CV_CAP_ANY; } // Return the type of the capture object: CV_CAP_VFW, etc...
};

/*************************** CvVideoWriter structure ****************************/

struct CvVideoWriter
{
    virtual ~CvVideoWriter() {}
    virtual bool writeFrame(const IplImage*) { return false; }
};

CvCapture * cvCreateCameraCapture_V4L( int index );
CvCapture * cvCreateCameraCapture_DC1394( int index );
CvCapture * cvCreateCameraCapture_DC1394_2( int index );
CvCapture* cvCreateCameraCapture_MIL( int index );
CvCapture* cvCreateCameraCapture_Giganetix( int index );
CvCapture * cvCreateCameraCapture_CMU( int index );
CV_IMPL CvCapture * cvCreateCameraCapture_TYZX( int index );
CvCapture* cvCreateFileCapture_Win32( const char* filename );
CvCapture* cvCreateCameraCapture_VFW( int index );
CvCapture* cvCreateFileCapture_VFW( const char* filename );
CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
                                          double fps, CvSize frameSize, int is_color );
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
                                        double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_MSMF( int index );
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
                                         double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
CvCapture* cvCreateCameraCapture_Android( int index );
CvCapture* cvCreateCameraCapture_XIMEA( int index );
CvCapture* cvCreateCameraCapture_AVFoundation(int index);

CvCapture* cvCreateFileCapture_Images(const char* filename);
CvVideoWriter* cvCreateVideoWriter_Images(const char* filename);

CvCapture* cvCreateFileCapture_XINE (const char* filename);

#define CV_CAP_GSTREAMER_1394 0
#define CV_CAP_GSTREAMER_V4L 1
#define CV_CAP_GSTREAMER_V4L2 2
#define CV_CAP_GSTREAMER_FILE 3

CvCapture* cvCreateCapture_GStreamer(int type, const char *filename);
CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename);

CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc,
                                                 double fps, CvSize frameSize, int is_color );

CvCapture * cvCreateFileCapture_QT (const char * filename);
CvCapture * cvCreateCameraCapture_QT (const int index);

CvVideoWriter* cvCreateVideoWriter_QT ( const char* filename, int fourcc,
                                        double fps, CvSize frameSize, int is_color );

CvCapture* cvCreateFileCapture_AVFoundation (const char * filename);
CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourcc,
                                                 double fps, CvSize frameSize, int is_color );

CvCapture * cvCreateCameraCapture_Unicap (const int index);
CvCapture * cvCreateCameraCapture_PvAPI (const int index);
CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc,
                                              double fps, CvSize frameSize, int is_color );

namespace cv
{
    class IVideoCapture
    {
    public:
        virtual ~IVideoCapture() {}
        virtual double getProperty(int) { return 0; }
        virtual bool setProperty(int, double) { return 0; }
        virtual bool grabFrame() = 0;
        virtual bool retrieveFrame(int, cv::OutputArray) = 0;
        virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc...
    };
};

#endif /* __VIDEOIO_H_ */
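For orientation, capture backends inside videoio implement the cv::IVideoCapture interface declared above. The following is only an illustrative sketch, not part of this commit: the class name SyntheticCapture and the solid-colour frames are invented for the example, and it assumes it is compiled inside the videoio module where this private header (and the CAP_* constants) are visible.

// Illustrative sketch only -- a trivial backend serving solid-colour 640x480 frames.
namespace cv
{

class SyntheticCapture : public IVideoCapture
{
public:
    SyntheticCapture() : frameIdx_(0) {}

    virtual bool grabFrame() { ++frameIdx_; return true; }   // nothing to fetch, always succeeds

    virtual bool retrieveFrame(int, OutputArray dst)
    {
        // Brightness of the frame encodes the current frame index.
        Mat frame(480, 640, CV_8UC3, Scalar::all(frameIdx_ % 256));
        frame.copyTo(dst);
        return true;
    }

    virtual double getProperty(int propId)
    {
        if (propId == CAP_PROP_FRAME_WIDTH)  return 640.;
        if (propId == CAP_PROP_FRAME_HEIGHT) return 480.;
        return 0.;
    }

    virtual int getCaptureDomain() { return CAP_ANY; }

private:
    int frameIdx_;
};

}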
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace cv;

@@ -145,7 +145,7 @@ public:
    }
};

TEST(Highgui_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); }
TEST(Videoio_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); }

class CV_FFmpegReadImageTest : public cvtest::BaseTest
{
@@ -174,7 +174,7 @@ public:
    }
};

TEST(Highgui_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); }
TEST(Videoio_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); }

#endif

@@ -360,7 +360,7 @@ private:

bool ReadImageAndTest::next;

TEST(Highgui_Video_parallel_writers_and_readers, accuracy)
TEST(Videoio_Video_parallel_writers_and_readers, accuracy)
{
    const unsigned int threadsCount = 4;
    cvtest::TS* ts = cvtest::TS::ptr();
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

#undef DEFINE_GUID
#define DEFINE_GUID(n, fourcc, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) fourcc,
@@ -101,7 +101,7 @@ DEFINE_GUID(PIN_CATEGORY_PREVIEW,0xfb6c4282,0x0353,0x11d1,0x90,0x5f,0x00,0x00,0x
0};

TEST(Highgui_dshow, fourcc_conversion)
TEST(Videoio_dshow, fourcc_conversion)
{
    for(int i = 0; allfourcc[i]; ++i)
    {
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/videoio/videoio_c.h"
#include <stdio.h>

using namespace cv;

@@ -110,5 +110,5 @@ void CV_FramecountTest::run(int)
    }
}
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG
TEST(Highgui_Video, framecount) {CV_FramecountTest test; test.safe_run();}
TEST(Videoio_Video, framecount) {CV_FramecountTest test; test.safe_run();}
#endif
modules/videoio/test/test_main.cpp
Normal file
@@ -0,0 +1,3 @@
#include "test_precomp.hpp"

CV_TEST_MAIN("videoio")
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/videoio/videoio_c.h"
#include <stdio.h>

using namespace cv;

@@ -218,6 +218,6 @@ void CV_VideoRandomPositioningTest::run(int)
}

#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG
TEST (Highgui_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); }
TEST (Highgui_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); }
TEST (Videoio_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); }
TEST (Videoio_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); }
#endif
modules/videoio/test/test_precomp.hpp
Normal file
@@ -0,0 +1,91 @@
#ifdef __GNUC__
#  pragma GCC diagnostic ignored "-Wmissing-declarations"
#  if defined __clang__ || defined __APPLE__
#    pragma GCC diagnostic ignored "-Wmissing-prototypes"
#    pragma GCC diagnostic ignored "-Wextra"
#  endif
#endif

#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__

#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/imgproc/imgproc_c.h"

#include "opencv2/core/private.hpp"

#if defined(HAVE_DSHOW) || \
    defined(HAVE_TYZX) || \
    defined(HAVE_VFW) || \
    defined(HAVE_LIBV4L) || \
    (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \
    defined(HAVE_GSTREAMER) || \
    defined(HAVE_DC1394_2) || \
    defined(HAVE_DC1394) || \
    defined(HAVE_CMU1394) || \
    defined(HAVE_MIL) || \
    defined(HAVE_QUICKTIME) || \
    defined(HAVE_QTKIT) || \
    defined(HAVE_UNICAP) || \
    defined(HAVE_PVAPI) || \
    defined(HAVE_OPENNI) || \
    defined(HAVE_XIMEA) || \
    defined(HAVE_AVFOUNDATION) || \
    defined(HAVE_GIGE_API) || \
    defined(HAVE_INTELPERC) || \
    (0)
    //defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193
#  define BUILD_WITH_CAMERA_SUPPORT 1
#else
#  define BUILD_WITH_CAMERA_SUPPORT 0
#endif

#if defined(HAVE_XINE) || \
    defined(HAVE_GSTREAMER) || \
    defined(HAVE_QUICKTIME) || \
    defined(HAVE_QTKIT) || \
    defined(HAVE_AVFOUNDATION) || \
    /*defined(HAVE_OPENNI) || too specialized */ \
    defined(HAVE_FFMPEG) || \
    defined(HAVE_MSMF)
#  define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
#  define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif

#if /*defined(HAVE_XINE) || */\
    defined(HAVE_GSTREAMER) || \
    defined(HAVE_QUICKTIME) || \
    defined(HAVE_QTKIT) || \
    defined(HAVE_AVFOUNDATION) || \
    defined(HAVE_FFMPEG) || \
    defined(HAVE_MSMF)
#  define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
#else
#  define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0
#endif

namespace cvtest
{

string fourccToString(int fourcc);

struct VideoFormat
{
    VideoFormat() { fourcc = -1; }
    VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
    bool empty() const { return ext.empty(); }

    string ext;
    int fourcc;
};

extern const VideoFormat g_specific_fmt_list[];

}

#endif
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/videoio/videoio_c.h"

using namespace cv;
using namespace std;

@@ -99,7 +99,7 @@ const VideoFormat g_specific_fmt_list[] =

}

class CV_HighGuiTest : public cvtest::BaseTest
class CV_VideoIOTest : public cvtest::BaseTest
{
protected:
    void ImageTest (const string& dir);
@@ -107,12 +107,12 @@ protected:
    void SpecificImageTest (const string& dir);
    void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt);

    CV_HighGuiTest() {}
    ~CV_HighGuiTest() {}
    CV_VideoIOTest() {}
    ~CV_VideoIOTest() {}
    virtual void run(int) = 0;
};

class CV_ImageTest : public CV_HighGuiTest
class CV_ImageTest : public CV_VideoIOTest
{
public:
    CV_ImageTest() {}
@@ -120,7 +120,7 @@ public:
    void run(int);
};

class CV_SpecificImageTest : public CV_HighGuiTest
class CV_SpecificImageTest : public CV_VideoIOTest
{
public:
    CV_SpecificImageTest() {}
@@ -128,7 +128,7 @@ public:
    void run(int);
};

class CV_VideoTest : public CV_HighGuiTest
class CV_VideoTest : public CV_VideoIOTest
{
public:
    CV_VideoTest() {}
@@ -136,7 +136,7 @@ public:
    void run(int);
};

class CV_SpecificVideoTest : public CV_HighGuiTest
class CV_SpecificVideoTest : public CV_VideoIOTest
{
public:
    CV_SpecificVideoTest() {}
@@ -145,7 +145,7 @@ public:
};


void CV_HighGuiTest::ImageTest(const string& dir)
void CV_VideoIOTest::ImageTest(const string& dir)
{
    string _name = dir + string("../cv/shared/baboon.png");
    ts->printf(ts->LOG, "reading image : %s\n", _name.c_str());
@@ -251,7 +251,7 @@ void CV_HighGuiTest::ImageTest(const string& dir)
}


void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt)
void CV_VideoIOTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
    string src_file = dir + "../cv/shared/video_for_test.avi";
    string tmp_name = cv::tempfile((cvtest::fourccToString(fmt.fourcc) + "." + fmt.ext).c_str());
@@ -337,7 +337,7 @@ void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt
    ts->printf(ts->LOG, "end test function : ImagesVideo \n");
}

void CV_HighGuiTest::SpecificImageTest(const string& dir)
void CV_VideoIOTest::SpecificImageTest(const string& dir)
{
    const size_t IMAGE_COUNT = 10;

@@ -423,7 +423,7 @@ void CV_HighGuiTest::SpecificImageTest(const string& dir)
}


void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt)
void CV_VideoIOTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
    string ext = fmt.ext;
    int fourcc = fmt.fourcc;
@@ -568,12 +568,12 @@ void CV_SpecificVideoTest::run(int)
}

#ifdef HAVE_JPEG
TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
TEST(Videoio_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif

#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__)
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
TEST(Videoio_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Videoio_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
#endif

TEST(Highgui_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); }
TEST(Videoio_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); }
@@ -41,7 +41,7 @@
//M*/

#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace cv;
using namespace std;

@@ -114,7 +114,7 @@ public:
    cap.set(CAP_PROP_POS_FRAMES, 0);
    int N = (int)cap.get(CAP_PROP_FRAME_COUNT);

    // See the same hack in CV_HighGuiTest::SpecificVideoTest for explanation.
    // See the same hack in CV_VideoIOTest::SpecificVideoTest for explanation.
    int allowed_extra_frames = 0;
    if (fmt.fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && fmt.ext == "mkv")
        allowed_extra_frames = 1;
@@ -148,8 +148,6 @@
                   idx1, idx);
        ts->printf(ts->LOG, "Saving both frames ...\n");
        ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
        // imwrite("opencv_test_highgui_postest_actual.png", img);
        // imwrite("opencv_test_highgui_postest_expected.png", img0);
        return;
    }

@@ -167,8 +165,6 @@ public:
        ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
        ts->printf(ts->LOG, "Saving both frames ...\n");
        ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
        // imwrite("opencv_test_highgui_postest_actual.png", img);
        // imwrite("opencv_test_highgui_postest_expected.png", img0);
        return;
    }
}
@@ -179,5 +175,5 @@ public:
};

#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && defined HAVE_FFMPEG
TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
TEST(Videoio_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
#endif
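For reference, the seeking behaviour these positioning tests exercise amounts to the following pattern in user code. This is a standalone sketch, not taken from the test sources; "video.avi" is a placeholder path.

// Standalone sketch of frame seeking with the videoio module.
#include "opencv2/videoio.hpp"
#include <cstdio>

int main()
{
    cv::VideoCapture cap("video.avi");              // placeholder file name
    if (!cap.isOpened())
        return 1;

    int frameCount = (int)cap.get(cv::CAP_PROP_FRAME_COUNT);
    int target = frameCount / 2;

    cap.set(cv::CAP_PROP_POS_FRAMES, target);       // jump to an arbitrary frame
    cv::Mat frame;
    cap >> frame;                                   // decode the frame at that position

    std::printf("frame count: %d, requested: %d, got a frame: %s\n",
                frameCount, target, frame.empty() ? "no" : "yes");
    return frame.empty() ? 1 : 0;
}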
@@ -1,3 +1,3 @@
set(the_description "Video stabilization")
ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video opencv_photo opencv_calib3d
    OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_highgui)
    OPTIONAL opencv_cuda opencv_cudawarping opencv_cudaoptflow opencv_videoio)
@@ -45,8 +45,8 @@
#include "opencv2/videostab/ring_buffer.hpp"

#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_HIGHGUI
# include "opencv2/highgui.hpp"
#ifdef HAVE_OPENCV_VIDEOIO
# include "opencv2/videoio.hpp"
#endif

namespace cv
@@ -64,7 +64,7 @@ public:

    virtual void reset()
    {
#ifdef HAVE_OPENCV_HIGHGUI
#ifdef HAVE_OPENCV_VIDEOIO
        vc.release();
        vc.open(path_);
        if (!vc.isOpened())
@@ -77,13 +77,13 @@ public:
    virtual Mat nextFrame()
    {
        Mat frame;
#ifdef HAVE_OPENCV_HIGHGUI
#ifdef HAVE_OPENCV_VIDEOIO
        vc >> frame;
#endif
        return volatileFrame_ ? frame : frame.clone();
    }

#ifdef HAVE_OPENCV_HIGHGUI
#ifdef HAVE_OPENCV_VIDEOIO
    int width() {return static_cast<int>(vc.get(CAP_PROP_FRAME_WIDTH));}
    int height() {return static_cast<int>(vc.get(CAP_PROP_FRAME_HEIGHT));}
    int count() {return static_cast<int>(vc.get(CAP_PROP_FRAME_COUNT));}
@@ -98,7 +98,7 @@ public:
private:
    String path_;
    bool volatileFrame_;
#ifdef HAVE_OPENCV_HIGHGUI
#ifdef HAVE_OPENCV_VIDEOIO
    VideoCapture vc;
#endif
};
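As a usage note: user code that pulls frames through videostab's file-based frame source is unaffected by this dependency switch, since only the internal include and preprocessor guard change. A minimal sketch follows, assuming the class shown above is videostab's VideoFileSource and that OpenCV was built with videoio support; "input.avi" is a placeholder path.

// Sketch only: read frames through videostab's file-based frame source.
#include "opencv2/videostab/frame_source.hpp"
#include <cstdio>

int main()
{
    cv::videostab::VideoFileSource source("input.avi");   // placeholder path
    std::printf("%dx%d, %d frames\n",
                source.width(), source.height(), source.count());

    cv::Mat frame;
    while (!(frame = source.nextFrame()).empty())
    {
        // process the frame here ...
    }
    return 0;
}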