Split highgui module into videoio and highgui

This commit is contained in:
vbystricky
2014-07-10 18:27:32 +04:00
committed by VBystricky
parent f773cd9a3e
commit d58f736935
149 changed files with 1673 additions and 1309 deletions

View File

@@ -18,13 +18,9 @@ endif()
set(highgui_hdrs
src/precomp.hpp
src/cap_ffmpeg_impl.hpp
)
set(highgui_srcs
src/cap.cpp
src/cap_images.cpp
src/cap_ffmpeg.cpp
src/window.cpp
)
@@ -76,128 +72,6 @@ elseif(HAVE_COCOA)
list(APPEND HIGHGUI_LIBRARIES "-framework Cocoa")
endif()
if(WIN32 AND NOT ARM)
list(APPEND highgui_srcs src/cap_cmu.cpp)
endif()
if (WIN32 AND HAVE_DSHOW)
list(APPEND highgui_srcs src/cap_dshow.cpp)
endif()
if (WIN32 AND HAVE_MSMF)
list(APPEND highgui_srcs src/cap_msmf.cpp)
endif()
if (WIN32 AND HAVE_VFW)
list(APPEND highgui_srcs src/cap_vfw.cpp)
endif()
if(HAVE_XINE)
list(APPEND highgui_srcs src/cap_xine.cpp)
endif(HAVE_XINE)
if(HAVE_DC1394_2)
list(APPEND highgui_srcs src/cap_dc1394_v2.cpp)
endif(HAVE_DC1394_2)
if(HAVE_DC1394)
list(APPEND highgui_srcs src/cap_dc1394.cpp)
endif(HAVE_DC1394)
if(HAVE_GSTREAMER)
list(APPEND highgui_srcs src/cap_gstreamer.cpp)
endif(HAVE_GSTREAMER)
if(HAVE_UNICAP)
list(APPEND highgui_srcs src/cap_unicap.cpp)
endif(HAVE_UNICAP)
if(HAVE_LIBV4L)
list(APPEND highgui_srcs src/cap_libv4l.cpp)
elseif(HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO)
list(APPEND highgui_srcs src/cap_v4l.cpp)
endif()
if(HAVE_OPENNI)
list(APPEND highgui_srcs src/cap_openni.cpp)
ocv_include_directories(${OPENNI_INCLUDE_DIR})
list(APPEND HIGHGUI_LIBRARIES ${OPENNI_LIBRARY})
endif(HAVE_OPENNI)
if(HAVE_opencv_androidcamera)
list(APPEND highgui_srcs src/cap_android.cpp)
add_definitions(-DHAVE_ANDROID_NATIVE_CAMERA)#TODO: remove this line
endif(HAVE_opencv_androidcamera)
if(HAVE_XIMEA)
list(APPEND highgui_srcs src/cap_ximea.cpp)
ocv_include_directories(${XIMEA_PATH})
if(XIMEA_LIBRARY_DIR)
link_directories("${XIMEA_LIBRARY_DIR}")
endif()
if(X86_64)
list(APPEND HIGHGUI_LIBRARIES m3apiX64)
else()
list(APPEND HIGHGUI_LIBRARIES m3api)
endif()
endif(HAVE_XIMEA)
if(HAVE_FFMPEG)
if(UNIX AND BZIP2_LIBRARIES)
list(APPEND HIGHGUI_LIBRARIES ${BZIP2_LIBRARIES})
endif()
if(APPLE)
list(APPEND HIGHGUI_LIBRARIES "-framework VideoDecodeAcceleration" bz2)
endif()
endif(HAVE_FFMPEG)
if(HAVE_PVAPI)
add_definitions(-DHAVE_PVAPI)
add_definitions(${PVAPI_DEFINITIONS})
ocv_include_directories(${PVAPI_INCLUDE_PATH})
set(highgui_srcs src/cap_pvapi.cpp ${highgui_srcs})
list(APPEND HIGHGUI_LIBRARIES ${PVAPI_LIBRARY})
endif()
if(HAVE_GIGE_API)
add_definitions(-DHAVE_GIGE_API)
ocv_include_directories(${GIGEAPI_INCLUDE_PATH})
set(highgui_srcs src/cap_giganetix.cpp ${highgui_srcs})
list(APPEND HIGHGUI_LIBRARIES ${GIGEAPI_LIBRARIES})
list(APPEND highgui_srcs src/cap_giganetix.cpp)
endif(HAVE_GIGE_API)
if(HAVE_AVFOUNDATION)
list(APPEND highgui_srcs src/cap_avfoundation.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework AVFoundation" "-framework QuartzCore")
endif()
if(HAVE_QUICKTIME)
list(APPEND highgui_srcs src/cap_qt.cpp)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
elseif(HAVE_QTKIT)
list(APPEND highgui_srcs src/cap_qtkit.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
if(HAVE_INTELPERC)
list(APPEND highgui_srcs src/cap_intelperc.cpp)
ocv_include_directories(${INTELPERC_INCLUDE_DIR})
list(APPEND HIGHGUI_LIBRARIES ${INTELPERC_LIBRARIES})
endif(HAVE_INTELPERC)
if(IOS)
add_definitions(-DHAVE_IOS=1)
list(APPEND highgui_srcs src/ios_conversions.mm src/cap_ios_abstract_camera.mm src/cap_ios_photo_camera.mm src/cap_ios_video_camera.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework Accelerate" "-framework AVFoundation" "-framework CoreGraphics" "-framework CoreImage" "-framework CoreMedia" "-framework CoreVideo" "-framework QuartzCore" "-framework AssetsLibrary")
endif()
if(WIN32)
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests
endif()
if(UNIX)
#these variables are set by CHECK_MODULE macro
foreach(P ${HIGHGUI_INCLUDE_DIRS})
@@ -248,33 +122,5 @@ set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")
ocv_add_precompiled_headers(${the_module})
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations)
if(WIN32 AND WITH_FFMPEG)
#copy ffmpeg dll to the output folder
if(MSVC64 OR MINGW64)
set(FFMPEG_SUFFIX _64)
endif()
set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll")
set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
if(MSVC_IDE)
add_custom_command(TARGET ${the_module} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
elseif(MSVC AND (CMAKE_GENERATOR MATCHES "Visual"))
add_custom_command(TARGET ${the_module} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
else()
add_custom_command(TARGET ${the_module} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
endif()
install(FILES "${ffmpeg_path}" DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs RENAME "${ffmpeg_bare_name_ver}")
endif()
ocv_add_accuracy_tests()
ocv_add_perf_tests()

View File

@@ -9,11 +9,9 @@ It provides easy interface to:
* Create and manipulate windows that can display images and "remember" their content (no need to handle repaint events from the OS).
* Add trackbars to the windows, handle simple mouse events as well as keyboard commands.
* Read video from camera or file and write video to a file.
.. toctree::
:maxdepth: 2
user_interface
reading_and_writing_video
qt_new_functions

View File

@@ -1,360 +0,0 @@
Reading and Writing Video
=========================
.. highlight:: cpp
VideoCapture
------------
.. ocv:class:: VideoCapture
Class for video capturing from video files, image sequences or cameras.
The class provides a C++ API for capturing video from cameras or for reading video files and image sequences. Here is how the class can be used: ::
#include "opencv2/opencv.hpp"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat edges;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
cvtColor(frame, edges, COLOR_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
.. note:: In the C API the black-box structure ``CvCapture`` is used instead of ``VideoCapture``.
.. note::
* A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/cpp/starter_video.cpp
* Another basic video processing sample can be found at opencv_source_code/samples/cpp/video_dmtx.cpp
* (Python) A basic sample on using the VideoCapture interface can be found at opencv_source_code/samples/python2/video.py
* (Python) Another basic video processing sample can be found at opencv_source_code/samples/python2/video_dmtx.py
* (Python) A multi threaded video processing sample can be found at opencv_source_code/samples/python2/video_threaded.py
VideoCapture::VideoCapture
------------------------------
VideoCapture constructors.
.. ocv:function:: VideoCapture::VideoCapture()
.. ocv:function:: VideoCapture::VideoCapture(const String& filename)
.. ocv:function:: VideoCapture::VideoCapture(int device)
.. ocv:pyfunction:: cv2.VideoCapture() -> <VideoCapture object>
.. ocv:pyfunction:: cv2.VideoCapture(filename) -> <VideoCapture object>
.. ocv:pyfunction:: cv2.VideoCapture(device) -> <VideoCapture object>
.. ocv:cfunction:: CvCapture* cvCaptureFromCAM( int device )
.. ocv:cfunction:: CvCapture* cvCaptureFromFile( const char* filename )
:param filename: name of the opened video file (e.g. video.avi) or image sequence (e.g. img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
:param device: id of the opened video capturing device (i.e. a camera index). If there is a single camera connected, just pass 0.
.. note:: In the C API, when you have finished working with video, release the ``CvCapture`` structure with ``cvReleaseCapture()``, or use a ``Ptr<CvCapture>`` that calls ``cvReleaseCapture()`` automatically in its destructor.
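A minimal sketch of the ``Ptr<CvCapture>`` idiom (the camera index is an assumption; ``cvReleaseCapture()`` is then invoked automatically by the deleter): ::
Ptr<CvCapture> capture(cvCaptureFromCAM(0));
if (!capture.empty())
{
IplImage* frame = cvQueryFrame(capture.get()); // do not release or modify the frame
// process frame here
}
// cvReleaseCapture() runs when 'capture' goes out of scope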
VideoCapture::open
---------------------
Opens a video file or a capturing device for video capturing.
.. ocv:function:: bool VideoCapture::open(const String& filename)
.. ocv:function:: bool VideoCapture::open(int device)
.. ocv:pyfunction:: cv2.VideoCapture.open(filename) -> retval
.. ocv:pyfunction:: cv2.VideoCapture.open(device) -> retval
:param filename: name of the opened video file (e.g. video.avi) or image sequence (e.g. img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
:param device: id of the opened video capturing device (i.e. a camera index).
The methods first call :ocv:func:`VideoCapture::release` to close the already opened file or camera.
VideoCapture::isOpened
----------------------
Returns true if video capturing has been initialized already.
.. ocv:function:: bool VideoCapture::isOpened()
.. ocv:pyfunction:: cv2.VideoCapture.isOpened() -> retval
If the previous call to ``VideoCapture`` constructor or ``VideoCapture::open`` succeeded, the method returns true.
VideoCapture::release
---------------------
Closes the video file or capturing device.
.. ocv:function:: void VideoCapture::release()
.. ocv:pyfunction:: cv2.VideoCapture.release() -> None
.. ocv:cfunction:: void cvReleaseCapture(CvCapture** capture)
The methods are automatically called by subsequent :ocv:func:`VideoCapture::open` and by ``VideoCapture`` destructor.
The C function also deallocates memory and clears the ``*capture`` pointer.
VideoCapture::grab
---------------------
Grabs the next frame from a video file or capturing device.
.. ocv:function:: bool VideoCapture::grab()
.. ocv:pyfunction:: cv2.VideoCapture.grab() -> retval
.. ocv:cfunction:: int cvGrabFrame(CvCapture* capture)
The methods/functions grab the next frame from a video file or camera and return true (non-zero) on success.
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call ``VideoCapture::grab()`` for each camera and after that call the slower method ``VideoCapture::retrieve()`` to decode and get a frame from each camera. This way the overhead of demosaicing or motion JPEG decompression etc. is eliminated and the retrieved frames from the different cameras will be closer in time.
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the correct way of retrieving data from it is to call :ocv:func:`VideoCapture::grab` first and then call :ocv:func:`VideoCapture::retrieve` one or more times with different values of the ``channel`` parameter. See https://github.com/Itseez/opencv/tree/master/samples/cpp/openni_capture.cpp
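For example, a sketch of the two-camera pattern described above (the camera indices are assumptions): ::
VideoCapture cam0(0), cam1(1);
Mat frame0, frame1;
cam0.grab(); // grab both frames as close together in time as possible...
cam1.grab();
cam0.retrieve(frame0); // ...then do the slower decoding
cam1.retrieve(frame1);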
VideoCapture::retrieve
----------------------
Decodes and returns the grabbed video frame.
.. ocv:function:: bool VideoCapture::retrieve( OutputArray image, int flag=0 )
.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, flag]]) -> retval, image
.. ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 )
The methods/functions decode and return the just grabbed frame. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
.. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy.
VideoCapture::read
----------------------
Grabs, decodes and returns the next video frame.
.. ocv:function:: VideoCapture& VideoCapture::operator >> (Mat& image)
.. ocv:function:: VideoCapture& VideoCapture::operator >> (UMat& image)
.. ocv:function:: bool VideoCapture::read(OutputArray image)
.. ocv:pyfunction:: cv2.VideoCapture.read([image]) -> retval, image
.. ocv:cfunction:: IplImage* cvQueryFrame(CvCapture* capture)
The methods/functions combine :ocv:func:`VideoCapture::grab` and :ocv:func:`VideoCapture::retrieve` in one call. This is the most convenient method for reading video files or capturing data from cameras. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the methods return false and the functions return a NULL pointer.
.. note:: OpenCV 1.x functions ``cvRetrieveFrame`` and ``cv.RetrieveFrame`` return image stored inside the video capturing structure. It is not allowed to modify or release the image! You can copy the frame using :ocv:cfunc:`cvCloneImage` and then do whatever you want with the copy.
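For example, a sketch of a plain decoding loop built on ``read()`` (the file name is an assumption): ::
VideoCapture cap("video.avi");
Mat frame;
while (cap.read(frame))
{
// process frame here
}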
VideoCapture::get
---------------------
Returns the specified ``VideoCapture`` property.
.. ocv:function:: double VideoCapture::get(int propId)
.. ocv:pyfunction:: cv2.VideoCapture.get(propId) -> retval
.. ocv:cfunction:: double cvGetCaptureProperty( CvCapture* capture, int property_id )
:param propId: Property identifier. It can be one of the following:
* **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video capture timestamp.
* **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
* **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the film, 1 - end of the film.
* **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
* **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
* **CV_CAP_PROP_FPS** Frame rate.
* **CV_CAP_PROP_FOURCC** 4-character code of codec.
* **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
* **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by ``retrieve()`` .
* **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
* **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
* **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
* **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
* **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
* **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
* **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
* **CV_CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted to RGB.
* **CV_CAP_PROP_WHITE_BALANCE** Currently not supported
* **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
**Note**: When querying a property that is not supported by the backend used by the ``VideoCapture`` class, a value of 0 is returned.
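For example, a sketch that queries a few of the properties above from an opened capture; the C++ ``CAP_PROP_*`` constants correspond to the ``CV_CAP_PROP_*`` names listed here, and any query may come back as 0 if the backend does not support it: ::
VideoCapture cap("video.avi");
double fps = cap.get(CAP_PROP_FPS);
double frameCount = cap.get(CAP_PROP_FRAME_COUNT);
double width = cap.get(CAP_PROP_FRAME_WIDTH);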
VideoCapture::set
---------------------
Sets a property in the ``VideoCapture``.
.. ocv:function:: bool VideoCapture::set( int propId, double value )
.. ocv:pyfunction:: cv2.VideoCapture.set(propId, value) -> retval
.. ocv:cfunction:: int cvSetCaptureProperty( CvCapture* capture, int property_id, double value )
:param propId: Property identifier. It can be one of the following:
* **CV_CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
* **CV_CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
* **CV_CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the film, 1 - end of the film.
* **CV_CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
* **CV_CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
* **CV_CAP_PROP_FPS** Frame rate.
* **CV_CAP_PROP_FOURCC** 4-character code of codec.
* **CV_CAP_PROP_FRAME_COUNT** Number of frames in the video file.
* **CV_CAP_PROP_FORMAT** Format of the Mat objects returned by ``retrieve()`` .
* **CV_CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
* **CV_CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
* **CV_CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
* **CV_CAP_PROP_SATURATION** Saturation of the image (only for cameras).
* **CV_CAP_PROP_HUE** Hue of the image (only for cameras).
* **CV_CAP_PROP_GAIN** Gain of the image (only for cameras).
* **CV_CAP_PROP_EXPOSURE** Exposure (only for cameras).
* **CV_CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted to RGB.
* **CV_CAP_PROP_WHITE_BALANCE** Currently unsupported
* **CV_CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
:param value: Value of the property.
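For example, a sketch that requests a capture resolution and reads back what the backend actually applied, since not every backend honors the request: ::
VideoCapture cap(0);
cap.set(CAP_PROP_FRAME_WIDTH, 640);
cap.set(CAP_PROP_FRAME_HEIGHT, 480);
double w = cap.get(CAP_PROP_FRAME_WIDTH); // may differ from the requested 640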
VideoWriter
-----------
.. ocv:class:: VideoWriter
Video writer class.
VideoWriter::VideoWriter
------------------------
VideoWriter constructors.
.. ocv:function:: VideoWriter::VideoWriter()
.. ocv:function:: VideoWriter::VideoWriter(const String& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
.. ocv:pyfunction:: cv2.VideoWriter([filename, fourcc, fps, frameSize[, isColor]]) -> <VideoWriter object>
.. ocv:cfunction:: CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 )
.. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval
.. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval
.. ocv:pyfunction:: cv2.VideoWriter.write(image) -> None
:param filename: Name of the output video file.
:param fourcc: 4-character code of the codec used to compress the frames. For example, ``CV_FOURCC('P','I','M','1')`` is an MPEG-1 codec and ``CV_FOURCC('M','J','P','G')`` is a motion-JPEG codec. A list of codes can be obtained at the `Video Codecs by FOURCC <http://www.fourcc.org/codecs.php>`_ page.
:param fps: Framerate of the created video stream.
:param frameSize: Size of the video frames.
:param isColor: If it is not zero, the encoder will expect and encode color frames, otherwise it will work with grayscale frames (the flag is currently supported on Windows only).
The constructors/functions initialize video writers. On Linux, FFMPEG is used to write videos; on Windows, FFMPEG or VFW is used; on Mac OS X, QTKit is used.
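A minimal writing sketch (the output name, codec, frame rate and size are assumptions): ::
VideoWriter writer("out.avi", VideoWriter::fourcc('X','V','I','D'), 25.0, Size(640, 480));
Mat frame(480, 640, CV_8UC3, Scalar::all(0));
if (writer.isOpened())
writer.write(frame);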
ReleaseVideoWriter
------------------
Releases the AVI writer.
.. ocv:cfunction:: void cvReleaseVideoWriter( CvVideoWriter** writer )
The function should be called after you have finished using the ``CvVideoWriter`` opened with :ocv:cfunc:`CreateVideoWriter`.
VideoWriter::open
-----------------
Initializes or reinitializes video writer.
.. ocv:function:: bool VideoWriter::open(const String& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
.. ocv:pyfunction:: cv2.VideoWriter.open(filename, fourcc, fps, frameSize[, isColor]) -> retval
The method opens the video writer. Parameters are the same as in the constructor :ocv:func:`VideoWriter::VideoWriter`.
VideoWriter::isOpened
---------------------
Returns true if video writer has been successfully initialized.
.. ocv:function:: bool VideoWriter::isOpened()
.. ocv:pyfunction:: cv2.VideoWriter.isOpened() -> retval
VideoWriter::write
------------------
Writes the next video frame.
.. ocv:function:: VideoWriter& VideoWriter::operator << (const Mat& image)
.. ocv:function:: void VideoWriter::write(const Mat& image)
.. ocv:pyfunction:: cv2.VideoWriter.write(image) -> None
.. ocv:cfunction:: int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
:param writer: Video writer structure (OpenCV 1.x API)
:param image: The written frame
The functions/methods write the specified image to the video file. The image must have the same size as was specified when opening the video writer.

View File

@@ -202,344 +202,4 @@ CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,
} // cv
////////////////////////////////// video io /////////////////////////////////
typedef struct CvCapture CvCapture;
typedef struct CvVideoWriter CvVideoWriter;
namespace cv
{
// Camera API
enum { CAP_ANY = 0, // autodetect
CAP_VFW = 200, // platform native
CAP_V4L = 200,
CAP_V4L2 = CAP_V4L,
CAP_FIREWARE = 300, // IEEE 1394 drivers
CAP_FIREWIRE = CAP_FIREWARE,
CAP_IEEE1394 = CAP_FIREWARE,
CAP_DC1394 = CAP_FIREWARE,
CAP_CMU1394 = CAP_FIREWARE,
CAP_QT = 500, // QuickTime
CAP_UNICAP = 600, // Unicap drivers
CAP_DSHOW = 700, // DirectShow (via videoInput)
CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
CAP_OPENNI = 900, // OpenNI (for Kinect)
CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
CAP_ANDROID = 1000, // Android
CAP_XIAPI = 1100, // XIMEA Camera API
CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput)
CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
};
// generic properties (based on DC1394 properties)
enum { CAP_PROP_POS_MSEC =0,
CAP_PROP_POS_FRAMES =1,
CAP_PROP_POS_AVI_RATIO =2,
CAP_PROP_FRAME_WIDTH =3,
CAP_PROP_FRAME_HEIGHT =4,
CAP_PROP_FPS =5,
CAP_PROP_FOURCC =6,
CAP_PROP_FRAME_COUNT =7,
CAP_PROP_FORMAT =8,
CAP_PROP_MODE =9,
CAP_PROP_BRIGHTNESS =10,
CAP_PROP_CONTRAST =11,
CAP_PROP_SATURATION =12,
CAP_PROP_HUE =13,
CAP_PROP_GAIN =14,
CAP_PROP_EXPOSURE =15,
CAP_PROP_CONVERT_RGB =16,
CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CAP_PROP_RECTIFICATION =18,
CAP_PROP_MONOCROME =19,
CAP_PROP_SHARPNESS =20,
CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust reference level using this feature
CAP_PROP_GAMMA =22,
CAP_PROP_TEMPERATURE =23,
CAP_PROP_TRIGGER =24,
CAP_PROP_TRIGGER_DELAY =25,
CAP_PROP_WHITE_BALANCE_RED_V =26,
CAP_PROP_ZOOM =27,
CAP_PROP_FOCUS =28,
CAP_PROP_GUID =29,
CAP_PROP_ISO_SPEED =30,
CAP_PROP_BACKLIGHT =32,
CAP_PROP_PAN =33,
CAP_PROP_TILT =34,
CAP_PROP_ROLL =35,
CAP_PROP_IRIS =36,
CAP_PROP_SETTINGS =37
};
// DC1394 only
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
// every feature can have only one mode turned on at a time
enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
CAP_PROP_DC1394_MODE_AUTO = -2,
CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
CAP_PROP_DC1394_MAX = 31
};
// OpenNI map generators
enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
};
// Properties of cameras available through OpenNI interfaces
enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
CAP_PROP_OPENNI_BASELINE = 102, // in mm
CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping of the depth map to the image map
// by changing the depth generator's viewpoint (if the flag is "on") or
// resets this viewpoint to its normal one (if the flag is "off").
CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
CAP_PROP_OPENNI_GENERATOR_PRESENT = 109
};
// OpenNI shortcuts
enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
};
// OpenNI data given from depth generator
enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator
CAP_OPENNI_BGR_IMAGE = 5,
CAP_OPENNI_GRAY_IMAGE = 6
};
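// Illustrative sketch (not part of the original header): after a single grab(),
// the individual OpenNI streams above can be fetched with retrieve(), e.g.:
//
//     VideoCapture cap(CAP_OPENNI);
//     Mat depthMap, bgrImage;
//     if (cap.grab())
//     {
//         cap.retrieve(depthMap, CAP_OPENNI_DEPTH_MAP);
//         cap.retrieve(bgrImage, CAP_OPENNI_BGR_IMAGE);
//     }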
// Supported output modes of OpenNI image generator
enum { CAP_OPENNI_VGA_30HZ = 0,
CAP_OPENNI_SXGA_15HZ = 1,
CAP_OPENNI_SXGA_30HZ = 2,
CAP_OPENNI_QVGA_30HZ = 3,
CAP_OPENNI_QVGA_60HZ = 4
};
// GStreamer
enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
};
// PVAPI
enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // IP for enabling multicast master mode; 0 to disable multicast
CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301 // FrameStartTriggerMode: Determines how a frame is initiated
};
// PVAPI: FrameStartTriggerMode
enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun
CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1
CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2
CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate
CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software
};
// Properties of cameras available through XIMEA SDK interface
enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CAP_PROP_XI_MANUAL_WB = 413, // Calculates white balance (must be called during acquisition)
CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve (in %)
CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
};
// Properties for Android cameras
enum { CAP_PROP_ANDROID_AUTOGRAB = 1024,
CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed
CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed
CAP_PROP_ANDROID_FLASH_MODE = 8001,
CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
CAP_PROP_ANDROID_ANTIBANDING = 8004,
CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008
};
// Android camera output formats
enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR,
CAP_ANDROID_GREY_FRAME = 1, //Y
CAP_ANDROID_COLOR_FRAME_RGB = 2,
CAP_ANDROID_COLOR_FRAME_BGRA = 3,
CAP_ANDROID_COLOR_FRAME_RGBA = 4
};
// Android camera flash modes
enum { CAP_ANDROID_FLASH_MODE_AUTO = 0,
CAP_ANDROID_FLASH_MODE_OFF = 1,
CAP_ANDROID_FLASH_MODE_ON = 2,
CAP_ANDROID_FLASH_MODE_RED_EYE = 3,
CAP_ANDROID_FLASH_MODE_TORCH = 4
};
// Android camera focus modes
enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0,
CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1,
CAP_ANDROID_FOCUS_MODE_EDOF = 2,
CAP_ANDROID_FOCUS_MODE_FIXED = 3,
CAP_ANDROID_FOCUS_MODE_INFINITY = 4,
CAP_ANDROID_FOCUS_MODE_MACRO = 5
};
// Android camera white balance modes
enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1,
CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2,
CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3,
CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4,
CAP_ANDROID_WHITE_BALANCE_SHADE = 5,
CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6,
CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7
};
// Android camera antibanding modes
enum { CAP_ANDROID_ANTIBANDING_50HZ = 0,
CAP_ANDROID_ANTIBANDING_60HZ = 1,
CAP_ANDROID_ANTIBANDING_AUTO = 2,
CAP_ANDROID_ANTIBANDING_OFF = 3
};
// Properties of cameras available through AVFOUNDATION interface
enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
CAP_PROP_IOS_DEVICE_FLASH = 9003,
CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
CAP_PROP_IOS_DEVICE_TORCH = 9005
};
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
};
enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007
};
// Intel PerC streams
enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR
};
enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
CAP_INTELPERC_IMAGE = 3
};
class IVideoCapture;
class CV_EXPORTS_W VideoCapture
{
public:
CV_WRAP VideoCapture();
CV_WRAP VideoCapture(const String& filename);
CV_WRAP VideoCapture(int device);
virtual ~VideoCapture();
CV_WRAP virtual bool open(const String& filename);
CV_WRAP virtual bool open(int device);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
CV_WRAP virtual bool grab();
CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);
virtual VideoCapture& operator >> (CV_OUT Mat& image);
virtual VideoCapture& operator >> (CV_OUT UMat& image);
CV_WRAP virtual bool read(OutputArray image);
CV_WRAP virtual bool set(int propId, double value);
CV_WRAP virtual double get(int propId);
protected:
Ptr<CvCapture> cap;
Ptr<IVideoCapture> icap;
private:
static Ptr<IVideoCapture> createCameraCapture(int index);
};
class CV_EXPORTS_W VideoWriter
{
public:
CV_WRAP VideoWriter();
CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
Size frameSize, bool isColor = true);
virtual ~VideoWriter();
CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
Size frameSize, bool isColor = true);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
virtual VideoWriter& operator << (const Mat& image);
CV_WRAP virtual void write(const Mat& image);
CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);
protected:
Ptr<CvVideoWriter> writer;
};
template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;
template<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const;
} // cv
#endif

View File

@@ -1,169 +0,0 @@
/* For iOS video I/O
* by Eduard Feicho on 29/07/12
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import <UIKit/UIKit.h>
#import <Accelerate/Accelerate.h>
#import <AVFoundation/AVFoundation.h>
#import <ImageIO/ImageIO.h>
#include "opencv2/core.hpp"
/////////////////////////////////////// CvAbstractCamera /////////////////////////////////////
@class CvAbstractCamera;
@interface CvAbstractCamera : NSObject
{
AVCaptureSession* captureSession;
AVCaptureConnection* videoCaptureConnection;
AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
UIDeviceOrientation currentDeviceOrientation;
BOOL cameraAvailable;
BOOL captureSessionLoaded;
BOOL running;
BOOL useAVCaptureVideoPreviewLayer;
AVCaptureDevicePosition defaultAVCaptureDevicePosition;
AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
NSString *const defaultAVCaptureSessionPreset;
int defaultFPS;
UIView* parentView;
int imageWidth;
int imageHeight;
}
@property (nonatomic, retain) AVCaptureSession* captureSession;
@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection;
@property (nonatomic, readonly) BOOL running;
@property (nonatomic, readonly) BOOL captureSessionLoaded;
@property (nonatomic, assign) int defaultFPS;
@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
@property (nonatomic, assign) int imageWidth;
@property (nonatomic, assign) int imageHeight;
@property (nonatomic, retain) UIView* parentView;
- (void)start;
- (void)stop;
- (void)switchCameras;
- (id)initWithParentView:(UIView*)parent;
- (void)createCaptureOutput;
- (void)createVideoPreviewLayer;
- (void)updateOrientation;
- (void)lockFocus;
- (void)unlockFocus;
- (void)lockExposure;
- (void)unlockExposure;
- (void)lockBalance;
- (void)unlockBalance;
@end
///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
@class CvVideoCamera;
@protocol CvVideoCameraDelegate <NSObject>
#ifdef __cplusplus
// delegate method for processing image frames
- (void)processImage:(cv::Mat&)image;
#endif
@end
@interface CvVideoCamera : CvAbstractCamera<AVCaptureVideoDataOutputSampleBufferDelegate>
{
AVCaptureVideoDataOutput *videoDataOutput;
dispatch_queue_t videoDataOutputQueue;
CALayer *customPreviewLayer;
BOOL grayscaleMode;
BOOL recordVideo;
BOOL rotateVideo;
AVAssetWriterInput* recordAssetWriterInput;
AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
AVAssetWriter* recordAssetWriter;
CMTime lastSampleTime;
}
@property (nonatomic, assign) id<CvVideoCameraDelegate> delegate;
@property (nonatomic, assign) BOOL grayscaleMode;
@property (nonatomic, assign) BOOL recordVideo;
@property (nonatomic, assign) BOOL rotateVideo;
@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput;
@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
@property (nonatomic, retain) AVAssetWriter* recordAssetWriter;
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
- (void)layoutPreviewLayer;
- (void)saveVideo;
- (NSURL *)videoFileURL;
@end
///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
@class CvPhotoCamera;
@protocol CvPhotoCameraDelegate <NSObject>
- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;
- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;
@end
@interface CvPhotoCamera : CvAbstractCamera
{
AVCaptureStillImageOutput *stillImageOutput;
}
@property (nonatomic, assign) id<CvPhotoCameraDelegate> delegate;
- (void)takePicture;
@end

View File

@@ -206,364 +206,10 @@ CVAPI(void) cvSetOpenGlContext(const char* window_name);
CVAPI(void) cvUpdateWindow(const char* window_name);
/****************************************************************************************\
* Working with Video Files and Cameras *
\****************************************************************************************/
/* "black box" capture structure */
typedef struct CvCapture CvCapture;
/* start capturing frames from video file */
CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
enum
{
CV_CAP_ANY =0, // autodetect
CV_CAP_MIL =100, // MIL proprietary drivers
CV_CAP_VFW =200, // platform native
CV_CAP_V4L =200,
CV_CAP_V4L2 =200,
CV_CAP_FIREWARE =300, // IEEE 1394 drivers
CV_CAP_FIREWIRE =300,
CV_CAP_IEEE1394 =300,
CV_CAP_DC1394 =300,
CV_CAP_CMU1394 =300,
CV_CAP_STEREO =400, // TYZX proprietary drivers
CV_CAP_TYZX =400,
CV_TYZX_LEFT =400,
CV_TYZX_RIGHT =401,
CV_TYZX_COLOR =402,
CV_TYZX_Z =403,
CV_CAP_QT =500, // QuickTime
CV_CAP_UNICAP =600, // Unicap drivers
CV_CAP_DSHOW =700, // DirectShow (via videoInput)
CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android
CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera
CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera
CV_CAP_XIAPI =1100, // XIMEA Camera API
CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
};
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
CVAPI(CvCapture*) cvCreateCameraCapture( int index );
/* grab a frame, return 1 on success, 0 on fail.
this function is intended to be fast */
CVAPI(int) cvGrabFrame( CvCapture* capture );
/* get the frame grabbed with cvGrabFrame(..)
This function may apply some frame processing like
frame decompression, flipping etc.
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
/* Just a combination of cvGrabFrame and cvRetrieveFrame
!!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
/* stop capturing/reading and free resources */
CVAPI(void) cvReleaseCapture( CvCapture** capture );
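/* Illustrative sketch (not part of the original header): a minimal capture
   loop built from the functions above, assuming a camera at index 0. */
#if 0
CvCapture* capture = cvCreateCameraCapture(0);
if (capture)
{
    for (int i = 0; i < 100; i++)
    {
        IplImage* frame = cvQueryFrame(capture); /* do not release or modify */
        if (!frame)
            break;
        /* process frame here */
    }
    cvReleaseCapture(&capture);
}
#endif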
enum
{
// modes of the controlling registers (can be: auto, manual, auto single push, absolute; the latter is allowed with any other mode)
// every feature can have only one mode turned on at a time
CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
CV_CAP_PROP_DC1394_MODE_AUTO = -2,
CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
CV_CAP_PROP_POS_MSEC =0,
CV_CAP_PROP_POS_FRAMES =1,
CV_CAP_PROP_POS_AVI_RATIO =2,
CV_CAP_PROP_FRAME_WIDTH =3,
CV_CAP_PROP_FRAME_HEIGHT =4,
CV_CAP_PROP_FPS =5,
CV_CAP_PROP_FOURCC =6,
CV_CAP_PROP_FRAME_COUNT =7,
CV_CAP_PROP_FORMAT =8,
CV_CAP_PROP_MODE =9,
CV_CAP_PROP_BRIGHTNESS =10,
CV_CAP_PROP_CONTRAST =11,
CV_CAP_PROP_SATURATION =12,
CV_CAP_PROP_HUE =13,
CV_CAP_PROP_GAIN =14,
CV_CAP_PROP_EXPOSURE =15,
CV_CAP_PROP_CONVERT_RGB =16,
CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17,
CV_CAP_PROP_RECTIFICATION =18,
CV_CAP_PROP_MONOCROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
// user can adjust reference level
// using this feature
CV_CAP_PROP_GAMMA =22,
CV_CAP_PROP_TEMPERATURE =23,
CV_CAP_PROP_TRIGGER =24,
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
CV_CAP_PROP_FOCUS =28,
CV_CAP_PROP_GUID =29,
CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
CV_CAP_PROP_BACKLIGHT =32,
CV_CAP_PROP_PAN =33,
CV_CAP_PROP_TILT =34,
CV_CAP_PROP_ROLL =35,
CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,
CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns const char* indeed
CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns const char* indeed
// OpenNI map generators
CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,
// Properties of cameras available through OpenNI interfaces
CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping of the depth map to the image map
// by changing the depth generator's viewpoint (if the flag is "on") or
// resets this viewpoint to its normal one (if the flag is "off").
CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
// PVAPI
CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // IP for enabling multicast master mode; 0 to disable multicast
CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
// Properties of cameras available through XIMEA SDK interface
CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates white balance (must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve (in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
// Properties of cameras available through AVFOUNDATION interface
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
// Intel PerC streams
CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
};
enum
{
// Data given from depth generator.
CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
// Data given from RGB image generator.
CV_CAP_OPENNI_BGR_IMAGE = 5,
CV_CAP_OPENNI_GRAY_IMAGE = 6
};
// Supported output modes of OpenNI image generator
enum
{
CV_CAP_OPENNI_VGA_30HZ = 0,
CV_CAP_OPENNI_SXGA_15HZ = 1,
CV_CAP_OPENNI_SXGA_30HZ = 2,
CV_CAP_OPENNI_QVGA_30HZ = 3,
CV_CAP_OPENNI_QVGA_60HZ = 4
};
// supported Android camera output formats
enum
{
CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR,
CV_CAP_ANDROID_GREY_FRAME = 1, //Y
CV_CAP_ANDROID_COLOR_FRAME_RGB = 2,
CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3,
CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4
};
// supported Android camera flash modes
enum
{
CV_CAP_ANDROID_FLASH_MODE_AUTO = 0,
CV_CAP_ANDROID_FLASH_MODE_OFF,
CV_CAP_ANDROID_FLASH_MODE_ON,
CV_CAP_ANDROID_FLASH_MODE_RED_EYE,
CV_CAP_ANDROID_FLASH_MODE_TORCH
};
// supported Android camera focus modes
enum
{
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
CV_CAP_ANDROID_FOCUS_MODE_FIXED,
CV_CAP_ANDROID_FOCUS_MODE_INFINITY,
CV_CAP_ANDROID_FOCUS_MODE_MACRO
};
// supported Android camera white balance modes
enum
{
CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT,
CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT,
CV_CAP_ANDROID_WHITE_BALANCE_SHADE,
CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT,
CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT
};
// supported Android camera antibanding modes
enum
{
CV_CAP_ANDROID_ANTIBANDING_50HZ = 0,
CV_CAP_ANDROID_ANTIBANDING_60HZ,
CV_CAP_ANDROID_ANTIBANDING_AUTO,
CV_CAP_ANDROID_ANTIBANDING_OFF
};
enum
{
CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
CV_CAP_INTELPERC_IMAGE = 3
};
/* retrieve or set capture properties */
CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
// Return the type of the capturer (e.g. CV_CAP_VFW, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return CV_FOURCC_MACRO(c1, c2, c3, c4);
}
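/* For example, CV_FOURCC('M','J','P','G') packs the four characters little-endian:
   0x4D + (0x4A << 8) + (0x50 << 16) + (0x47 << 24) == 0x47504A4D. */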
#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */
/* initialize video file writer */
CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frame_size,
int is_color CV_DEFAULT(1));
/* write frame to video file */
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
/* close video file writer */
CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
/****************************************************************************************\
* Obsolete functions/synonyms *
\****************************************************************************************/
#define cvCaptureFromFile cvCreateFileCapture
#define cvCaptureFromCAM cvCreateCameraCapture
#define cvCaptureFromAVI cvCaptureFromFile
#define cvCreateAVIWriter cvCreateVideoWriter
#define cvWriteToAVI cvWriteFrame
#define cvAddSearchPath(path)
#define cvvInitSystem cvInitSystem
#define cvvNamedWindow cvNamedWindow
@@ -571,12 +217,9 @@ CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
#define cvvResizeWindow cvResizeWindow
#define cvvDestroyWindow cvDestroyWindow
#define cvvCreateTrackbar cvCreateTrackbar
#define cvvLoadImage(name) cvLoadImage((name),1)
#define cvvSaveImage cvSaveImage
#define cvvAddSearchPath cvAddSearchPath
#define cvvWaitKey(name) cvWaitKey(0)
#define cvvWaitKeyEx(name,delay) cvWaitKey(delay)
#define cvvConvertImage cvConvertImage
#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
#define set_preprocess_func cvSetPreprocessFuncWin32
#define set_postprocess_func cvSetPostprocessFuncWin32

View File

@@ -1,49 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/core.hpp"
#import "opencv2/highgui/cap_ios.h"
UIImage* MatToUIImage(const cv::Mat& image);
void UIImageToMat(const UIImage* image,
cv::Mat& m, bool alphaExist = false);

View File

@@ -1,40 +0,0 @@
#include "perf_precomp.hpp"
#if BUILD_WITH_VIDEO_INPUT_SUPPORT
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
typedef perf::TestBaseWithParam<std::string> VideoCapture_Reading;
#if defined(HAVE_MSMF)
// MPEG2 is not supported by Media Foundation yet
// http://social.msdn.microsoft.com/Forums/en-US/mediafoundationdevelopment/thread/39a36231-8c01-40af-9af5-3c105d684429
PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi",
"highgui/video/big_buck_bunny.mov",
"highgui/video/big_buck_bunny.mp4",
"highgui/video/big_buck_bunny.wmv" ) )
#else
PERF_TEST_P(VideoCapture_Reading, ReadFile, testing::Values( "highgui/video/big_buck_bunny.avi",
"highgui/video/big_buck_bunny.mov",
"highgui/video/big_buck_bunny.mp4",
"highgui/video/big_buck_bunny.mpg",
"highgui/video/big_buck_bunny.wmv" ) )
#endif
{
string filename = getDataPath(GetParam());
VideoCapture cap;
TEST_CYCLE() cap.open(filename);
bool dummy = cap.isOpened();
SANITY_CHECK(dummy);
}
#endif // BUILD_WITH_VIDEO_INPUT_SUPPORT

View File

@@ -1,3 +0,0 @@
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(highgui)

View File

@@ -1,40 +0,0 @@
#include "perf_precomp.hpp"
#if BUILD_WITH_VIDEO_OUTPUT_SUPPORT
using namespace std;
using namespace cv;
using namespace perf;
using std::tr1::make_tuple;
using std::tr1::get;
typedef std::tr1::tuple<std::string, bool> VideoWriter_Writing_t;
typedef perf::TestBaseWithParam<VideoWriter_Writing_t> VideoWriter_Writing;
PERF_TEST_P(VideoWriter_Writing, WriteFrame,
testing::Combine( testing::Values( "python/images/QCIF_00.bmp",
"python/images/QCIF_01.bmp",
"python/images/QCIF_02.bmp",
"python/images/QCIF_03.bmp",
"python/images/QCIF_04.bmp",
"python/images/QCIF_05.bmp" ),
testing::Bool()))
{
string filename = getDataPath(get<0>(GetParam()));
bool isColor = get<1>(GetParam());
Mat image = imread(filename, 1);
#if defined(HAVE_MSMF) && !defined(HAVE_VFW) && !defined(HAVE_FFMPEG) // VFW has greater priority
VideoWriter writer(cv::tempfile(".wmv"), VideoWriter::fourcc('W', 'M', 'V', '3'),
25, cv::Size(image.cols, image.rows), isColor);
#else
VideoWriter writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'),
25, cv::Size(image.cols, image.rows), isColor);
#endif
TEST_CYCLE() { image = imread(filename, 1); writer << image; }
bool dummy = writer.isOpened();
SANITY_CHECK(dummy);
}
#endif // BUILD_WITH_VIDEO_OUTPUT_SUPPORT

View File

@@ -1,49 +0,0 @@
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF) || \
defined(HAVE_VFW)
/*defined(HAVE_OPENNI) too specialized */ \
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
#if /*defined(HAVE_XINE) || */\
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF) || \
defined(HAVE_VFW)
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0
#endif
#endif

View File

@@ -1,678 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "cap_intelperc.hpp"
#include "cap_dshow.hpp"
#if defined _M_X64 && defined _MSC_VER && !defined CV_ICC
#pragma optimize("",off)
#pragma warning(disable: 4748)
#endif
namespace cv
{
template<> void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const
{ cvReleaseCapture(&obj); }
template<> void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const
{ cvReleaseVideoWriter(&obj); }
}
/************************* Reading AVIs & Camera data **************************/
CV_IMPL void cvReleaseCapture( CvCapture** pcapture )
{
if( pcapture && *pcapture )
{
delete *pcapture;
*pcapture = 0;
}
}
CV_IMPL IplImage* cvQueryFrame( CvCapture* capture )
{
if(!capture)
return 0;
if(!capture->grabFrame())
return 0;
return capture->retrieveFrame(0);
}
CV_IMPL int cvGrabFrame( CvCapture* capture )
{
return capture ? capture->grabFrame() : 0;
}
CV_IMPL IplImage* cvRetrieveFrame( CvCapture* capture, int idx )
{
return capture ? capture->retrieveFrame(idx) : 0;
}
CV_IMPL double cvGetCaptureProperty( CvCapture* capture, int id )
{
return capture ? capture->getProperty(id) : 0;
}
CV_IMPL int cvSetCaptureProperty( CvCapture* capture, int id, double value )
{
return capture ? capture->setProperty(id, value) : 0;
}
CV_IMPL int cvGetCaptureDomain( CvCapture* capture)
{
return capture ? capture->getCaptureDomain() : 0;
}
/**
* Camera dispatching method: index is the camera number.
* If given an index from 0 to 99, it tries to find the first
* API that can access a given camera index.
* Add multiples of 100 to select an API.
*/
CV_IMPL CvCapture * cvCreateCameraCapture (int index)
{
int domains[] =
{
#ifdef HAVE_MSMF
CV_CAP_MSMF,
#endif
#if 1
CV_CAP_IEEE1394, // identical to CV_CAP_DC1394
#endif
#ifdef HAVE_TYZX
CV_CAP_STEREO,
#endif
#ifdef HAVE_PVAPI
CV_CAP_PVAPI,
#endif
#if 1
CV_CAP_VFW, // identical to CV_CAP_V4L
#endif
#ifdef HAVE_MIL
CV_CAP_MIL,
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
CV_CAP_QT,
#endif
#ifdef HAVE_UNICAP
CV_CAP_UNICAP,
#endif
#ifdef HAVE_OPENNI
CV_CAP_OPENNI,
#endif
#ifdef HAVE_ANDROID_NATIVE_CAMERA
CV_CAP_ANDROID,
#endif
#ifdef HAVE_XIMEA
CV_CAP_XIAPI,
#endif
#ifdef HAVE_AVFOUNDATION
CV_CAP_AVFOUNDATION,
#endif
#ifdef HAVE_GIGE_API
CV_CAP_GIGANETIX,
#endif
#ifdef HAVE_INTELPERC
CV_CAP_INTELPERC,
#endif
-1
};
// interpret preferred interface (0 = autodetect)
int pref = (index / 100) * 100;
if (pref)
{
domains[0]=pref;
index %= 100;
domains[1]=-1;
}
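// e.g. index == 203 selects domain 200 (CV_CAP_VFW / CV_CAP_V4L) and camera 3;
// an index below 100 keeps the full domain list (autodetect)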
// try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++)
{
#if defined(HAVE_MSMF) || \
defined(HAVE_TYZX) || \
defined(HAVE_VFW) || \
defined(HAVE_LIBV4L) || \
defined(HAVE_CAMV4L) || \
defined(HAVE_CAMV4L2) || \
defined(HAVE_VIDEOIO) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_DC1394_2) || \
defined(HAVE_DC1394) || \
defined(HAVE_CMU1394) || \
defined(HAVE_MIL) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_UNICAP) || \
defined(HAVE_PVAPI) || \
defined(HAVE_OPENNI) || \
defined(HAVE_XIMEA) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_ANDROID_NATIVE_CAMERA) || \
defined(HAVE_GIGE_API) || \
defined(HAVE_INTELPERC) || \
(0)
// local variable to hold the created capture
CvCapture *capture;
#endif
switch (domains[i])
{
#ifdef HAVE_MSMF
case CV_CAP_MSMF:
capture = cvCreateCameraCapture_MSMF (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_TYZX
case CV_CAP_STEREO:
capture = cvCreateCameraCapture_TYZX (index);
if (capture)
return capture;
break;
#endif
case CV_CAP_VFW:
#ifdef HAVE_VFW
capture = cvCreateCameraCapture_VFW (index);
if (capture)
return capture;
#endif
#if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO
capture = cvCreateCameraCapture_V4L (index);
if (capture)
return capture;
#endif
#ifdef HAVE_GSTREAMER
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L2, 0);
if (capture)
return capture;
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L, 0);
if (capture)
return capture;
#endif
break; //CV_CAP_VFW
case CV_CAP_FIREWIRE:
#ifdef HAVE_DC1394_2
capture = cvCreateCameraCapture_DC1394_2 (index);
if (capture)
return capture;
#endif
#ifdef HAVE_DC1394
capture = cvCreateCameraCapture_DC1394 (index);
if (capture)
return capture;
#endif
#ifdef HAVE_CMU1394
capture = cvCreateCameraCapture_CMU (index);
if (capture)
return capture;
#endif
#if defined(HAVE_GSTREAMER) && 0
//Re-enable when GStreamer 1394 support lands in the backend code
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_1394, 0);
if (capture)
return capture;
#endif
break; //CV_CAP_FIREWIRE
#ifdef HAVE_MIL
case CV_CAP_MIL:
capture = cvCreateCameraCapture_MIL (index);
if (capture)
return capture;
break;
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
case CV_CAP_QT:
capture = cvCreateCameraCapture_QT (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_UNICAP
case CV_CAP_UNICAP:
capture = cvCreateCameraCapture_Unicap (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_PVAPI
case CV_CAP_PVAPI:
capture = cvCreateCameraCapture_PvAPI (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_OPENNI
case CV_CAP_OPENNI:
capture = cvCreateCameraCapture_OpenNI (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_ANDROID_NATIVE_CAMERA
case CV_CAP_ANDROID:
capture = cvCreateCameraCapture_Android (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_XIMEA
case CV_CAP_XIAPI:
capture = cvCreateCameraCapture_XIMEA (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_AVFOUNDATION
case CV_CAP_AVFOUNDATION:
capture = cvCreateCameraCapture_AVFoundation (index);
if (capture)
return capture;
break;
#endif
#ifdef HAVE_GIGE_API
case CV_CAP_GIGANETIX:
capture = cvCreateCameraCapture_Giganetix (index);
if (capture)
return capture;
break; // CV_CAP_GIGANETIX
#endif
}
}
// failed to open a camera
return 0;
}
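/* Illustrative usage sketch (not part of the original file): how the index
convention above is typically used from application code. Wrapped in #if 0
so it does not affect the build; error handling omitted. */
#if 0
static void exampleSelectCameraBackend()
{
    CvCapture* capAuto = cvCreateCameraCapture(0);              // autodetect, first camera
    CvCapture* capV4L  = cvCreateCameraCapture(CV_CAP_VFW + 1); // force VFW/V4L, second camera
    cvReleaseCapture(&capAuto); // safe on failure: cvReleaseCapture checks for null
    cvReleaseCapture(&capV4L);
}
#endif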
/**
* Videoreader dispatching method: it tries to find the first
* API that can access a given filename.
*/
CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
{
CvCapture * result = 0;
if (! result)
result = cvCreateFileCapture_FFMPEG_proxy (filename);
#ifdef HAVE_VFW
if (! result)
result = cvCreateFileCapture_VFW (filename);
#endif
#ifdef HAVE_MSMF
if (! result)
result = cvCreateFileCapture_MSMF (filename);
#endif
#ifdef HAVE_XINE
if (! result)
result = cvCreateFileCapture_XINE (filename);
#endif
#ifdef HAVE_GSTREAMER
if (! result)
result = cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename);
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
if (! result)
result = cvCreateFileCapture_QT (filename);
#endif
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateFileCapture_AVFoundation (filename);
#endif
#ifdef HAVE_OPENNI
if (! result)
result = cvCreateFileCapture_OpenNI (filename);
#endif
if (! result)
result = cvCreateFileCapture_Images (filename);
return result;
}
/**
* Videowriter dispatching method: it tries to find the first
* API that can write a given stream.
*/
CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color )
{
//CV_FUNCNAME( "cvCreateVideoWriter" );
CvVideoWriter *result = 0;
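// fourcc == 0 or fps == 0 selects the image-sequence writer, which stores
// each frame as a separate numbered image file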
if(!fourcc || !fps)
result = cvCreateVideoWriter_Images(filename);
if(!result)
result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
#ifdef HAVE_VFW
if(!result)
result = cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_MSMF
if (!result)
result = cvCreateVideoWriter_MSMF(filename, fourcc, fps, frameSize, is_color);
#endif
/* #ifdef HAVE_XINE
if(!result)
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
#endif
*/
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
if(!result)
result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_GSTREAMER
if (! result)
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
#endif
if(!result)
result = cvCreateVideoWriter_Images(filename);
return result;
}
CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
{
return writer ? writer->writeFrame(image) : 0;
}
CV_IMPL void cvReleaseVideoWriter( CvVideoWriter** pwriter )
{
if( pwriter && *pwriter )
{
delete *pwriter;
*pwriter = 0;
}
}
namespace cv
{
VideoCapture::VideoCapture()
{}
VideoCapture::VideoCapture(const String& filename)
{
open(filename);
}
VideoCapture::VideoCapture(int device)
{
open(device);
}
VideoCapture::~VideoCapture()
{
icap.release();
cap.release();
}
bool VideoCapture::open(const String& filename)
{
if (isOpened()) release();
cap.reset(cvCreateFileCapture(filename.c_str()));
return isOpened();
}
bool VideoCapture::open(int device)
{
if (isOpened()) release();
icap = createCameraCapture(device);
if (!icap.empty())
return true;
cap.reset(cvCreateCameraCapture(device));
return isOpened();
}
bool VideoCapture::isOpened() const
{
return (!cap.empty() || !icap.empty());
}
void VideoCapture::release()
{
icap.release();
cap.release();
}
bool VideoCapture::grab()
{
if (!icap.empty())
return icap->grabFrame();
return cvGrabFrame(cap) != 0;
}
bool VideoCapture::retrieve(OutputArray image, int channel)
{
if (!icap.empty())
return icap->retrieveFrame(channel, image);
IplImage* _img = cvRetrieveFrame(cap, channel);
if( !_img )
{
image.release();
return false;
}
if(_img->origin == IPL_ORIGIN_TL)
cv::cvarrToMat(_img).copyTo(image);
else
{
Mat temp = cv::cvarrToMat(_img);
flip(temp, image, 0);
}
return true;
}
bool VideoCapture::read(OutputArray image)
{
if(grab())
retrieve(image);
else
image.release();
return !image.empty();
}
VideoCapture& VideoCapture::operator >> (Mat& image)
{
read(image);
return *this;
}
VideoCapture& VideoCapture::operator >> (UMat& image)
{
read(image);
return *this;
}
bool VideoCapture::set(int propId, double value)
{
if (!icap.empty())
return icap->setProperty(propId, value);
return cvSetCaptureProperty(cap, propId, value) != 0;
}
double VideoCapture::get(int propId)
{
if (!icap.empty())
return icap->getProperty(propId);
return cvGetCaptureProperty(cap, propId);
}
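/* Illustrative sketch (not part of the original file): the typical capture
loop built on the wrapper above; read() combines grab() and retrieve().
Wrapped in #if 0 so it does not affect the build. */
#if 0
static void exampleCaptureLoop()
{
    VideoCapture cap(0); // open the default camera
    Mat frame;
    while (cap.isOpened() && cap.read(frame))
    {
        // process frame here
    }
}
#endif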
Ptr<IVideoCapture> VideoCapture::createCameraCapture(int index)
{
int domains[] =
{
#ifdef HAVE_DSHOW
CV_CAP_DSHOW,
#endif
#ifdef HAVE_INTELPERC
CV_CAP_INTELPERC,
#endif
-1, -1
};
// interpret preferred interface (0 = autodetect)
int pref = (index / 100) * 100;
if (pref)
{
domains[0]=pref;
index %= 100;
domains[1]=-1;
}
// try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++)
{
#if defined(HAVE_DSHOW) || \
defined(HAVE_INTELPERC) || \
(0)
Ptr<IVideoCapture> capture;
switch (domains[i])
{
#ifdef HAVE_DSHOW
case CV_CAP_DSHOW:
capture = Ptr<IVideoCapture>(new cv::VideoCapture_DShow(index));
if (capture)
return capture;
break; // CV_CAP_DSHOW
#endif
#ifdef HAVE_INTELPERC
case CV_CAP_INTELPERC:
capture = Ptr<IVideoCapture>(new cv::VideoCapture_IntelPerC());
if (capture)
return capture;
break; // CV_CAP_INTEL_PERC
#endif
}
#endif
}
// failed to open a camera
return Ptr<IVideoCapture>();
}
VideoWriter::VideoWriter()
{}
VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
open(filename, _fourcc, fps, frameSize, isColor);
}
void VideoWriter::release()
{
writer.release();
}
VideoWriter::~VideoWriter()
{
release();
}
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
writer.reset(cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor));
return isOpened();
}
bool VideoWriter::isOpened() const
{
return !writer.empty();
}
void VideoWriter::write(const Mat& image)
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
}
VideoWriter& VideoWriter::operator << (const Mat& image)
{
write(image);
return *this;
}
int VideoWriter::fourcc(char c1, char c2, char c3, char c4)
{
return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
}
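// Worked example: fourcc('X','V','I','D') packs the characters little-endian:
// 0x58 | (0x56 << 8) | (0x49 << 16) | (0x44 << 24) == 0x44495658,
// the XVID tag passed to the AVI writer in the performance tests above.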
}

View File

@@ -1,554 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_ANDROID_NATIVE_CAMERA
#include <opencv2/imgproc.hpp>
#include <pthread.h>
#include <android/log.h>
#include <camera_activity.hpp>
#undef LOG_TAG
#undef LOGD
#undef LOGE
#undef LOGI
#define LOG_TAG "OpenCV::camera"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
class HighguiAndroidCameraActivity;
class CvCapture_Android : public CvCapture
{
public:
CvCapture_Android(int);
virtual ~CvCapture_Android();
virtual double getProperty(int propIdx);
virtual bool setProperty(int probIdx, double propVal);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int outputType);
virtual int getCaptureDomain() { return CV_CAP_ANDROID; }
bool isOpened() const;
protected:
struct OutputMap
{
public:
cv::Mat mat;
IplImage* getIplImagePtr();
private:
IplImage iplHeader;
};
CameraActivity* m_activity;
//raw from camera
int m_width;
int m_height;
cv::Mat m_frameYUV420;
cv::Mat m_frameYUV420next;
enum YUVformat
{
noformat = 0,
yuv420sp,
yvu420sp,
yuvUnknown
};
YUVformat m_frameFormat;
void setFrame(const void* buffer, int bufferSize);
private:
bool m_isOpened;
bool m_CameraParamsChanged;
//frames counter for statistics
int m_framesGrabbed;
//cached converted frames
OutputMap m_frameGray;
OutputMap m_frameColor;
bool m_hasGray;
bool m_hasColor;
enum CvCapture_Android_DataState {
CVCAPTURE_ANDROID_STATE_NO_FRAME=0,
CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED,
CVCAPTURE_ANDROID_STATE_HAS_FRAME_GRABBED
};
volatile CvCapture_Android_DataState m_dataState;
//synchronization
pthread_mutex_t m_nextFrameMutex;
pthread_cond_t m_nextFrameCond;
volatile bool m_waitingNextFrame;
volatile bool m_shouldAutoGrab;
void prepareCacheForYUV(int width, int height);
bool convertYUV2Grey(int width, int height, const unsigned char* yuv, cv::Mat& resmat);
bool convertYUV2BGR(int width, int height, const unsigned char* yuv, cv::Mat& resmat, bool inRGBorder, bool withAlpha);
friend class HighguiAndroidCameraActivity;
};
class HighguiAndroidCameraActivity : public CameraActivity
{
public:
HighguiAndroidCameraActivity(CvCapture_Android* capture)
{
m_capture = capture;
m_framesReceived = 0;
}
virtual bool onFrameBuffer(void* buffer, int bufferSize)
{
if(isConnected() && buffer != 0 && bufferSize > 0)
{
m_framesReceived++;
if (m_capture->m_waitingNextFrame || m_capture->m_shouldAutoGrab)
{
pthread_mutex_lock(&m_capture->m_nextFrameMutex);
m_capture->setFrame(buffer, bufferSize);
pthread_cond_broadcast(&m_capture->m_nextFrameCond);
pthread_mutex_unlock(&m_capture->m_nextFrameMutex);
}
return true;
}
return false;
}
void LogFramesRate()
{
LOGI("FRAMES received: %d grabbed: %d", m_framesReceived, m_capture->m_framesGrabbed);
}
private:
CvCapture_Android* m_capture;
int m_framesReceived;
};
IplImage* CvCapture_Android::OutputMap::getIplImagePtr()
{
if( mat.empty() )
return 0;
iplHeader = IplImage(mat);
return &iplHeader;
}
CvCapture_Android::CvCapture_Android(int cameraId)
{
//defaults
m_width = 0;
m_height = 0;
m_activity = 0;
m_isOpened = false;
// m_frameYUV420 = 0;
// m_frameYUV420next = 0;
m_hasGray = false;
m_hasColor = false;
m_dataState = CVCAPTURE_ANDROID_STATE_NO_FRAME;
m_waitingNextFrame = false;
m_shouldAutoGrab = false;
m_framesGrabbed = 0;
m_CameraParamsChanged = false;
m_frameFormat = noformat;
//try connect to camera
LOGD("CvCapture_Android::CvCapture_Android(%i)", cameraId);
m_activity = new HighguiAndroidCameraActivity(this);
if (m_activity == 0) return;
pthread_mutex_init(&m_nextFrameMutex, NULL);
pthread_cond_init (&m_nextFrameCond, NULL);
CameraActivity::ErrorCode errcode = m_activity->connect(cameraId);
if(errcode == CameraActivity::NO_ERROR)
m_isOpened = true;
else
{
LOGE("Native_camera returned opening error: %d", errcode);
delete m_activity;
m_activity = 0;
}
}
bool CvCapture_Android::isOpened() const
{
return m_isOpened;
}
CvCapture_Android::~CvCapture_Android()
{
if (m_activity)
{
((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate();
pthread_mutex_lock(&m_nextFrameMutex);
// unsigned char *tmp1=m_frameYUV420;
// unsigned char *tmp2=m_frameYUV420next;
// m_frameYUV420 = 0;
// m_frameYUV420next = 0;
// delete tmp1;
// delete tmp2;
m_dataState=CVCAPTURE_ANDROID_STATE_NO_FRAME;
pthread_cond_broadcast(&m_nextFrameCond);
pthread_mutex_unlock(&m_nextFrameMutex);
//m_activity->disconnect() is called automatically inside the destructor
delete m_activity;
m_activity = 0;
pthread_mutex_destroy(&m_nextFrameMutex);
pthread_cond_destroy(&m_nextFrameCond);
}
}
double CvCapture_Android::getProperty( int propIdx )
{
switch ( propIdx )
{
case CV_CAP_PROP_FRAME_WIDTH:
return (double)m_activity->getFrameWidth();
case CV_CAP_PROP_FRAME_HEIGHT:
return (double)m_activity->getFrameHeight();
case CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_SUPPORTED_PREVIEW_SIZES_STRING);
case CV_CAP_PROP_PREVIEW_FORMAT:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_PREVIEW_FORMAT_STRING);
case CV_CAP_PROP_FPS:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FPS);
case CV_CAP_PROP_EXPOSURE:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_EXPOSURE);
case CV_CAP_PROP_ANDROID_FLASH_MODE:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FLASH_MODE);
case CV_CAP_PROP_ANDROID_FOCUS_MODE:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_MODE);
case CV_CAP_PROP_ANDROID_WHITE_BALANCE:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_WHITE_BALANCE);
case CV_CAP_PROP_ANDROID_ANTIBANDING:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_ANTIBANDING);
case CV_CAP_PROP_ANDROID_FOCAL_LENGTH:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH);
case CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR);
case CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL);
case CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR);
case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK);
case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK);
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to GET unsupported camera property." );
break;
}
return -1.0;
}
bool CvCapture_Android::setProperty( int propIdx, double propValue )
{
bool res = false;
if( isOpened() )
{
switch ( propIdx )
{
case CV_CAP_PROP_FRAME_WIDTH:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_FRAMEWIDTH, propValue);
break;
case CV_CAP_PROP_FRAME_HEIGHT:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT, propValue);
break;
case CV_CAP_PROP_AUTOGRAB:
m_shouldAutoGrab=(propValue != 0);
break;
case CV_CAP_PROP_EXPOSURE:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_EXPOSURE, propValue);
break;
case CV_CAP_PROP_ANDROID_FLASH_MODE:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_FLASH_MODE, propValue);
break;
case CV_CAP_PROP_ANDROID_FOCUS_MODE:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_FOCUS_MODE, propValue);
break;
case CV_CAP_PROP_ANDROID_WHITE_BALANCE:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_WHITE_BALANCE, propValue);
break;
case CV_CAP_PROP_ANDROID_ANTIBANDING:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_ANTIBANDING, propValue);
break;
case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK, propValue);
break;
case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK, propValue);
break;
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to SET unsupported camera property." );
return false;
}
// Only changes in frame size require camera restart
if ((propIdx == CV_CAP_PROP_FRAME_WIDTH) || (propIdx == CV_CAP_PROP_FRAME_HEIGHT))
{ // property for highgui class CvCapture_Android only
m_CameraParamsChanged = true;
}
res = true;
}
return res;
}
bool CvCapture_Android::grabFrame()
{
if( !isOpened() ) {
LOGE("CvCapture_Android::grabFrame(): camera is not opened");
return false;
}
bool res=false;
pthread_mutex_lock(&m_nextFrameMutex);
if (m_CameraParamsChanged)
{
m_activity->applyProperties();
m_CameraParamsChanged = false;
m_dataState = CVCAPTURE_ANDROID_STATE_NO_FRAME; //we will wait for a new frame
}
if (m_dataState != CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED)
{
m_waitingNextFrame = true;
pthread_cond_wait(&m_nextFrameCond, &m_nextFrameMutex);
}
if (m_dataState == CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED)
{
//LOGD("CvCapture_Android::grabFrame: get new frame");
//swap current and new frames
cv::swap(m_frameYUV420, m_frameYUV420next);
//discard cached frames
m_hasGray = false;
m_hasColor = false;
m_dataState=CVCAPTURE_ANDROID_STATE_HAS_FRAME_GRABBED;
m_framesGrabbed++;
res=true;
} else {
LOGE("CvCapture_Android::grabFrame: NO new frame");
}
int res_unlock=pthread_mutex_unlock(&m_nextFrameMutex);
if (res_unlock) {
LOGE("Error in CvCapture_Android::grabFrame: pthread_mutex_unlock returned %d --- probably, this object has been destroyed", res_unlock);
return false;
}
return res;
}
IplImage* CvCapture_Android::retrieveFrame( int outputType )
{
IplImage* image = NULL;
cv::Mat m_frameYUV420_ref = m_frameYUV420;
unsigned char *current_frameYUV420=m_frameYUV420_ref.ptr();
//Attention! All the operations below must take less time than the interval between two camera frames
if (NULL != current_frameYUV420)
{
if (m_frameFormat == noformat)
{
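// For string-valued properties getProperty() packs a const char* into the
// returned double; the union below re-reads that pointer from the bits.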
union {double prop; const char* name;} u;
u.prop = getProperty(CV_CAP_PROP_PREVIEW_FORMAT);
if (0 == strcmp(u.name, "yuv420sp"))
m_frameFormat = yuv420sp;
else if (0 == strcmp(u.name, "yvu420sp"))
m_frameFormat = yvu420sp;
else
m_frameFormat = yuvUnknown;
}
switch(outputType)
{
case CV_CAP_ANDROID_GREY_FRAME:
if (!m_hasGray)
if (!(m_hasGray = convertYUV2Grey(m_width, m_height, current_frameYUV420, m_frameGray.mat)))
return NULL;
image = m_frameGray.getIplImagePtr();
break;
case CV_CAP_ANDROID_COLOR_FRAME_BGR: case CV_CAP_ANDROID_COLOR_FRAME_RGB:
if (!m_hasColor)
if (!(m_hasColor = convertYUV2BGR(m_width, m_height, current_frameYUV420, m_frameColor.mat, outputType == CV_CAP_ANDROID_COLOR_FRAME_RGB, false)))
return NULL;
image = m_frameColor.getIplImagePtr();
break;
case CV_CAP_ANDROID_COLOR_FRAME_BGRA: case CV_CAP_ANDROID_COLOR_FRAME_RGBA:
if (!m_hasColor)
if (!(m_hasColor = convertYUV2BGR(m_width, m_height, current_frameYUV420, m_frameColor.mat, outputType == CV_CAP_ANDROID_COLOR_FRAME_RGBA, true)))
return NULL;
image = m_frameColor.getIplImagePtr();
break;
default:
LOGE("Unsupported frame output format: %d", outputType);
CV_Error( CV_StsOutOfRange, "Output frame format is not supported." );
image = NULL;
break;
}
}
return image;
}
//Attention: this method must be called only while m_nextFrameMutex is locked
void CvCapture_Android::setFrame(const void* buffer, int bufferSize)
{
int width = m_activity->getFrameWidth();
int height = m_activity->getFrameHeight();
int expectedSize = (width * height * 3) >> 1;
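// YUV420 semi-planar is 3/2 bytes per pixel: a full-resolution Y plane
// (width*height bytes) plus interleaved chroma at quarter resolution
// (width*height/2 bytes); e.g. 640x480 gives 460800 bytes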
if ( expectedSize != bufferSize)
{
LOGE("ERROR reading YUV buffer: width=%d, height=%d, size=%d, receivedSize=%d", width, height, expectedSize, bufferSize);
return;
}
//allocate memory if needed
prepareCacheForYUV(width, height);
//copy data
cv::Mat m_frameYUV420next_ref = m_frameYUV420next;
memcpy(m_frameYUV420next_ref.ptr(), buffer, bufferSize);
// LOGD("CvCapture_Android::setFrame -- memcpy is done");
// ((HighguiAndroidCameraActivity*)m_activity)->LogFramesRate();
m_dataState = CVCAPTURE_ANDROID_STATE_HAS_NEW_FRAME_UNGRABBED;
m_waitingNextFrame = false; //clear the flag: no more frames are required at the moment
}
//Attention: this method must be called only while m_nextFrameMutex is locked
void CvCapture_Android::prepareCacheForYUV(int width, int height)
{
if (width != m_width || height != m_height)
{
LOGD("CvCapture_Android::prepareCacheForYUV: Changing size of buffers: from width=%d height=%d to width=%d height=%d", m_width, m_height, width, height);
m_width = width;
m_height = height;
/*
unsigned char *tmp = m_frameYUV420next;
m_frameYUV420next = new unsigned char [width * height * 3 / 2];
if (tmp != NULL)
{
delete[] tmp;
}
tmp = m_frameYUV420;
m_frameYUV420 = new unsigned char [width * height * 3 / 2];
if (tmp != NULL)
{
delete[] tmp;
}*/
m_frameYUV420.create(height * 3 / 2, width, CV_8UC1);
m_frameYUV420next.create(height * 3 / 2, width, CV_8UC1);
}
}
bool CvCapture_Android::convertYUV2Grey(int width, int height, const unsigned char* yuv, cv::Mat& resmat)
{
if (yuv == 0) return false;
if (m_frameFormat != yuv420sp && m_frameFormat != yvu420sp) return false;
#define ALWAYS_COPY_GRAY 0
#if ALWAYS_COPY_GRAY
resmat.create(height, width, CV_8UC1);
unsigned char* matBuff = resmat.ptr<unsigned char> (0);
memcpy(matBuff, yuv, width * height);
#else
resmat = cv::Mat(height, width, CV_8UC1, (void*)yuv);
#endif
return !resmat.empty();
}
bool CvCapture_Android::convertYUV2BGR(int width, int height, const unsigned char* yuv, cv::Mat& resmat, bool inRGBorder, bool withAlpha)
{
if (yuv == 0) return false;
if (m_frameFormat != yuv420sp && m_frameFormat != yvu420sp) return false;
CV_Assert(width % 2 == 0 && height % 2 == 0);
cv::Mat src(height*3/2, width, CV_8UC1, (void*)yuv);
if (m_frameFormat == yuv420sp)
cv::cvtColor(src, resmat, inRGBorder ? CV_YUV420sp2RGB : CV_YUV420sp2BGR, withAlpha ? 4 : 3);
else if (m_frameFormat == yvu420sp)
cv::cvtColor(src, resmat, inRGBorder ? CV_YUV2RGB_NV12 : CV_YUV2BGR_NV12, withAlpha ? 4 : 3); // NV12 for both channel orders; the original mixed NV21/NV12 in one ternary
return !resmat.empty();
}
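// Note on the two formats handled above: both are 4:2:0 semi-planar layouts
// with a full-resolution Y plane followed by one interleaved chroma plane;
// NV12 stores the chroma pairs as U,V while NV21 (Android's default
// "yuv420sp" preview format) stores them as V,U.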
CvCapture* cvCreateCameraCapture_Android( int cameraId )
{
CvCapture_Android* capture = new CvCapture_Android(cameraId);
if( capture->isOpened() )
return capture;
delete capture;
return 0;
}
#endif

File diff suppressed because it is too large

View File

@@ -1,551 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef WIN32
/****************** Capturing video from camera via CMU lib *******************/
#ifdef HAVE_CMU1394
// This FireWire capability was added by Philip Gruebele (pgruebele@cox.net).
// For this to work you need to install the CMU firewire DCAM drivers,
// located at http://www-2.cs.cmu.edu/~iwan/1394/.
#include "1394camera.h"
class CvCaptureCAM_CMU : public CvCapture
{
public:
CvCaptureCAM_CMU()
{
index = -1;
image = 0;
}
virtual ~CvCaptureCAM_CMU()
{
close();
}
virtual bool open(int cameraId);
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
protected:
C1394Camera* camera();
CvSize getSize();
int getDepth();
int getNChannels();
bool setVideoSize(int, int);
bool setMode(int mode);
bool setFrameRate(int rate);
bool setFormat(int format);
int fps; // 0-5
int mode; // 0-7
int format; // 0-2, 7 ?
int index;
IplImage* image;
};
// CMU 1394 camera stuff.
// This FireWire capability was added by Philip Gruebele (pgruebele@cox.net)
// and modified by Roman Stanchak (rstanchak@yahoo.com).
// For this to work you need to install the CMU firewire DCAM drivers,
// located at http://www-2.cs.cmu.edu/~iwan/1394/.
#define CMU_MAX_CAMERAS 20
int CMU_numCameras = 0;
int CMU_numActiveCameras = 0;
bool CMU_useCameraFlags[CMU_MAX_CAMERAS];
C1394Camera *CMU_theCamera = 0;
// stupid defines for mode, format, FPS
#define CV_CAP_IEEE1394_FPS_1_875 0
#define CV_CAP_IEEE1394_FPS_3_75 1
#define CV_CAP_IEEE1394_FPS_7_5 2
#define CV_CAP_IEEE1394_FPS_15 3
#define CV_CAP_IEEE1394_FPS_30 4
#define CV_CAP_IEEE1394_FPS_60 5
// index by size, color
#define CV_CAP_IEEE1394_COLOR_MONO 0
#define CV_CAP_IEEE1394_COLOR_MONO16 1
#define CV_CAP_IEEE1394_COLOR_YUV444 2
#define CV_CAP_IEEE1394_COLOR_YUV422 3
#define CV_CAP_IEEE1394_COLOR_YUV411 4
#define CV_CAP_IEEE1394_COLOR_RGB 5
#define CV_CAP_IEEE1394_SIZE_160X120 0
#define CV_CAP_IEEE1394_SIZE_320X240 1
#define CV_CAP_IEEE1394_SIZE_640X480 2
#define CV_CAP_IEEE1394_SIZE_800X600 3
#define CV_CAP_IEEE1394_SIZE_1024X768 4
#define CV_CAP_IEEE1394_SIZE_1280X960 5
#define CV_CAP_IEEE1394_SIZE_1600X1200 6
// given color, size, output format
// 1 16 444 422 411 RGB
static char CV_CAP_IEEE1394_FORMAT[7][6] =
{
{-1, -1, 0, -1, -1, -1}, // 160x120
{-1, -1, -1, 0, -1, -1}, // 320x240
{ 0, 0, -1, 0, 0, 0}, // 640x480
{ 1, 1, -1, 1, -1, 1}, // 800x600
{ 1, 1, -1, 1, -1, 1}, // 1024x768
{ 2, 2, -1, 2, -1, 2}, // 1280x960
{ 2, 2, -1, 2, -1, 2} // 1600x1200
};
// given color, size, output corresponding mode
static char CV_CAP_IEEE1394_MODE[7][6] =
{
{-1, -1, 0, -1, -1, -1}, // 160x120
{-1, -1, -1, 1, -1, -1}, // 320x240
{ 5, 6, -1, 3, 2, 4}, // 640x480
{ 2, 6, -1, 0, -1, 1}, // 800x600
{ 5, 7, -1, 3, -1, 4}, // 1024x768
{ 2, 6, -1, 0, -1, 1}, // 1280x960
{ 5, 7, -1, 3, -1, 4} // 1600x1200
};
// given format, mode, return COLOR
static char CV_CAP_IEEE1394_COLOR[2][8] =
{
{
CV_CAP_IEEE1394_COLOR_YUV444,
CV_CAP_IEEE1394_COLOR_YUV422,
CV_CAP_IEEE1394_COLOR_YUV411,
CV_CAP_IEEE1394_COLOR_YUV422,
CV_CAP_IEEE1394_COLOR_RGB,
CV_CAP_IEEE1394_COLOR_MONO,
CV_CAP_IEEE1394_COLOR_MONO16
},
{
CV_CAP_IEEE1394_COLOR_YUV422,
CV_CAP_IEEE1394_COLOR_RGB,
CV_CAP_IEEE1394_COLOR_MONO,
CV_CAP_IEEE1394_COLOR_YUV422,
CV_CAP_IEEE1394_COLOR_RGB,
CV_CAP_IEEE1394_COLOR_MONO,
CV_CAP_IEEE1394_COLOR_MONO16,
CV_CAP_IEEE1394_COLOR_MONO16
}
};
// convert frame rate to suitable enum
/*static int icvFrameRateToIndex_CMU(double framerate){
if(framerate > 30) return CV_CAP_IEEE1394_FPS_60;
else if(framerate > 15) return CV_CAP_IEEE1394_FPS_30;
else if(framerate > 7.5) return CV_CAP_IEEE1394_FPS_15;
else if(framerate > 3.75) return CV_CAP_IEEE1394_FPS_7_5;
else if(framerate > 1.875) return CV_CAP_IEEE1394_FPS_3_75;
return CV_CAP_IEEE1394_FPS_1_875;
}*/
#if _MSC_VER >= 1200
#pragma comment(lib,"1394camera.lib")
#endif
C1394Camera* CvCaptureCAM_CMU::camera()
{
return CMU_theCamera && index >= 0 ? &CMU_theCamera[index] : 0;
}
// return the size of the image
CvSize CvCaptureCAM_CMU::getSize()
{
C1394Camera* cmucam = camera();
unsigned long width = 0, height = 0;
cmucam->GetVideoFrameDimensions( &width, &height );
return cvSize((int)width, (int)height);
}
// return the opencv depth flag corresponding to the camera format
int CvCaptureCAM_CMU::getDepth()
{
C1394Camera* cmucam = camera();
int format = cmucam->GetVideoFormat();
int mode = cmucam->GetVideoMode();
// TODO
if( format==7 ) {
assert(0);
return 1;
}
// irrelevant to depth
if( format > 1 )
format = 1;
if( CV_CAP_IEEE1394_COLOR[format][mode]==CV_CAP_IEEE1394_COLOR_MONO16 )
return IPL_DEPTH_16S;
return IPL_DEPTH_8U;
}
// return the number of channels for camera
int CvCaptureCAM_CMU::getNChannels()
{
C1394Camera* cmucam = camera();
int format = cmucam->GetVideoFormat();
int mode = cmucam->GetVideoMode();
if( format==7 ){
assert(0);
return 1;
}
// irrelevant to nchannels
if( format > 1 )
format = 1;
switch(CV_CAP_IEEE1394_COLOR[format][mode]){
case CV_CAP_IEEE1394_COLOR_RGB:
return 3;
case CV_CAP_IEEE1394_COLOR_MONO:
case CV_CAP_IEEE1394_COLOR_MONO16:
return 1;
case CV_CAP_IEEE1394_COLOR_YUV422:
case CV_CAP_IEEE1394_COLOR_YUV444:
case CV_CAP_IEEE1394_COLOR_YUV411:
return 3;
default:
;
}
return -1;
}
bool CvCaptureCAM_CMU::open( int _index )
{
close();
// on first use, allocate all available cameras
if( CMU_numCameras == 0 )
{
CMU_numActiveCameras = 0;
CMU_theCamera = new C1394Camera[CMU_MAX_CAMERAS];
////////////////////////////////////////////////////////////////////////////////////////////////////////
// create all cameras
try
{
// create camera0
if( CMU_theCamera[0].CheckLink() != CAM_SUCCESS )
throw 1;
// we have one pin per camera
CMU_numCameras = CMU_theCamera[0].GetNumberCameras();
// allocate remaining cameras
for(int i = 1; i < CMU_numCameras && i<CMU_MAX_CAMERAS; i++ )
{
CMU_useCameraFlags[i] = false;
if (CMU_theCamera[i].CheckLink() != CAM_SUCCESS)
throw 1;
}
}
catch (...)
{
// free any allocated cameras
// ...
CMU_numCameras = 0;
return false;
}
}
try
{
CvSize size;
// pick first unused camera
if(_index==-1){
for(int i = 0; i < CMU_numCameras; i++ )
{
if( !CMU_useCameraFlags[i] ){
_index = i;
break;
}
}
}
// no unused camera found
if (_index==-1)
throw 1;
if (CMU_theCamera[_index].SelectCamera(_index) != CAM_SUCCESS)
throw 2;
if (CMU_theCamera[_index].InitCamera() != CAM_SUCCESS)
throw 3;
// set initial format -- try to pick best frame rate first, then color, then size
bool found_format = false;
for (int rate=5; rate>=0 && !found_format; rate--)
{
for (int color=CV_CAP_IEEE1394_COLOR_RGB; color>=0 && !found_format; color--)
{
for (int size=CV_CAP_IEEE1394_SIZE_1600X1200; size>=0 && !found_format; size--)
{
int format = CV_CAP_IEEE1394_FORMAT[size][color];
int mode = CV_CAP_IEEE1394_MODE[size][color];
if (format!=-1 && mode!=-1 &&
CMU_theCamera[_index].HasVideoFrameRate(format,mode,rate))
{
CMU_theCamera[_index].SetVideoFormat(format);
CMU_theCamera[_index].SetVideoMode(mode);
CMU_theCamera[_index].SetVideoFrameRate(rate);
found_format = (CMU_theCamera[_index].StartImageAcquisition() == CAM_SUCCESS);
}
}
}
}
// try format 7
if(!found_format){
CMU_theCamera[_index].SetVideoFormat(7);
CMU_theCamera[_index].SetVideoMode(0);
if(CMU_theCamera[_index].StartImageAcquisition() != CAM_SUCCESS){
// no format found
throw 9;
}
}
index = _index;
size = getSize();
// allocate image frame
image = cvCreateImage( size, 8, 3 );
cvZero(image);
// successfully activated camera
CMU_numActiveCameras++;
CMU_useCameraFlags[_index] = true;
}
catch ( int )
{
return false;
}
return true;
}
void CvCaptureCAM_CMU::close()
{
C1394Camera* cmucam = camera();
if( cmucam )
{
cvReleaseImage( &image );
cmucam->StopImageAcquisition();
CMU_useCameraFlags[index] = false;
index = -1;
if( --CMU_numActiveCameras == 0 )
{
delete[] CMU_theCamera;
CMU_theCamera = 0;
CMU_numCameras = 0;
}
}
}
bool CvCaptureCAM_CMU::grabFrame()
{
C1394Camera* cmucam = camera();
return cmucam ? cmucam->AcquireImage() == CAM_SUCCESS : false;
}
/*static void swapRedBlue(IplImage * im)
{
uchar * ptr = (uchar *) im->imageData;
uchar t;
for(int i=0; i<im->height; i++){
ptr = (uchar *) im->imageData+im->widthStep*i;
for(int j=0; j<im->width; j++){
t = ptr[0];
ptr[0] = ptr[2];
ptr[2] = t;
ptr+=3;
}
}
}*/
IplImage* CvCaptureCAM_CMU::retrieveFrame(int)
{
C1394Camera* cmucam = camera();
if( !cmucam )
return 0;
cmucam->getRGB((uchar*)image->imageData, image->imageSize);
cvConvertImage( image, image, CV_CVTIMG_SWAP_RB );
return image;
}
double CvCaptureCAM_CMU::getProperty( int property_id )
{
C1394Camera* cmucam = camera();
if( !cmucam )
return 0;
switch( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
return image->width;
case CV_CAP_PROP_FRAME_HEIGHT:
return image->height;
case CV_CAP_PROP_FPS:
return cmucam->GetVideoFrameRate();
case CV_CAP_PROP_MODE:
return cmucam->GetVideoMode();
case CV_CAP_PROP_FORMAT:
return cmucam->GetVideoFormat();
}
return 0;
}
bool CvCaptureCAM_CMU::setVideoSize(int, int)
{
return false;
}
bool CvCaptureCAM_CMU::setMode(int mode)
{
int format;
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
format = cmucam->GetVideoFormat();
if( mode < 0 || mode > 7 || !cmucam->HasVideoMode(format, mode))
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoMode(mode);
cmucam->StartImageAcquisition();
return true;
}
bool CvCaptureCAM_CMU::setFrameRate(int rate)
{
int format, mode;
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
mode = cmucam->GetVideoMode();
format = cmucam->GetVideoFormat();
if( rate < 0 || rate > 5 || !cmucam->HasVideoFrameRate(format, mode, rate) )
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoFrameRate(rate);
cmucam->StartImageAcquisition();
return true;
}
bool CvCaptureCAM_CMU::setFormat(int format)
{
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
if( format < 0 || format > 2 || !cmucam->HasVideoFormat(format) )
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoFormat(format);
cmucam->StartImageAcquisition();
return true;
}
bool CvCaptureCAM_CMU::setProperty( int property_id, double value )
{
bool retval = false;
int ival = cvRound(value);
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case CV_CAP_PROP_FRAME_HEIGHT:
{
int width, height;
if (property_id == CV_CAP_PROP_FRAME_WIDTH)
{
width = ival;
height = width*3/4;
}
else {
height = ival;
width = height*4/3;
}
retval = setVideoSize(width, height);
}
break;
case CV_CAP_PROP_FPS:
retval = setFrameRate(ival);
break;
case CV_CAP_PROP_MODE:
retval = setMode(ival);
break;
case CV_CAP_PROP_FORMAT:
retval = setFormat(ival);
break;
}
// resize image if it's not the right size anymore
CvSize size = getSize();
if( !image || image->width != size.width || image->height != size.height )
{
cvReleaseImage( &image );
image = cvCreateImage( size, 8, 3 );
}
return retval;
}
CvCapture * cvCreateCameraCapture_CMU (int index)
{
CvCaptureCAM_CMU* capture = new CvCaptureCAM_CMU;
if( capture->open(index) )
return capture;
delete capture;
return 0;
}
#endif // CMU
#endif // WIN32

File diff suppressed because it is too large

View File

@@ -1,932 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_DC1394_2
#include <unistd.h>
#include <stdint.h>
#ifdef WIN32
// On Windows, we have no sys/select.h, but we need to pick up
// select() which is in winsock2.
#ifndef __SYS_SELECT_H__
#define __SYS_SELECT_H__ 1
#include <winsock2.h>
#endif
#else
#include <sys/select.h>
#endif /*WIN32*/
#include <dc1394/dc1394.h>
#include <stdlib.h>
#include <string.h>
static dc1394error_t adaptBufferStereoLocal(dc1394video_frame_t *in, dc1394video_frame_t *out)
{
uint32_t bpp;
// buffer position is not changed; size is doubled in Y
out->size[0] = in->size[0];
out->size[1] = in->size[1] * 2;
out->position[0] = in->position[0];
out->position[1] = in->position[1];
// color coding is set to mono8 or raw8.
switch (in->color_coding)
{
case DC1394_COLOR_CODING_RAW16:
out->color_coding = DC1394_COLOR_CODING_RAW8;
break;
case DC1394_COLOR_CODING_MONO16:
case DC1394_COLOR_CODING_YUV422:
out->color_coding = DC1394_COLOR_CODING_MONO8;
break;
default:
return DC1394_INVALID_COLOR_CODING;
}
// keep the color filter value in all cases; if the format is not raw it will not be used further anyway
out->color_filter = in->color_filter;
// the output YUV byte order must be already set if the buffer is YUV422 at the output
// if the output is not YUV we don't care about this field.
// Hence nothing to do.
// we always convert to 8 bits (at this point), so we can safely set this value to 8
out->data_depth = 8;
// don't know what to do with stride... >>>> TODO: STRIDE SHOULD BE TAKEN INTO ACCOUNT... <<<<
// out->stride=??
// the video mode should not change. Color coding and other stuff can be accessed in specific fields of this struct
out->video_mode = in->video_mode;
// padding is kept:
out->padding_bytes = in->padding_bytes;
// image bytes changes: >>>> TODO: STRIDE SHOULD BE TAKEN INTO ACCOUNT... <<<<
dc1394_get_color_coding_bit_size(out->color_coding, &bpp);
out->image_bytes = (out->size[0] * out->size[1] * bpp) / 8;
// total is image_bytes + padding_bytes
out->total_bytes = out->image_bytes + out->padding_bytes;
// bytes-per-packet and packets_per_frame are internal data that can be kept as is.
out->packet_size = in->packet_size;
out->packets_per_frame = in->packets_per_frame;
// timestamp, frame_behind, id and camera are copied too:
out->timestamp = in->timestamp;
out->frames_behind = in->frames_behind;
out->camera = in->camera;
out->id = in->id;
// verify memory allocation:
if (out->total_bytes > out->allocated_image_bytes)
{
free(out->image);
out->image = (uint8_t*)malloc(out->total_bytes * sizeof(uint8_t));
out->allocated_image_bytes = out->total_bytes;
}
// Copy padding bytes:
memcpy(&(out->image[out->image_bytes]), &(in->image[in->image_bytes]), out->padding_bytes);
out->little_endian = DC1394_FALSE; // not used before 1.32 is out.
out->data_in_padding = DC1394_FALSE; // not used before 1.32 is out.
return DC1394_SUCCESS;
}
static dc1394error_t dc1394_deinterlace_stereo_frames_fixed(dc1394video_frame_t *in,
dc1394video_frame_t *out, dc1394stereo_method_t method)
{
if((in->color_coding == DC1394_COLOR_CODING_RAW16) ||
(in->color_coding == DC1394_COLOR_CODING_MONO16) ||
(in->color_coding == DC1394_COLOR_CODING_YUV422))
{
switch (method)
{
case DC1394_STEREO_METHOD_INTERLACED:
adaptBufferStereoLocal(in, out);
//FIXED by AB:
// dc1394_deinterlace_stereo(in->image, out->image, in->size[0], in->size[1]);
dc1394_deinterlace_stereo(in->image, out->image, out->size[0], out->size[1]);
break;
case DC1394_STEREO_METHOD_FIELD:
adaptBufferStereoLocal(in, out);
memcpy(out->image, in->image, out->image_bytes);
break;
default:
return DC1394_INVALID_STEREO_METHOD;
}
return DC1394_SUCCESS;
}
else
return DC1394_FUNCTION_NOT_SUPPORTED;
}
static uint32_t getControlRegister(dc1394camera_t *camera, uint64_t offset)
{
uint32_t value = 0;
dc1394error_t err = dc1394_get_control_register(camera, offset, &value);
assert(err == DC1394_SUCCESS);
return err == DC1394_SUCCESS ? value : 0xffffffff;
}
struct CvDC1394
{
CvDC1394();
~CvDC1394();
dc1394_t* dc;
fd_set camFds;
};
CvDC1394::CvDC1394()
{
dc = dc1394_new();
FD_ZERO(&camFds);
}
CvDC1394::~CvDC1394()
{
if (dc)
dc1394_free(dc);
dc = 0;
}
static CvDC1394 dc1394;
class CvCaptureCAM_DC1394_v2_CPP : public CvCapture
{
public:
static int dc1394properties[CV_CAP_PROP_MAX_DC1394];
CvCaptureCAM_DC1394_v2_CPP();
virtual ~CvCaptureCAM_DC1394_v2_CPP()
{
close();
}
virtual bool open(int index);
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_DC1394; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
virtual bool startCapture();
virtual bool getVidereCalibrationInfo( char* buf, int bufSize );
virtual bool initVidereRectifyMaps( const char* info, IplImage* ml[2], IplImage* mr[2] );
uint64_t guid;
dc1394camera_t* dcCam;
int isoSpeed;
int videoMode;
int frameWidth, frameHeight;
double fps;
int nDMABufs;
bool started;
int userMode;
enum { VIDERE = 0x5505 };
int cameraId;
bool colorStereo;
dc1394bayer_method_t bayer;
dc1394color_filter_t bayerFilter;
enum { NIMG = 2 };
IplImage *img[NIMG];
dc1394video_frame_t* frameC;
int nimages;
bool rectify;
bool init_rectify;
IplImage *maps[NIMG][2];
dc1394featureset_t feature_set;
};
//mapping CV_CAP_PROP_ to DC1394_FEATUREs
int CvCaptureCAM_DC1394_v2_CPP::dc1394properties[CV_CAP_PROP_MAX_DC1394] = {
-1, //no corresponding feature for CV_CAP_PROP_POS_MSEC
-1,-1,-1,-1,
DC1394_FEATURE_FRAME_RATE, //CV_CAP_PROP_FPS - fps can be set for format 7 only!
-1,-1,-1,-1,
DC1394_FEATURE_BRIGHTNESS, //CV_CAP_PROP_BRIGHTNESS 10
-1,
DC1394_FEATURE_SATURATION, //CV_CAP_PROP_SATURATION
DC1394_FEATURE_HUE,
DC1394_FEATURE_GAIN,
DC1394_FEATURE_SHUTTER, //CV_CAP_PROP_EXPOSURE
-1, //CV_CAP_PROP_CONVERT_RGB
DC1394_FEATURE_WHITE_BALANCE, //corresponds to CV_CAP_PROP_WHITE_BALANCE_BLUE_U and CV_CAP_PROP_WHITE_BALANCE_RED_V, see set function to check these props are set
-1,-1,
DC1394_FEATURE_SHARPNESS, //20
DC1394_FEATURE_EXPOSURE, //CV_CAP_PROP_AUTO_EXPOSURE - this is auto exposure according to the IIDC standard
DC1394_FEATURE_GAMMA, //CV_CAP_PROP_GAMMA
DC1394_FEATURE_TEMPERATURE, //CV_CAP_PROP_TEMPERATURE
DC1394_FEATURE_TRIGGER, //CV_CAP_PROP_TRIGGER
DC1394_FEATURE_TRIGGER_DELAY, //CV_CAP_PROP_TRIGGER_DELAY
DC1394_FEATURE_WHITE_BALANCE, //CV_CAP_PROP_WHITE_BALANCE_RED_V
DC1394_FEATURE_ZOOM, //CV_CAP_PROP_ZOOM
DC1394_FEATURE_FOCUS, //CV_CAP_PROP_FOCUS
-1 //CV_CAP_PROP_GUID
};
CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
{
guid = 0;
dcCam = 0;
isoSpeed = 400;
fps = 15;
nDMABufs = 8;
started = false;
cameraId = 0;
colorStereo = false;
bayer = DC1394_BAYER_METHOD_BILINEAR;
bayerFilter = DC1394_COLOR_FILTER_GRBG;
frameWidth = 640;
frameHeight = 480;
for (int i = 0; i < NIMG; i++)
img[i] = maps[i][0] = maps[i][1] = 0;
frameC = 0;
nimages = 1;
rectify = false;
userMode = -1;
}
bool CvCaptureCAM_DC1394_v2_CPP::startCapture()
{
int i;
int code = 0;
if (!dcCam)
return false;
if (isoSpeed > 0)
{
// if capable, set operation mode to 1394b for ISO speeds above 400
if (isoSpeed > 400 && dcCam->bmode_capable == DC1394_TRUE)
{
dc1394_video_set_operation_mode(dcCam, DC1394_OPERATION_MODE_1394B);
}
code = dc1394_video_set_iso_speed(dcCam,
isoSpeed <= 100 ? DC1394_ISO_SPEED_100 :
isoSpeed <= 200 ? DC1394_ISO_SPEED_200 :
isoSpeed <= 400 ? DC1394_ISO_SPEED_400 :
isoSpeed <= 800 ? DC1394_ISO_SPEED_800 :
isoSpeed == 1600 ? DC1394_ISO_SPEED_1600 :
DC1394_ISO_SPEED_3200);
}
// should a specific mode be used
if (userMode >= 0)
{
dc1394video_mode_t wantedMode;
dc1394video_modes_t videoModes;
dc1394_video_get_supported_modes(dcCam, &videoModes);
//set mode by its index, for example the second supported mode, i.e. userMode = 1
if (userMode < (int)videoModes.num)
{
wantedMode = videoModes.modes[userMode];
}
//set modes directly from DC1394 constants (from dc1394video_mode_t)
else if ((userMode >= DC1394_VIDEO_MODE_MIN) && (userMode <= DC1394_VIDEO_MODE_MAX ))
{
//search for wanted mode, to check if camera supports it
int j = 0;
while ((j< (int)videoModes.num) && videoModes.modes[j]!=userMode)
{
j++;
}
if ((int)videoModes.modes[j]==userMode)
{
wantedMode = videoModes.modes[j];
}
else
{
userMode = -1; // wanted mode not supported, search for best mode
}
}
else
{
userMode = -1; // wanted mode not supported, search for best mode
}
//if userMode is available: set it and update size
if (userMode != -1)
{
code = dc1394_video_set_mode(dcCam, wantedMode);
uint32_t width, height;
dc1394_get_image_size_from_video_mode(dcCam, wantedMode, &width, &height);
frameWidth = (int)width;
frameHeight = (int)height;
}
}
if (userMode == -1 && (frameWidth > 0 || frameHeight > 0))
{
dc1394video_mode_t bestMode = (dc1394video_mode_t) - 1;
dc1394video_modes_t videoModes;
dc1394_video_get_supported_modes(dcCam, &videoModes);
for (i = 0; i < (int)videoModes.num; i++)
{
dc1394video_mode_t mode = videoModes.modes[i];
if (mode >= DC1394_VIDEO_MODE_FORMAT7_MIN && mode <= DC1394_VIDEO_MODE_FORMAT7_MAX)
continue;
int pref = -1;
dc1394color_coding_t colorCoding;
dc1394_get_color_coding_from_video_mode(dcCam, mode, &colorCoding);
uint32_t width, height;
dc1394_get_image_size_from_video_mode(dcCam, mode, &width, &height);
if ((int)width == frameWidth || (int)height == frameHeight)
{
if (colorCoding == DC1394_COLOR_CODING_RGB8 ||
colorCoding == DC1394_COLOR_CODING_RAW8)
{
bestMode = mode;
break;
}
if (colorCoding == DC1394_COLOR_CODING_YUV411 ||
colorCoding == DC1394_COLOR_CODING_YUV422 ||
(colorCoding == DC1394_COLOR_CODING_YUV444 &&
pref < 1))
{
bestMode = mode;
pref = 1;
break;
}
if (colorCoding == DC1394_COLOR_CODING_MONO8)
{
bestMode = mode;
pref = 0;
}
}
}
if ((int)bestMode >= 0)
code = dc1394_video_set_mode(dcCam, bestMode);
}
if (fps > 0)
{
dc1394video_mode_t mode;
dc1394framerates_t framerates;
double minDiff = DBL_MAX;
dc1394framerate_t bestFps = (dc1394framerate_t) - 1;
dc1394_video_get_mode(dcCam, &mode);
dc1394_video_get_supported_framerates(dcCam, mode, &framerates);
for (i = 0; i < (int)framerates.num; i++)
{
dc1394framerate_t ifps = framerates.framerates[i];
double fps1 = (1 << (ifps - DC1394_FRAMERATE_1_875)) * 1.875;
double diff = fabs(fps1 - fps);
if (diff < minDiff)
{
minDiff = diff;
bestFps = ifps;
}
}
if ((int)bestFps >= 0)
code = dc1394_video_set_framerate(dcCam, bestFps);
}
if (cameraId == VIDERE)
{
bayerFilter = DC1394_COLOR_FILTER_GBRG;
nimages = 2;
uint32_t value = 0;
dc1394_get_control_register(dcCam, 0x50c, &value);
colorStereo = (value & 0x80000000) != 0;
}
code = dc1394_capture_setup(dcCam, nDMABufs, DC1394_CAPTURE_FLAGS_DEFAULT);
if (code >= 0)
{
FD_SET(dc1394_capture_get_fileno(dcCam), &dc1394.camFds);
dc1394_video_set_transmission(dcCam, DC1394_ON);
if (cameraId == VIDERE)
{
enum { PROC_MODE_OFF, PROC_MODE_NONE, PROC_MODE_TEST, PROC_MODE_RECTIFIED, PROC_MODE_DISPARITY, PROC_MODE_DISPARITY_RAW };
int procMode = PROC_MODE_RECTIFIED;
usleep(100000);
uint32_t qval1 = 0x08000000 | (0x90 << 16) | ((procMode & 0x7) << 16);
uint32_t qval2 = 0x08000000 | (0x9C << 16);
dc1394_set_control_register(dcCam, 0xFF000, qval1);
dc1394_set_control_register(dcCam, 0xFF000, qval2);
}
started = true;
}
return code >= 0;
}
bool CvCaptureCAM_DC1394_v2_CPP::open(int index)
{
bool result = false;
dc1394camera_list_t* cameraList = 0;
dc1394error_t err;
close();
if (!dc1394.dc)
goto _exit_;
err = dc1394_camera_enumerate(dc1394.dc, &cameraList);
if (err < 0 || !cameraList || (unsigned)index >= (unsigned)cameraList->num)
goto _exit_;
guid = cameraList->ids[index].guid;
dcCam = dc1394_camera_new(dc1394.dc, guid);
if (!dcCam)
goto _exit_;
cameraId = dcCam->vendor_id;
//get all features
if (dc1394_feature_get_all(dcCam,&feature_set) == DC1394_SUCCESS)
result = true;
else
result = false;
_exit_:
if (cameraList)
dc1394_camera_free_list(cameraList);
return result;
}
void CvCaptureCAM_DC1394_v2_CPP::close()
{
if (dcCam)
{
// make sure fileno is valid before using it
int fileno=dc1394_capture_get_fileno(dcCam);
if (fileno>=0 && FD_ISSET(fileno, &dc1394.camFds))
FD_CLR(fileno, &dc1394.camFds);
dc1394_video_set_transmission(dcCam, DC1394_OFF);
dc1394_capture_stop(dcCam);
dc1394_camera_free(dcCam);
dcCam = 0;
started = false;
}
for (int i = 0; i < NIMG; i++)
{
cvReleaseImage(&img[i]);
cvReleaseImage(&maps[i][0]);
cvReleaseImage(&maps[i][1]);
}
if (frameC)
{
if (frameC->image)
free(frameC->image);
free(frameC);
frameC = 0;
}
}
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
bool code = false, isColor;
dc1394video_frame_t *dcFrame = 0, *fs = 0;
int i, nch;
if (!dcCam || (!started && !startCapture()))
return false;
dc1394_capture_dequeue(dcCam, policy, &dcFrame);
if (!dcFrame)
return false;
if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
{
goto _exit_;
}
isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;
if (nimages == 2)
{
fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));
//dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
dc1394_deinterlace_stereo_frames_fixed(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
dcFrame = 0;
if (!fs->image)
goto _exit_;
isColor = colorStereo;
}
nch = isColor ? 3 : 1;
for (i = 0; i < nimages; i++)
{
IplImage fhdr;
dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
f.size[1] /= nimages;
f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
if (isColor)
{
if (!frameC)
frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
if (nimages == 1)
{
dc1394_convert_frames(&f, frameC);
dc1394_capture_enqueue(dcCam, dcFrame);
dcFrame = 0;
}
else
{
f.color_filter = bayerFilter;
dc1394_debayer_frames(&f, frameC, bayer);
}
fc = frameC;
}
if (!img[i])
img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
cvSetData(&fhdr, fc->image, fc->size[0]*nch);
// Swap R&B channels:
if (nch==3)
cvConvertImage(&fhdr,&fhdr,CV_CVTIMG_SWAP_RB);
if( rectify && cameraId == VIDERE && nimages == 2 )
{
if( !maps[0][0] || maps[0][0]->width != img[i]->width || maps[0][0]->height != img[i]->height )
{
CvSize size = cvGetSize(img[i]);
cvReleaseImage(&maps[0][0]);
cvReleaseImage(&maps[0][1]);
cvReleaseImage(&maps[1][0]);
cvReleaseImage(&maps[1][1]);
maps[0][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
maps[0][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
maps[1][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
maps[1][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
char buf[4*4096];
if( !getVidereCalibrationInfo( buf, (int)sizeof(buf) ) ||
!initVidereRectifyMaps( buf, maps[0], maps[1] ))
rectify = false;
}
cvRemap(&fhdr, img[i], maps[i][0], maps[i][1]);
}
else
cvCopy(&fhdr, img[i]);
}
code = true;
_exit_:
if (dcFrame)
dc1394_capture_enqueue(dcCam, dcFrame);
if (fs)
{
if (fs->image)
free(fs->image);
free(fs);
}
return code;
}
IplImage* CvCaptureCAM_DC1394_v2_CPP::retrieveFrame(int idx)
{
return 0 <= idx && idx < nimages ? img[idx] : 0;
}
double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId)
{
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
return frameWidth ? frameWidth : frameHeight*4 / 3;
case CV_CAP_PROP_FRAME_HEIGHT:
return frameHeight ? frameHeight : frameWidth*3 / 4;
case CV_CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_RECTIFICATION:
return rectify ? 1 : 0;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
if (dc1394_feature_whitebalance_get_value(dcCam,
&feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value;
break;
case CV_CAP_PROP_WHITE_BALANCE_RED_V:
if (dc1394_feature_whitebalance_get_value(dcCam,
&feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value;
break;
case CV_CAP_PROP_GUID:
//the least significant 32 bits are enough to identify the camera
return (double) (guid & 0x00000000FFFFFFFF);
case CV_CAP_PROP_MODE:
return (double) userMode;
case CV_CAP_PROP_ISO_SPEED:
return (double) isoSpeed;
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
//&& feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].on_off_capable)
if (dc1394_feature_get_value(dcCam,(dc1394feature_t)dc1394properties[propId],
&feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value) == DC1394_SUCCESS)
return feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].value;
}
return -1; // the value of the feature can be 0, so returning 0 as an error is wrong
}
bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
{
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
if(started)
return false;
frameWidth = cvRound(value);
frameHeight = 0;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
if(started)
return false;
frameWidth = 0;
frameHeight = cvRound(value);
break;
case CV_CAP_PROP_FPS:
if(started)
return false;
fps = value;
break;
case CV_CAP_PROP_RECTIFICATION:
if( cameraId != VIDERE )
return false;
rectify = fabs(value) > FLT_EPSILON;
break;
case CV_CAP_PROP_MODE:
if(started)
return false;
userMode = cvRound(value);
break;
case CV_CAP_PROP_ISO_SPEED:
if(started)
return false;
isoSpeed = cvRound(value);
break;
//The code below is based on coriander (callbacks.c:795, case RANGE_MENU_MAN)
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
{
//get the corresponding feature from property-id
dc1394feature_info_t *act_feature = &feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN];
if (cvRound(value) == CV_CAP_PROP_DC1394_OFF)
{
if ( (act_feature->on_off_capable)
&& (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_OFF) == DC1394_SUCCESS))
{
act_feature->is_on=DC1394_OFF;
return true;
}
return false;
}
//try to turn the feature ON; a feature can be ON while not being capable of switching OFF
if ( (act_feature->is_on == DC1394_OFF) && (act_feature->on_off_capable == DC1394_TRUE))
{
if (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_ON) == DC1394_SUCCESS)
feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].is_on=DC1394_ON;
}
//turn off absolute mode - the actual value will then be stored in the value field;
//otherwise it would be written to the CSR (control and status register) as an absolute value
if (act_feature->absolute_capable
&& dc1394_feature_set_absolute_control(dcCam, act_feature->id, DC1394_OFF) !=DC1394_SUCCESS)
return false;
else
act_feature->abs_control=DC1394_OFF;
//set AUTO
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_AUTO)
{
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_AUTO)!=DC1394_SUCCESS)
return false;
act_feature->current_mode=DC1394_FEATURE_MODE_AUTO;
return true;
}
//set ONE PUSH
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)
{
//have to set to manual first, otherwise one push will be ignored (AVT manual 4.3.0 p. 115)
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_ONE_PUSH_AUTO)!=DC1394_SUCCESS)
return false;
//record the newly requested mode
act_feature->current_mode=DC1394_FEATURE_MODE_ONE_PUSH_AUTO;
return true;
}
//otherwise set the feature to MANUAL mode
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_MANUAL)!=DC1394_SUCCESS)
return false;
else
act_feature->current_mode=DC1394_FEATURE_MODE_MANUAL;
// white balance features need their paired value, so they are handled separately
if (propId == CV_CAP_PROP_WHITE_BALANCE_BLUE_U)
{
if (dc1394_feature_whitebalance_set_value(dcCam,cvRound(value), act_feature->RV_value)!=DC1394_SUCCESS)
return false;
else
{
act_feature->BU_value = cvRound(value);
return true;
}
}
if (propId == CV_CAP_PROP_WHITE_BALANCE_RED_V)
{
if (dc1394_feature_whitebalance_set_value(dcCam, act_feature->BU_value, cvRound(value))!=DC1394_SUCCESS)
return false;
else
{
act_feature->RV_value = cvRound(value);
return true;
}
}
//clamp the requested value to the feature's valid range
if (value < act_feature->min)
{
value = act_feature->min;
}
else if (value > act_feature->max)
{
value = act_feature->max;
}
if (dc1394_feature_set_value(dcCam, act_feature->id, cvRound(value)) == DC1394_SUCCESS)
{
act_feature->value = value;
return true;
}
}
return false;
}
return true;
}
bool CvCaptureCAM_DC1394_v2_CPP::getVidereCalibrationInfo( char* buf, int bufSize )
{
int pos;
for( pos = 0; pos < bufSize - 4; pos += 4 )
{
uint32_t quad = getControlRegister(dcCam, 0xF0800 + pos);
if( quad == 0 || quad == 0xffffffff )
break;
buf[pos] = (uchar)(quad >> 24);
buf[pos+1] = (uchar)(quad >> 16);
buf[pos+2] = (uchar)(quad >> 8);
buf[pos+3] = (uchar)(quad);
}
if( pos == 0 )
return false;
buf[pos] = '\0';
return true;
}
bool CvCaptureCAM_DC1394_v2_CPP::initVidereRectifyMaps( const char* info,
IplImage* ml[2], IplImage* mr[2] )
{
float identity_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
CvMat l_rect = cvMat(3, 3, CV_32F, identity_data), r_rect = l_rect;
float l_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
float r_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
CvMat l_intrinsic = cvMat(3, 3, CV_32F, l_intrinsic_data);
CvMat r_intrinsic = cvMat(3, 3, CV_32F, r_intrinsic_data);
float l_distortion_data[] = {0,0,0,0,0}, r_distortion_data[] = {0,0,0,0,0};
CvMat l_distortion = cvMat(1, 5, CV_32F, l_distortion_data);
CvMat r_distortion = cvMat(1, 5, CV_32F, r_distortion_data);
IplImage* mx = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
IplImage* my = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
int k, j;
for( k = 0; k < 2; k++ )
{
const char* section_name = k == 0 ? "[left_camera]" : "[right_camera]";
static const char* param_names[] = { "f ", "fy", "Cx", "Cy", "kappa1", "kappa2", "tau1", "tau2", "kappa3", 0 };
const char* section_start = strstr( info, section_name );
CvMat* intrinsic = k == 0 ? &l_intrinsic : &r_intrinsic;
CvMat* distortion = k == 0 ? &l_distortion : &r_distortion;
CvMat* rectification = k == 0 ? &l_rect : &r_rect;
IplImage** dst = k == 0 ? ml : mr;
if( !section_start )
break;
section_start += strlen(section_name);
for( j = 0; param_names[j] != 0; j++ )
{
const char* param_value_start = strstr(section_start, param_names[j]);
float val=0;
if(!param_value_start)
break;
sscanf(param_value_start + strlen(param_names[j]), "%f", &val);
if( j < 4 )
intrinsic->data.fl[j == 0 ? 0 : j == 1 ? 4 : j == 2 ? 2 : 5] = val;
else
distortion->data.fl[j - 4] = val;
}
if( param_names[j] != 0 )
break;
// some sanity check for the principal point
if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.1 ||
fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.1 )
{
cvScale( intrinsic, intrinsic, 0.5 ); // try the corrected intrinsic matrix for 2x lower resolution
if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.05 ||
fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.05 )
cvScale( intrinsic, intrinsic, 2 ); // revert it back if the new variant is not much better
intrinsic->data.fl[8] = 1;
}
cvInitUndistortRectifyMap( intrinsic, distortion,
rectification, intrinsic, mx, my );
cvConvertMaps( mx, my, dst[0], dst[1] );
}
cvReleaseImage( &mx );
cvReleaseImage( &my );
return k >= 2;
}
CvCapture* cvCreateCameraCapture_DC1394_2(int index)
{
CvCaptureCAM_DC1394_v2_CPP* capture = new CvCaptureCAM_DC1394_v2_CPP;
if (capture->open(index))
return capture;
delete capture;
return 0;
}
#endif
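For orientation, here is a minimal usage sketch of this backend through the C capture API. It is illustrative only: camera index 0 is assumed, and applications normally reach this code via cvCaptureFromCAM(CV_CAP_IEEE1394) rather than by calling the factory directly.

// Hypothetical helper, not part of the original file.
static void dc1394SmokeTest()
{
CvCapture* cap = cvCreateCameraCapture_DC1394_2(0);
if (!cap)
return;
// configure before the first grab: setProperty() rejects changes to
// size/FPS/mode/ISO speed once capture has started
cvSetCaptureProperty(cap, CV_CAP_PROP_ISO_SPEED, 800);
cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, 30);
if (cvGrabFrame(cap))
{
IplImage* img = cvRetrieveFrame(cap, 0); // owned by the capture, do not release
(void)img; // ... process the frame ...
}
cvReleaseCapture(&cap);
}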



@@ -1,275 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if defined HAVE_FFMPEG && !defined WIN32
#include "cap_ffmpeg_impl.hpp"
#else
#include "cap_ffmpeg_api.hpp"
#endif
static CvCreateFileCapture_Plugin icvCreateFileCapture_FFMPEG_p = 0;
static CvReleaseCapture_Plugin icvReleaseCapture_FFMPEG_p = 0;
static CvGrabFrame_Plugin icvGrabFrame_FFMPEG_p = 0;
static CvRetrieveFrame_Plugin icvRetrieveFrame_FFMPEG_p = 0;
static CvSetCaptureProperty_Plugin icvSetCaptureProperty_FFMPEG_p = 0;
static CvGetCaptureProperty_Plugin icvGetCaptureProperty_FFMPEG_p = 0;
static CvCreateVideoWriter_Plugin icvCreateVideoWriter_FFMPEG_p = 0;
static CvReleaseVideoWriter_Plugin icvReleaseVideoWriter_FFMPEG_p = 0;
static CvWriteFrame_Plugin icvWriteFrame_FFMPEG_p = 0;
static cv::Mutex _icvInitFFMPEG_mutex;
class icvInitFFMPEG
{
public:
static void Init()
{
cv::AutoLock al(_icvInitFFMPEG_mutex);
static icvInitFFMPEG init;
}
private:
#if defined WIN32 || defined _WIN32
HMODULE icvFFOpenCV;
~icvInitFFMPEG()
{
if (icvFFOpenCV)
{
FreeLibrary(icvFFOpenCV);
icvFFOpenCV = 0;
}
}
#endif
icvInitFFMPEG()
{
#if defined WIN32 || defined _WIN32
# ifdef HAVE_WINRT
const wchar_t* module_name = L"opencv_ffmpeg"
CVAUX_STRW(CV_MAJOR_VERSION) CVAUX_STRW(CV_MINOR_VERSION) CVAUX_STRW(CV_SUBMINOR_VERSION)
#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__)
L"_64"
#endif
L".dll";
icvFFOpenCV = LoadPackagedLibrary( module_name, 0 );
# else
const char* module_name = "opencv_ffmpeg"
CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION)
#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__)
"_64"
#endif
".dll";
icvFFOpenCV = LoadLibrary( module_name );
# endif
if( icvFFOpenCV )
{
icvCreateFileCapture_FFMPEG_p =
(CvCreateFileCapture_Plugin)GetProcAddress(icvFFOpenCV, "cvCreateFileCapture_FFMPEG");
icvReleaseCapture_FFMPEG_p =
(CvReleaseCapture_Plugin)GetProcAddress(icvFFOpenCV, "cvReleaseCapture_FFMPEG");
icvGrabFrame_FFMPEG_p =
(CvGrabFrame_Plugin)GetProcAddress(icvFFOpenCV, "cvGrabFrame_FFMPEG");
icvRetrieveFrame_FFMPEG_p =
(CvRetrieveFrame_Plugin)GetProcAddress(icvFFOpenCV, "cvRetrieveFrame_FFMPEG");
icvSetCaptureProperty_FFMPEG_p =
(CvSetCaptureProperty_Plugin)GetProcAddress(icvFFOpenCV, "cvSetCaptureProperty_FFMPEG");
icvGetCaptureProperty_FFMPEG_p =
(CvGetCaptureProperty_Plugin)GetProcAddress(icvFFOpenCV, "cvGetCaptureProperty_FFMPEG");
icvCreateVideoWriter_FFMPEG_p =
(CvCreateVideoWriter_Plugin)GetProcAddress(icvFFOpenCV, "cvCreateVideoWriter_FFMPEG");
icvReleaseVideoWriter_FFMPEG_p =
(CvReleaseVideoWriter_Plugin)GetProcAddress(icvFFOpenCV, "cvReleaseVideoWriter_FFMPEG");
icvWriteFrame_FFMPEG_p =
(CvWriteFrame_Plugin)GetProcAddress(icvFFOpenCV, "cvWriteFrame_FFMPEG");
#if 0
if( icvCreateFileCapture_FFMPEG_p != 0 &&
icvReleaseCapture_FFMPEG_p != 0 &&
icvGrabFrame_FFMPEG_p != 0 &&
icvRetrieveFrame_FFMPEG_p != 0 &&
icvSetCaptureProperty_FFMPEG_p != 0 &&
icvGetCaptureProperty_FFMPEG_p != 0 &&
icvCreateVideoWriter_FFMPEG_p != 0 &&
icvReleaseVideoWriter_FFMPEG_p != 0 &&
icvWriteFrame_FFMPEG_p != 0 )
{
printf("Successfully initialized ffmpeg plugin!\n");
}
else
{
printf("Failed to load FFMPEG plugin: module handle=%p\n", icvFFOpenCV);
}
#endif
}
#elif defined HAVE_FFMPEG
icvCreateFileCapture_FFMPEG_p = (CvCreateFileCapture_Plugin)cvCreateFileCapture_FFMPEG;
icvReleaseCapture_FFMPEG_p = (CvReleaseCapture_Plugin)cvReleaseCapture_FFMPEG;
icvGrabFrame_FFMPEG_p = (CvGrabFrame_Plugin)cvGrabFrame_FFMPEG;
icvRetrieveFrame_FFMPEG_p = (CvRetrieveFrame_Plugin)cvRetrieveFrame_FFMPEG;
icvSetCaptureProperty_FFMPEG_p = (CvSetCaptureProperty_Plugin)cvSetCaptureProperty_FFMPEG;
icvGetCaptureProperty_FFMPEG_p = (CvGetCaptureProperty_Plugin)cvGetCaptureProperty_FFMPEG;
icvCreateVideoWriter_FFMPEG_p = (CvCreateVideoWriter_Plugin)cvCreateVideoWriter_FFMPEG;
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
#endif
}
};
class CvCapture_FFMPEG_proxy :
public CvCapture
{
public:
CvCapture_FFMPEG_proxy() { ffmpegCapture = 0; }
virtual ~CvCapture_FFMPEG_proxy() { close(); }
virtual double getProperty(int propId)
{
return ffmpegCapture ? icvGetCaptureProperty_FFMPEG_p(ffmpegCapture, propId) : 0;
}
virtual bool setProperty(int propId, double value)
{
return ffmpegCapture ? icvSetCaptureProperty_FFMPEG_p(ffmpegCapture, propId, value)!=0 : false;
}
virtual bool grabFrame()
{
return ffmpegCapture ? icvGrabFrame_FFMPEG_p(ffmpegCapture)!=0 : false;
}
virtual IplImage* retrieveFrame(int)
{
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0;
if (!ffmpegCapture ||
!icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
return 0;
cvInitImageHeader(&frame, cvSize(width, height), 8, cn);
cvSetData(&frame, data, step);
return &frame;
}
virtual bool open( const char* filename )
{
icvInitFFMPEG::Init();
close();
if( !icvCreateFileCapture_FFMPEG_p )
return false;
ffmpegCapture = icvCreateFileCapture_FFMPEG_p( filename );
return ffmpegCapture != 0;
}
virtual void close()
{
if( ffmpegCapture && icvReleaseCapture_FFMPEG_p )
icvReleaseCapture_FFMPEG_p( &ffmpegCapture );
assert( ffmpegCapture == 0 );
ffmpegCapture = 0;
}
protected:
void* ffmpegCapture;
IplImage frame;
};
CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
{
CvCapture_FFMPEG_proxy* result = new CvCapture_FFMPEG_proxy;
if( result->open( filename ))
return result;
delete result;
return 0;
}
class CvVideoWriter_FFMPEG_proxy :
public CvVideoWriter
{
public:
CvVideoWriter_FFMPEG_proxy() { ffmpegWriter = 0; }
virtual ~CvVideoWriter_FFMPEG_proxy() { close(); }
virtual bool writeFrame( const IplImage* image )
{
if(!ffmpegWriter)
return false;
CV_Assert(image->depth == 8);
return icvWriteFrame_FFMPEG_p(ffmpegWriter, (const uchar*)image->imageData,
image->widthStep, image->width, image->height, image->nChannels, image->origin) !=0;
}
virtual bool open( const char* filename, int fourcc, double fps, CvSize frameSize, bool isColor )
{
icvInitFFMPEG::Init();
close();
if( !icvCreateVideoWriter_FFMPEG_p )
return false;
ffmpegWriter = icvCreateVideoWriter_FFMPEG_p( filename, fourcc, fps, frameSize.width, frameSize.height, isColor );
return ffmpegWriter != 0;
}
virtual void close()
{
if( ffmpegWriter && icvReleaseVideoWriter_FFMPEG_p )
icvReleaseVideoWriter_FFMPEG_p( &ffmpegWriter );
assert( ffmpegWriter == 0 );
ffmpegWriter = 0;
}
protected:
void* ffmpegWriter;
};
CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc,
double fps, CvSize frameSize, int isColor )
{
CvVideoWriter_FFMPEG_proxy* result = new CvVideoWriter_FFMPEG_proxy;
if( result->open( filename, fourcc, fps, frameSize, isColor != 0 ))
return result;
delete result;
return 0;
}
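Taken together, the two proxies support a straightforward file-to-file copy. A sketch under assumed names (in.avi, out.avi, and the MJPG/25 fps parameters are placeholders):

// Hypothetical helper, not part of the original file.
static void transcodeSketch()
{
CvCapture* in = cvCreateFileCapture_FFMPEG_proxy("in.avi");
if (!in)
return;
CvSize size = cvSize((int)cvGetCaptureProperty(in, CV_CAP_PROP_FRAME_WIDTH),
(int)cvGetCaptureProperty(in, CV_CAP_PROP_FRAME_HEIGHT));
CvVideoWriter* out = cvCreateVideoWriter_FFMPEG_proxy("out.avi",
CV_FOURCC('M','J','P','G'), 25.0, size, 1);
IplImage* frame = 0;
while (out && cvGrabFrame(in) && (frame = cvRetrieveFrame(in, 0)) != 0)
cvWriteFrame(out, frame); // frame stays owned by the capture
if (out)
cvReleaseVideoWriter(&out);
cvReleaseCapture(&in);
}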


@@ -1,98 +0,0 @@
#ifndef __OPENCV_FFMPEG_H__
#define __OPENCV_FFMPEG_H__
#ifdef __cplusplus
extern "C"
{
#endif
#if defined WIN32 || defined _WIN32
# define OPENCV_FFMPEG_API __declspec(dllexport)
#elif defined __GNUC__ && __GNUC__ >= 4
# define OPENCV_FFMPEG_API __attribute__ ((visibility ("default")))
#else
# define OPENCV_FFMPEG_API
#endif
enum
{
CV_FFMPEG_CAP_PROP_POS_MSEC=0,
CV_FFMPEG_CAP_PROP_POS_FRAMES=1,
CV_FFMPEG_CAP_PROP_POS_AVI_RATIO=2,
CV_FFMPEG_CAP_PROP_FRAME_WIDTH=3,
CV_FFMPEG_CAP_PROP_FRAME_HEIGHT=4,
CV_FFMPEG_CAP_PROP_FPS=5,
CV_FFMPEG_CAP_PROP_FOURCC=6,
CV_FFMPEG_CAP_PROP_FRAME_COUNT=7
};
OPENCV_FFMPEG_API struct CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG(const char* filename);
OPENCV_FFMPEG_API struct CvCapture_FFMPEG_2* cvCreateFileCapture_FFMPEG_2(const char* filename);
OPENCV_FFMPEG_API int cvSetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap,
int prop, double value);
OPENCV_FFMPEG_API int cvSetCaptureProperty_FFMPEG_2(struct CvCapture_FFMPEG_2* cap,
int prop, double value);
OPENCV_FFMPEG_API double cvGetCaptureProperty_FFMPEG(struct CvCapture_FFMPEG* cap, int prop);
OPENCV_FFMPEG_API double cvGetCaptureProperty_FFMPEG_2(struct CvCapture_FFMPEG_2* cap, int prop);
OPENCV_FFMPEG_API int cvGrabFrame_FFMPEG(struct CvCapture_FFMPEG* cap);
OPENCV_FFMPEG_API int cvGrabFrame_FFMPEG_2(struct CvCapture_FFMPEG_2* cap);
OPENCV_FFMPEG_API int cvRetrieveFrame_FFMPEG(struct CvCapture_FFMPEG* capture, unsigned char** data,
int* step, int* width, int* height, int* cn);
OPENCV_FFMPEG_API int cvRetrieveFrame_FFMPEG_2(struct CvCapture_FFMPEG_2* capture, unsigned char** data,
int* step, int* width, int* height, int* cn);
OPENCV_FFMPEG_API void cvReleaseCapture_FFMPEG(struct CvCapture_FFMPEG** cap);
OPENCV_FFMPEG_API void cvReleaseCapture_FFMPEG_2(struct CvCapture_FFMPEG_2** cap);
OPENCV_FFMPEG_API struct CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG(const char* filename,
int fourcc, double fps, int width, int height, int isColor );
OPENCV_FFMPEG_API struct CvVideoWriter_FFMPEG_2* cvCreateVideoWriter_FFMPEG_2(const char* filename,
int fourcc, double fps, int width, int height, int isColor );
OPENCV_FFMPEG_API int cvWriteFrame_FFMPEG(struct CvVideoWriter_FFMPEG* writer, const unsigned char* data,
int step, int width, int height, int cn, int origin);
OPENCV_FFMPEG_API void cvReleaseVideoWriter_FFMPEG(struct CvVideoWriter_FFMPEG** writer);
typedef void* (*CvCreateFileCapture_Plugin)( const char* filename );
typedef void* (*CvCreateCameraCapture_Plugin)( int index );
typedef int (*CvGrabFrame_Plugin)( void* capture_handle );
typedef int (*CvRetrieveFrame_Plugin)( void* capture_handle, unsigned char** data, int* step,
int* width, int* height, int* cn );
typedef int (*CvSetCaptureProperty_Plugin)( void* capture_handle, int prop_id, double value );
typedef double (*CvGetCaptureProperty_Plugin)( void* capture_handle, int prop_id );
typedef void (*CvReleaseCapture_Plugin)( void** capture_handle );
typedef void* (*CvCreateVideoWriter_Plugin)( const char* filename, int fourcc,
double fps, int width, int height, int iscolor );
typedef int (*CvWriteFrame_Plugin)( void* writer_handle, const unsigned char* data, int step,
int width, int height, int cn, int origin);
typedef void (*CvReleaseVideoWriter_Plugin)( void** writer );
/*
* For CUDA encoder
*/
OPENCV_FFMPEG_API struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fileName, int width, int height, double fps);
OPENCV_FFMPEG_API void release_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream);
OPENCV_FFMPEG_API void write_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size, int keyFrame);
typedef struct OutputMediaStream_FFMPEG* (*Create_OutputMediaStream_FFMPEG_Plugin)(const char* fileName, int width, int height, double fps);
typedef void (*Release_OutputMediaStream_FFMPEG_Plugin)(struct OutputMediaStream_FFMPEG* stream);
typedef void (*Write_OutputMediaStream_FFMPEG_Plugin)(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size, int keyFrame);
/*
* For CUDA decoder
*/
OPENCV_FFMPEG_API struct InputMediaStream_FFMPEG* create_InputMediaStream_FFMPEG(const char* fileName, int* codec, int* chroma_format, int* width, int* height);
OPENCV_FFMPEG_API void release_InputMediaStream_FFMPEG(struct InputMediaStream_FFMPEG* stream);
OPENCV_FFMPEG_API int read_InputMediaStream_FFMPEG(struct InputMediaStream_FFMPEG* stream, unsigned char** data, int* size, int* endOfFile);
typedef struct InputMediaStream_FFMPEG* (*Create_InputMediaStream_FFMPEG_Plugin)(const char* fileName, int* codec, int* chroma_format, int* width, int* height);
typedef void (*Release_InputMediaStream_FFMPEG_Plugin)(struct InputMediaStream_FFMPEG* stream);
typedef int (*Read_InputMediaStream_FFMPEG_Plugin)(struct InputMediaStream_FFMPEG* stream, unsigned char** data, int* size, int* endOfFile);
#ifdef __cplusplus
}
#endif
#endif
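The Windows loader shown earlier resolves these entry points with LoadLibrary/GetProcAddress; a hypothetical POSIX analogue using the same plugin typedefs would look like the sketch below. This is illustrative only - on non-Windows builds OpenCV links the FFMPEG wrappers statically (see the HAVE_FFMPEG branch of icvInitFFMPEG), and the library name here is invented.

#include <dlfcn.h>

// Hypothetical loader, not part of the original file.
static void loadFFmpegPluginSketch()
{
void* handle = dlopen("libopencv_ffmpeg.so", RTLD_NOW);
if (!handle)
return;
CvCreateFileCapture_Plugin createCapture =
(CvCreateFileCapture_Plugin)dlsym(handle, "cvCreateFileCapture_FFMPEG");
CvReleaseCapture_Plugin releaseCapture =
(CvReleaseCapture_Plugin)dlsym(handle, "cvReleaseCapture_FFMPEG");
if (createCapture && releaseCapture)
{
void* cap = createCapture("in.avi"); // placeholder file name
if (cap)
releaseCapture(&cap);
}
dlclose(handle);
}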



@@ -1,763 +0,0 @@
////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//
// The code has been contributed by Vladimir N. Litvinenko on 2012 Jul
// mailto:vladimir.litvinenko@codepaint.ru
//
#include "precomp.hpp"
#include <GigEVisionSDK.h>
#include <GigEVisionSDK.cpp>
#ifdef WIN32
#include <io.h>
#else
#include <stdio.h>
#endif
#ifdef NDEBUG
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
#define QTGIG_HEARTBEAT_TIME (12000.0)
#define QTGIG_MAX_WAIT_TIME (2.0)
#define QTGIG_IMG_WAIT_TIME (3.0)
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprInitGigEVisionAPI();
\brief Wrapper to GigEVisionAPI function gige::InitGigEVisionAPI ()
\return true -- success
See \a wrprExitGigEVisionAPI
*/
bool
wrprInitGigEVisionAPI()
{
CV_FUNCNAME("wrprInitGigEVisionAPI");
__BEGIN__;
try {
gige::InitGigEVisionAPI ();
} catch(...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: initialization (InitGigEVisionAPI()) failed.\n");
}
__END__;
return true;
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprExitGigEVisionAPI()
\brief Wrapper to GigEVisionAPI function gige::ExitGigEVisionAPI ()
\return true -- success
See \a wrprInitGigEVisionAPI
*/
bool
wrprExitGigEVisionAPI()
{
CV_FUNCNAME("wrprExitGigEVisionAPI");
__BEGIN__;
try {
gige::ExitGigEVisionAPI ();
} catch(...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: finalization (ExitGigEVisionAPI()) failed.\n");
return false;
}
__END__;
return true;
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn gige::IGigEVisionAPI wrprGetGigEVisionAPI()
\brief Wrapper to GigEVisionAPI function gige::GetGigEVisionAPI ()
\return item of gige::IGigEVisionAPI type
See \a wrprInitGigEVisionAPI, \a gige::IGigEVisionAPI
*/
gige::IGigEVisionAPI
wrprGetGigEVisionAPI()
{
gige::IGigEVisionAPI b_ret = 0;
CV_FUNCNAME("wrprGetGigEVisionAPI");
__BEGIN__;
try {
b_ret = gige::GetGigEVisionAPI ();
} catch(...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: API instance (from GetGigEVisionAPI()) failed.\n");
}
__END__;
return b_ret;
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprUnregisterCallback( const gige::IGigEVisionAPI* api, gige::ICallbackEvent* eventHandler)
\brief Wrapper to GigEVisionAPI function
\param api
\param eventHandler
\return true - success, else - false
See \a wrprInitGigEVisionAPI, \a gige::IGigEVisionAPI
*/
bool
wrprUnregisterCallback( const gige::IGigEVisionAPI* api, gige::ICallbackEvent* eventHandler)
{
bool b_ret = api != NULL;
if(b_ret) b_ret = api->IsValid ();
CV_FUNCNAME("wrprUnregisterCallback");
__BEGIN__;
if(b_ret)
{
if(eventHandler != NULL)
{
try {
b_ret = ((gige::IGigEVisionAPIInterface*)api)->UnregisterCallback (eventHandler);
} catch(...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: API unregister callback function (from UnregisterCallback()) failed.\n");
b_ret = false;
}
}
}
__END__;
return (b_ret);
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprDeviceIsConnect( gige::IDevice& device )
\brief Wrapper to GigEVisionAPI function IDevice::IsConnected()
\param device - selected device
\return true - device connected
*/
bool
wrprDeviceIsConnect( gige::IDevice& device )
{
bool b_ret = device != NULL;
CV_FUNCNAME("wrprDeviceIsConnect");
__BEGIN__;
if(b_ret)
{
try {
b_ret = device->IsConnected ();
} catch (...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: API device connection state (from IsConnected()) failed.\n");
b_ret = false;
}
}
__END__;
return (b_ret);
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprDeviceIsValid( gige::IDevice& device )
\brief Wrapper to GigEVisionAPI function IDevice::IsValid()
\param device - selected device
\return true - device valid
*/
bool
wrprDeviceIsValid( gige::IDevice& device )
{
bool b_ret = device != NULL;
CV_FUNCNAME("wrprDeviceIsConnect");
__BEGIN__;
if(b_ret)
{
try {
b_ret = device.IsValid ();
} catch (...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: API device validation state (from IsValid()) failed.\n");
b_ret = false;
}
}
__END__;
return (b_ret);
}
/*----------------------------------------------------------------------------*/
/**
\internal
\fn bool wrprDeviceDisconnect ( gige::IDevice& device )
\brief Wrapper to GigEVisionAPI function IDevice::Disconnect()
\param device - selected device
\return true - device disconnected successfully
*/
bool
wrprDeviceDisconnect ( gige::IDevice& device )
{
bool b_ret = device != NULL;
CV_FUNCNAME("wrprDeviceDisconnect");
__BEGIN__;
if(b_ret)
{
try {
device->Disconnect ();
} catch (...) {
CV_ERROR(CV_StsError, "GigEVisionAPI: API device disconnect (from Disconnect()) failed.\n");
b_ret = false;
}
}
__END__;
return (b_ret);
}
/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
/**
\internal
\class CvCaptureCAM_Giganetix
\brief Capturing video from a camera via Smartec Giganetix (uses the GigEVisionSDK library).
*/
class CvCaptureCAM_Giganetix : public CvCapture
{
public:
CvCaptureCAM_Giganetix();
virtual ~CvCaptureCAM_Giganetix();
virtual bool open( int index );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain()
{
return CV_CAP_GIGANETIX;
}
bool start ();
bool stop ();
protected:
void init ();
void grabImage ();
gige::IGigEVisionAPI m_api;
bool m_api_on;
gige::IDevice m_device;
bool m_active;
IplImage* m_raw_image;
UINT32 m_rawImagePixelType;
bool m_monocrome;
};
/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
void
CvCaptureCAM_Giganetix::init ()
{
m_monocrome = m_active = m_api_on = false;
m_api = 0;
m_device = 0;
m_raw_image = 0;
m_rawImagePixelType = 0;
}
/*----------------------------------------------------------------------------*/
CvCaptureCAM_Giganetix::CvCaptureCAM_Giganetix()
{
init ();
m_api_on = wrprInitGigEVisionAPI ();
if(m_api_on)
{
if((m_api = wrprGetGigEVisionAPI ()) != NULL)
{
m_api->SetHeartbeatTime (QTGIG_HEARTBEAT_TIME);
}
}
}
/*----------------------------------------------------------------------------*/
CvCaptureCAM_Giganetix::~CvCaptureCAM_Giganetix()
{
close();
}
/*----------------------------------------------------------------------------*/
void
CvCaptureCAM_Giganetix::close()
{
stop ();
(void)wrprDeviceDisconnect(m_device);
(void)wrprExitGigEVisionAPI ();
if(m_raw_image) cvReleaseImageHeader(&m_raw_image);
init ();
}
/*----------------------------------------------------------------------------*/
bool
CvCaptureCAM_Giganetix::open( int index )
{
bool b_ret = m_api_on;
CV_FUNCNAME("CvCaptureCAM_Giganetix::open");
__BEGIN__;
if(b_ret)
b_ret = m_api.IsValid ();
if(b_ret )
{
m_api->FindAllDevices (QTGIG_MAX_WAIT_TIME);
//TODO - search for the device as a DevicesList member
gige::DevicesList DevicesList = m_api->GetAllDevices ();
m_device = 0;
b_ret = false;
for (int i = 0; i < (int) DevicesList.size() && !b_ret; i++)
{
if((b_ret = i == index))
{
m_device = DevicesList[i];
b_ret = m_device->Connect ();
if(b_ret)
{
b_ret =
m_device->SetStringNodeValue("AcquisitionStatusSelector", "AcquisitionActive")
&&
m_device->SetStringNodeValue ("TriggerMode", "Off")
&&
m_device->SetStringNodeValue ("AcquisitionMode", "Continuous")
&&
m_device->SetIntegerNodeValue ("AcquisitionFrameCount", 20)
;
}
}
} // for
}
if(!b_ret)
{
CV_ERROR(CV_StsError, "Giganetix: Error cannot find camera\n");
close ();
} else {
start ();
}
__END__;
return b_ret;
}
/*----------------------------------------------------------------------------*/
void
CvCaptureCAM_Giganetix::grabImage ()
{
CV_FUNCNAME("CvCaptureCAM_Giganetix::grabImage");
__BEGIN__;
if(wrprDeviceIsValid(m_device) && wrprDeviceIsConnect(m_device))
{
if(!m_device->IsBufferEmpty ())
{
gige::IImageInfo imageInfo;
m_device->GetImageInfo (&imageInfo);
assert(imageInfo.IsValid());
if (m_device->GetPendingImagesCount() == 1)
{
UINT32 newPixelType;
UINT32 newWidth, newHeight;
imageInfo->GetPixelType(newPixelType);
imageInfo->GetSize(newWidth, newHeight);
//check whether the existing image header still matches the incoming frame
bool b_validation = m_raw_image != NULL;
if(b_validation)
{
b_validation =
m_raw_image->imageSize == (int)(imageInfo->GetRawDataSize ())
&&
m_rawImagePixelType == newPixelType;
//release the stale header so a matching one is created below
if(!b_validation) cvReleaseImageHeader(&m_raw_image);
}
m_rawImagePixelType = newPixelType;
m_monocrome = GvspGetBitsPerPixel((GVSP_PIXEL_TYPES)newPixelType) == IPL_DEPTH_8U;
try {
if (m_monocrome)
{
//TODO - For Mono & Color BayerRGB raw pixel types
if (!b_validation)
{
m_raw_image = cvCreateImageHeader (cvSize((int)newWidth, (int)newHeight),IPL_DEPTH_8U,1);
m_raw_image->origin = IPL_ORIGIN_TL;
m_raw_image->dataOrder = IPL_DATA_ORDER_PIXEL;
m_raw_image->widthStep = newWidth;
}
// Copy image.
// ::memcpy(m_raw_image->imageData, imageInfo->GetRawData (), imageInfo->GetRawDataSize ());
//TODO - Set pointer to image !
m_raw_image->imageData = (char*)(imageInfo->GetRawData ());
}
if (!m_monocrome && newPixelType == GVSP_PIX_RGB8_PACKED)
{
//TODO - 24 bit RGB color image.
if (!b_validation)
{
m_raw_image = cvCreateImageHeader (cvSize((int)newWidth, (int)newHeight), IPL_DEPTH_8U, 3); // 8 bits per channel to match the 24-bit packed RGB data
m_raw_image->origin = IPL_ORIGIN_TL;
m_raw_image->dataOrder = IPL_DATA_ORDER_PIXEL;
m_raw_image->widthStep = newWidth * 3;
}
m_raw_image->imageData = (char*)(imageInfo->GetRawData ());
}
} catch (...) {
CV_ERROR(CV_StsError, "Giganetix: failed to queue a buffer on device\n");
close ();
}
} else {
//TODO - all other pixel types
m_raw_image = 0;
CV_WARN("Giganetix: Undefined image pixel type\n");
}
m_device->PopImage (imageInfo);
m_device->ClearImageBuffer ();
}
}
__END__;
}
/*----------------------------------------------------------------------------*/
bool
CvCaptureCAM_Giganetix::start ()
{
CV_FUNCNAME("CvCaptureCAM_Giganetix::start");
__BEGIN__;
m_active = wrprDeviceIsValid(m_device) && wrprDeviceIsConnect(m_device);
if(m_active)
{
(void)m_device->SetIntegerNodeValue("TLParamsLocked", 1);
(void)m_device->CommandNodeExecute("AcquisitionStart");
m_active = m_device->GetBooleanNodeValue("AcquisitionStatus", m_active);
}
if(!m_active)
{
CV_ERROR(CV_StsError, "Giganetix: Cannot open camera\n");
close ();
}
__END__;
return m_active;
}
/*----------------------------------------------------------------------------*/
bool
CvCaptureCAM_Giganetix::stop ()
{
if (!m_active) return true;
CV_FUNCNAME("CvCaptureCAM_Giganetix::stop");
__BEGIN__;
if(wrprDeviceIsValid(m_device) && wrprDeviceIsConnect(m_device))
{
(void)m_device->GetBooleanNodeValue("AcquisitionStatus", m_active);
if(m_active)
{
(void)m_device->CommandNodeExecute("AcquisitionStop");
(void)m_device->SetIntegerNodeValue("TLParamsLocked", 0);
m_device->ClearImageBuffer ();
(void)m_device->GetBooleanNodeValue("AcquisitionStatus", m_active);
}
}
if(m_active)
{
CV_ERROR(CV_StsError, "Giganetix: Improper closure of the camera\n");
close ();
}
__END__;
return !m_active;
}
/*----------------------------------------------------------------------------*/
bool
CvCaptureCAM_Giganetix::grabFrame()
{
bool b_ret =
wrprDeviceIsValid(m_device)
&&
wrprDeviceIsConnect(m_device);
if(b_ret) grabImage ();
return b_ret;
}
/*----------------------------------------------------------------------------*/
IplImage*
CvCaptureCAM_Giganetix::retrieveFrame(int)
{
return (
wrprDeviceIsValid(m_device) && wrprDeviceIsConnect(m_device) ?
m_raw_image :
NULL
);
}
/*----------------------------------------------------------------------------*/
double
CvCaptureCAM_Giganetix::getProperty( int property_id )
{
double d_ret = -1.0;
INT64 i;
if(wrprDeviceIsConnect(m_device))
{
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
m_device->GetIntegerNodeValue ("Width", i);
d_ret = i;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
m_device->GetIntegerNodeValue ("Height", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_OFFSET_X:
m_device->GetIntegerNodeValue ("OffsetX", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_OFFSET_Y:
m_device->GetIntegerNodeValue ("OffsetY", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX:
m_device->GetIntegerNodeValue ("WidthMax", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX:
m_device->GetIntegerNodeValue ("HeightMax", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH:
m_device->GetIntegerNodeValue ("SensorWidth", i);
d_ret = i;
break;
case CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH:
m_device->GetIntegerNodeValue ("SensorHeight", i);
d_ret = i;
break;
case CV_CAP_PROP_FRAME_COUNT:
m_device->GetIntegerNodeValue ("AcquisitionFrameCount", i);
d_ret = i;
break;
case CV_CAP_PROP_EXPOSURE:
m_device->GetFloatNodeValue ("ExposureTime",d_ret);
break;
case CV_CAP_PROP_GAIN :
m_device->GetFloatNodeValue ("Gain",d_ret);
break;
case CV_CAP_PROP_TRIGGER :
bool b;
m_device->GetBooleanNodeValue ("TriggerMode",b);
d_ret = (double)b;
break;
case CV_CAP_PROP_TRIGGER_DELAY :
m_device->GetFloatNodeValue ("TriggerDelay",d_ret);
break;
default : ;
}
}
return d_ret;
}
/*----------------------------------------------------------------------------*/
bool
CvCaptureCAM_Giganetix::setProperty( int property_id, double value )
{
bool b_ret = wrprDeviceIsConnect(m_device);
if(b_ret)
{
bool b_val = m_active;
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
stop ();
b_ret = m_device->SetIntegerNodeValue ("Width", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX:
stop ();
b_ret = m_device->SetIntegerNodeValue ("WidthMax", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH:
stop ();
b_ret = m_device->SetIntegerNodeValue ("SensorWidth", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_FRAME_HEIGHT:
stop ();
b_ret = m_device->SetIntegerNodeValue ("Height", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX:
stop ();
b_ret = m_device->SetIntegerNodeValue ("HeightMax", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH:
stop ();
b_ret = m_device->SetIntegerNodeValue ("SensorHeight", (INT64)value);
if(b_val) start ();
break;
case CV_CAP_PROP_GIGA_FRAME_OFFSET_X: {
INT64 w, wmax, val = (INT64)value;
if((b_ret = m_device->GetIntegerNodeValue ("Width", w)))
if((b_ret = m_device->GetIntegerNodeValue ("WidthMax", wmax)))
b_ret = m_device->SetIntegerNodeValue ("OffsetX", (val + w) > wmax ? (wmax - w) : val);
} break;
case CV_CAP_PROP_GIGA_FRAME_OFFSET_Y: {
INT64 h, hmax, val = (INT64)value;
if((b_ret = m_device->GetIntegerNodeValue ("Height", h)))
if((b_ret = m_device->GetIntegerNodeValue ("HeightMax", hmax)))
b_ret = m_device->SetIntegerNodeValue ("OffsetY", (val + h) > hmax ? (hmax - h) : val);
b_ret = m_device->SetIntegerNodeValue ("OffsetY", (INT64)value);
}
break;
case CV_CAP_PROP_EXPOSURE:
b_ret = m_device->SetFloatNodeValue ("ExposureTime",value);
break;
case CV_CAP_PROP_GAIN :
b_ret = m_device->SetFloatNodeValue ("Gain",value);
break;
case CV_CAP_PROP_TRIGGER :
b_ret = m_device->SetBooleanNodeValue ("TriggerMode",(bool)value);
break;
case CV_CAP_PROP_TRIGGER_DELAY :
stop ();
b_ret = m_device->SetFloatNodeValue ("TriggerDelay",value);
if(b_val) start ();
break;
default:
b_ret = false;
}
}
return b_ret;
}
/*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
CvCapture*
cvCreateCameraCapture_Giganetix( int index )
{
CvCaptureCAM_Giganetix* capture = new CvCaptureCAM_Giganetix;
if (!(capture->open( index )))
{
delete capture;
capture = NULL;
}
return ((CvCapture*)capture);
}
/*----------------------------------------------------------------------------*/
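A usage sketch for this backend: open the first Giganetix camera and crop a region of interest through the GIGA_* properties handled by setProperty() above. The camera index and ROI numbers are placeholders.

// Hypothetical helper, not part of the original file.
static void giganetixRoiSketch()
{
CvCapture* cap = cvCreateCameraCapture_Giganetix(0);
if (!cap)
return;
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 640);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, 480);
cvSetCaptureProperty(cap, CV_CAP_PROP_GIGA_FRAME_OFFSET_X, 64); // clamped to WidthMax - Width
cvSetCaptureProperty(cap, CV_CAP_PROP_GIGA_FRAME_OFFSET_Y, 32); // clamped to HeightMax - Height
if (cvGrabFrame(cap))
{
IplImage* img = cvRetrieveFrame(cap, 0); // raw image owned by the capture
(void)img; // ... process the frame ...
}
cvReleaseCapture(&cap);
}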



@@ -1,374 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Nils Hasler, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Author: Nils Hasler <hasler@mpi-inf.mpg.de>
//
// Max-Planck-Institut Informatik
//
// capture video from a sequence of images
// the filename when opening can either be a printf pattern such as
// video%04d.png or the first frame of the sequence i.e. video0001.png
//
#include "precomp.hpp"
#include <sys/stat.h>
#ifdef NDEBUG
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
#ifndef _MAX_PATH
#define _MAX_PATH 1024
#endif
class CvCapture_Images : public CvCapture
{
public:
CvCapture_Images()
{
filename = 0;
currentframe = firstframe = 0;
length = 0;
frame = 0;
}
virtual ~CvCapture_Images()
{
close();
}
virtual bool open(const char* _filename);
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
protected:
char* filename; // actually a printf-pattern
unsigned currentframe;
unsigned firstframe; // number of first frame
unsigned length; // length of sequence
IplImage* frame;
};
void CvCapture_Images::close()
{
if( filename )
{
free(filename);
filename = 0;
}
currentframe = firstframe = 0;
length = 0;
cvReleaseImage( &frame );
}
bool CvCapture_Images::grabFrame()
{
char str[_MAX_PATH];
sprintf(str, filename, firstframe + currentframe);
cvReleaseImage(&frame);
frame = cvLoadImage(str, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if( frame )
currentframe++;
return frame != 0;
}
IplImage* CvCapture_Images::retrieveFrame(int)
{
return frame;
}
double CvCapture_Images::getProperty(int id)
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
CV_WARN("collections of images don't have framerates\n");
return 0;
case CV_CAP_PROP_POS_FRAMES:
return currentframe;
case CV_CAP_PROP_POS_AVI_RATIO:
return (double)currentframe / (double)(length - 1);
case CV_CAP_PROP_FRAME_WIDTH:
return frame ? frame->width : 0;
case CV_CAP_PROP_FRAME_HEIGHT:
return frame ? frame->height : 0;
case CV_CAP_PROP_FPS:
CV_WARN("collections of images don't have framerates\n");
return 1;
case CV_CAP_PROP_FOURCC:
CV_WARN("collections of images don't have 4-character codes\n");
return 0;
}
return 0;
}
bool CvCapture_Images::setProperty(int id, double value)
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping\n");
value = 0;
}
if(value >= length) {
CV_WARN("seeking beyond end of sequence - clamping\n");
value = length - 1;
}
currentframe = cvRound(value);
return true;
case CV_CAP_PROP_POS_AVI_RATIO:
if(value > 1) {
CV_WARN("seeking beyond end of sequence - clamping\n");
value = 1;
} else if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping\n");
value = 0;
}
currentframe = cvRound((length - 1) * value);
return true;
}
CV_WARN("unknown/unhandled property\n");
return false;
}
static char* icvExtractPattern(const char *filename, unsigned *offset)
{
char *name = (char *)filename;
if( !filename )
return 0;
// check whether this is a valid image sequence filename
char *at = strchr(name, '%');
if(at)
{
int dummy;
if(sscanf(at + 1, "%ud", &dummy) != 1)
return 0;
name = strdup(filename);
}
else // no pattern filename was given - extract the pattern
{
at = name;
// ignore directory names
char *slash = strrchr(at, '/');
if (slash) at = slash + 1;
#ifdef _WIN32
slash = strrchr(at, '\\');
if (slash) at = slash + 1;
#endif
while (*at && !isdigit(*at)) at++;
if(!*at)
return 0;
sscanf(at, "%u", offset);
int size = (int)strlen(filename) + 20;
name = (char *)malloc(size);
strncpy(name, filename, at - filename);
name[at - filename] = 0;
strcat(name, "%0");
int i;
char *extension;
for(i = 0, extension = at; isdigit(at[i]); i++, extension++)
;
char places[10];
sprintf(places, "%dd", i);
strcat(name, places);
strcat(name, extension);
}
return name;
}
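To make the two branches of icvExtractPattern concrete, tracing the code above gives the following results (illustrative, derived from the parsing logic; not part of the original file):

// unsigned offset = 0;
// icvExtractPattern("video%04d.png", &offset) -> copy of "video%04d.png", offset unchanged
// icvExtractPattern("video0001.png", &offset) -> "video%04d.png", offset = 1
// icvExtractPattern("dir01/video0001.png", &offset) -> "dir01/video%04d.png", offset = 1 (digits in directory names are ignored)
// icvExtractPattern("video.png", &offset) -> 0 (no frame number found)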
bool CvCapture_Images::open(const char * _filename)
{
unsigned offset = 0;
close();
filename = icvExtractPattern(_filename, &offset);
if(!filename)
return false;
// determine the length of the sequence
length = 0;
char str[_MAX_PATH];
for(;;)
{
sprintf(str, filename, offset + length);
struct stat s;
if(stat(str, &s))
{
if(length == 0 && offset == 0) // allow starting with 0 or 1
{
offset++;
continue;
}
}
if(!cvHaveImageReader(str))
break;
length++;
}
if(length == 0)
{
close();
return false;
}
firstframe = offset;
return true;
}
CvCapture* cvCreateFileCapture_Images(const char * filename)
{
CvCapture_Images* capture = new CvCapture_Images;
if( capture->open(filename) )
return capture;
delete capture;
return 0;
}
//
//
// image sequence writer
//
//
class CvVideoWriter_Images : public CvVideoWriter
{
public:
CvVideoWriter_Images()
{
filename = 0;
currentframe = 0;
}
virtual ~CvVideoWriter_Images() { close(); }
virtual bool open( const char* _filename );
virtual void close();
virtual bool writeFrame( const IplImage* );
protected:
char* filename;
unsigned currentframe;
};
bool CvVideoWriter_Images::writeFrame( const IplImage* image )
{
char str[_MAX_PATH];
sprintf(str, filename, currentframe);
int ret = cvSaveImage(str, image);
currentframe++;
return ret > 0;
}
void CvVideoWriter_Images::close()
{
if( filename )
{
free( filename );
filename = 0;
}
currentframe = 0;
}
bool CvVideoWriter_Images::open( const char* _filename )
{
unsigned offset = 0;
close();
filename = icvExtractPattern(_filename, &offset);
if(!filename)
return false;
char str[_MAX_PATH];
sprintf(str, filename, 0);
if(!cvHaveImageWriter(str))
{
close();
return false;
}
currentframe = offset;
return true;
}
CvVideoWriter* cvCreateVideoWriter_Images( const char* filename )
{
CvVideoWriter_Images *writer = new CvVideoWriter_Images;
if( writer->open( filename ))
return writer;
delete writer;
return 0;
}
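A round-trip sketch exercising both classes above: read an existing sequence and re-number it. The file name patterns are placeholders.

// Hypothetical helper, not part of the original file.
static void renumberSequenceSketch()
{
CvCapture* seq = cvCreateFileCapture_Images("in/frame0001.png"); // pattern inferred as in/frame%04d.png
CvVideoWriter* out = cvCreateVideoWriter_Images("out/frame%03d.png");
IplImage* frame = 0;
while (seq && out && cvGrabFrame(seq) && (frame = cvRetrieveFrame(seq, 0)) != 0)
cvWriteFrame(out, frame);
if (out)
cvReleaseVideoWriter(&out);
if (seq)
cvReleaseCapture(&seq);
}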


@@ -1,634 +0,0 @@
#ifdef HAVE_INTELPERC
#include "cap_intelperc.hpp"
namespace cv
{
///////////////// IntelPerCStreamBase //////////////////
IntelPerCStreamBase::IntelPerCStreamBase()
: m_profileIdx(-1)
, m_frameIdx(0)
, m_timeStampStartNS(0)
{
}
IntelPerCStreamBase::~IntelPerCStreamBase()
{
}
bool IntelPerCStreamBase::isValid()
{
return (m_device.IsValid() && m_stream.IsValid());
}
bool IntelPerCStreamBase::grabFrame()
{
if (!m_stream.IsValid())
return false;
if (-1 == m_profileIdx)
{
if (!setProperty(CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0))
return false;
}
PXCSmartSP sp;
m_pxcImage.ReleaseRef();
if (PXC_STATUS_NO_ERROR > m_stream->ReadStreamAsync(&m_pxcImage, &sp))
return false;
if (PXC_STATUS_NO_ERROR > sp->Synchronize())
return false;
if (0 == m_timeStampStartNS)
m_timeStampStartNS = m_pxcImage->QueryTimeStamp();
m_timeStamp = (double)((m_pxcImage->QueryTimeStamp() - m_timeStampStartNS) / 10000);
m_frameIdx++;
return true;
}
int IntelPerCStreamBase::getProfileIDX() const
{
return m_profileIdx;
}
double IntelPerCStreamBase::getProperty(int propIdx)
{
double ret = 0.0;
switch (propIdx)
{
case CV_CAP_PROP_INTELPERC_PROFILE_COUNT:
ret = (double)m_profiles.size();
break;
case CV_CAP_PROP_FRAME_WIDTH :
if ((0 <= m_profileIdx) && (m_profileIdx < (int)m_profiles.size()))
ret = (double)m_profiles[m_profileIdx].imageInfo.width;
break;
case CV_CAP_PROP_FRAME_HEIGHT :
if ((0 <= m_profileIdx) && (m_profileIdx < (int)m_profiles.size()))
ret = (double)m_profiles[m_profileIdx].imageInfo.height;
break;
case CV_CAP_PROP_FPS :
if ((0 <= m_profileIdx) && (m_profileIdx < (int)m_profiles.size()))
{
ret = ((double)m_profiles[m_profileIdx].frameRateMin.numerator / (double)m_profiles[m_profileIdx].frameRateMin.denominator
+ (double)m_profiles[m_profileIdx].frameRateMax.numerator / (double)m_profiles[m_profileIdx].frameRateMax.denominator) / 2.0;
}
break;
case CV_CAP_PROP_POS_FRAMES:
ret = (double)m_frameIdx;
break;
case CV_CAP_PROP_POS_MSEC:
ret = m_timeStamp;
break;
};
return ret;
}
bool IntelPerCStreamBase::setProperty(int propIdx, double propVal)
{
bool isSet = false;
switch (propIdx)
{
case CV_CAP_PROP_INTELPERC_PROFILE_IDX:
{
int propValInt = (int)propVal;
if (0 > propValInt)
{
m_profileIdx = propValInt;
isSet = true;
}
else if (propValInt < (int)m_profiles.size())
{
if (m_profileIdx != propValInt)
{
m_profileIdx = propValInt;
if (m_stream.IsValid())
m_stream->SetProfile(&m_profiles[m_profileIdx]);
m_frameIdx = 0;
m_timeStampStartNS = 0;
}
isSet = true;
}
}
break;
};
return isSet;
}
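As a sketch of how a client drives profile selection through this property: the PXCSession pointer is assumed to come from the PerC SDK's session factory in the caller, and this helper is not part of the original file.

static bool selectFirstProfileSketch(PXCSession* session)
{
IntelPerCStreamImage stream;
if (!stream.initStream(session))
return false;
int nProfiles = (int)stream.getProperty(CV_CAP_PROP_INTELPERC_PROFILE_COUNT);
// selecting a profile resets the frame counter and the timestamp base
return nProfiles > 0 && stream.setProperty(CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0);
}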
bool IntelPerCStreamBase::initDevice(PXCSession *session)
{
if (NULL == session)
return false;
pxcStatus sts = PXC_STATUS_NO_ERROR;
PXCSession::ImplDesc templat;
memset(&templat,0,sizeof(templat));
templat.group = PXCSession::IMPL_GROUP_SENSOR;
templat.subgroup= PXCSession::IMPL_SUBGROUP_VIDEO_CAPTURE;
for (int modidx = 0; PXC_STATUS_NO_ERROR <= sts; modidx++)
{
PXCSession::ImplDesc desc;
sts = session->QueryImpl(&templat, modidx, &desc);
if (PXC_STATUS_NO_ERROR > sts)
break;
PXCSmartPtr<PXCCapture> capture;
sts = session->CreateImpl<PXCCapture>(&desc, &capture);
if (!capture.IsValid())
continue;
/* enumerate devices */
for (int devidx = 0; PXC_STATUS_NO_ERROR <= sts; devidx++)
{
PXCSmartPtr<PXCCapture::Device> device;
sts = capture->CreateDevice(devidx, &device);
if (PXC_STATUS_NO_ERROR <= sts)
{
m_device = device.ReleasePtr();
return true;
}
}
}
return false;
}
void IntelPerCStreamBase::initStreamImpl(PXCImage::ImageType type)
{
if (!m_device.IsValid())
return;
pxcStatus sts = PXC_STATUS_NO_ERROR;
/* enumerate streams */
for (int streamidx = 0; PXC_STATUS_NO_ERROR <= sts; streamidx++)
{
PXCCapture::Device::StreamInfo sinfo;
sts = m_device->QueryStream(streamidx, &sinfo);
if (PXC_STATUS_NO_ERROR > sts)
break;
if (PXCCapture::VideoStream::CUID != sinfo.cuid)
continue;
if (type != sinfo.imageType)
continue;
sts = m_device->CreateStream<PXCCapture::VideoStream>(streamidx, &m_stream);
if (PXC_STATUS_NO_ERROR == sts)
break;
m_stream.ReleaseRef();
}
}
bool IntelPerCStreamBase::validProfile(const PXCCapture::VideoStream::ProfileInfo& /*pinfo*/)
{
return true;
}
void IntelPerCStreamBase::enumProfiles()
{
m_profiles.clear();
if (!m_stream.IsValid())
return;
pxcStatus sts = PXC_STATUS_NO_ERROR;
for (int profidx = 0; PXC_STATUS_NO_ERROR <= sts; profidx++)
{
PXCCapture::VideoStream::ProfileInfo pinfo;
sts = m_stream->QueryProfile(profidx, &pinfo);
if (PXC_STATUS_NO_ERROR > sts)
break;
if (validProfile(pinfo))
m_profiles.push_back(pinfo);
}
}
///////////////// IntelPerCStreamImage //////////////////
IntelPerCStreamImage::IntelPerCStreamImage()
{
}
IntelPerCStreamImage::~IntelPerCStreamImage()
{
}
bool IntelPerCStreamImage::initStream(PXCSession *session)
{
if (!initDevice(session))
return false;
initStreamImpl(PXCImage::IMAGE_TYPE_COLOR);
if (!m_stream.IsValid())
return false;
enumProfiles();
return true;
}
double IntelPerCStreamImage::getProperty(int propIdx)
{
switch (propIdx)
{
case CV_CAP_PROP_BRIGHTNESS:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_BRIGHTNESS, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_CONTRAST:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_CONTRAST, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_SATURATION:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_SATURATION, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_HUE:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_HUE, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_GAMMA:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_GAMMA, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_SHARPNESS:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_SHARPNESS, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_GAIN:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_GAIN, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_BACKLIGHT:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_BACK_LIGHT_COMPENSATION, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_EXPOSURE:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_COLOR_EXPOSURE, &fret))
return (double)fret;
return 0.0;
}
break;
//Add image stream specific properties
}
return IntelPerCStreamBase::getProperty(propIdx);
}
bool IntelPerCStreamImage::setProperty(int propIdx, double propVal)
{
switch (propIdx)
{
case CV_CAP_PROP_BRIGHTNESS:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_BRIGHTNESS, (float)propVal));
}
break;
case CV_CAP_PROP_CONTRAST:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_CONTRAST, (float)propVal));
}
break;
case CV_CAP_PROP_SATURATION:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_SATURATION, (float)propVal));
}
break;
case CV_CAP_PROP_HUE:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_HUE, (float)propVal));
}
break;
case CV_CAP_PROP_GAMMA:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_GAMMA, (float)propVal));
}
break;
case CV_CAP_PROP_SHARPNESS:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_SHARPNESS, (float)propVal));
}
break;
case CV_CAP_PROP_GAIN:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_GAIN, (float)propVal));
}
break;
case CV_CAP_PROP_BACKLIGHT:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_BACK_LIGHT_COMPENSATION, (float)propVal));
}
break;
case CV_CAP_PROP_EXPOSURE:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_COLOR_EXPOSURE, (float)propVal));
}
break;
//Add image stream specific properties
}
return IntelPerCStreamBase::setProperty(propIdx, propVal);
}
bool IntelPerCStreamImage::retrieveAsOutputArray(cv::OutputArray image)
{
if (!m_pxcImage.IsValid())
return false;
PXCImage::ImageInfo info;
m_pxcImage->QueryInfo(&info);
PXCImage::ImageData data;
m_pxcImage->AcquireAccess(PXCImage::ACCESS_READ, PXCImage::COLOR_FORMAT_RGB24, &data);
if (PXCImage::SURFACE_TYPE_SYSTEM_MEMORY != data.type)
return false;
cv::Mat temp(info.height, info.width, CV_8UC3, data.planes[0], data.pitches[0]);
temp.copyTo(image);
m_pxcImage->ReleaseAccess(&data);
return true;
}
///////////////// IntelPerCStreamDepth //////////////////
IntelPerCStreamDepth::IntelPerCStreamDepth()
{
}
IntelPerCStreamDepth::~IntelPerCStreamDepth()
{
}
bool IntelPerCStreamDepth::initStream(PXCSession *session)
{
if (!initDevice(session))
return false;
initStreamImpl(PXCImage::IMAGE_TYPE_DEPTH);
if (!m_stream.IsValid())
return false;
enumProfiles();
return true;
}
double IntelPerCStreamDepth::getProperty(int propIdx)
{
switch (propIdx)
{
case CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_DEPTH_LOW_CONFIDENCE_VALUE, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_DEPTH_SATURATION_VALUE, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD:
{
if (!m_device.IsValid())
return 0.0;
float fret = 0.0f;
if (PXC_STATUS_NO_ERROR == m_device->QueryProperty(PXCCapture::Device::PROPERTY_DEPTH_CONFIDENCE_THRESHOLD, &fret))
return (double)fret;
return 0.0;
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ:
{
if (!m_device.IsValid())
return 0.0f;
PXCPointF32 ptf;
if (PXC_STATUS_NO_ERROR == m_device->QueryPropertyAsPoint(PXCCapture::Device::PROPERTY_DEPTH_FOCAL_LENGTH, &ptf))
return (double)ptf.x;
return 0.0;
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT:
{
if (!m_device.IsValid())
return 0.0f;
PXCPointF32 ptf;
if (PXC_STATUS_NO_ERROR == m_device->QueryPropertyAsPoint(PXCCapture::Device::PROPERTY_DEPTH_FOCAL_LENGTH, &ptf))
return (double)ptf.y;
return 0.0;
}
break;
//Add depth stream specific properties
}
return IntelPerCStreamBase::getProperty(propIdx);
}
bool IntelPerCStreamDepth::setProperty(int propIdx, double propVal)
{
switch (propIdx)
{
case CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_DEPTH_LOW_CONFIDENCE_VALUE, (float)propVal));
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_DEPTH_SATURATION_VALUE, (float)propVal));
}
break;
case CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD:
{
if (!m_device.IsValid())
return false;
return (PXC_STATUS_NO_ERROR == m_device->SetProperty(PXCCapture::Device::PROPERTY_DEPTH_CONFIDENCE_THRESHOLD, (float)propVal));
}
break;
//Add depth stream specific properties
}
return IntelPerCStreamBase::setProperty(propIdx, propVal);
}
bool IntelPerCStreamDepth::retrieveDepthAsOutputArray(cv::OutputArray image)
{
return retriveFrame(CV_16SC1, 0, image);
}
bool IntelPerCStreamDepth::retrieveIRAsOutputArray(cv::OutputArray image)
{
return retriveFrame(CV_16SC1, 1, image);
}
bool IntelPerCStreamDepth::retrieveUVAsOutputArray(cv::OutputArray image)
{
return retriveFrame(CV_32FC2, 2, image);
}
bool IntelPerCStreamDepth::validProfile(const PXCCapture::VideoStream::ProfileInfo& pinfo)
{
return (PXCImage::COLOR_FORMAT_DEPTH == pinfo.imageInfo.format);
}
bool IntelPerCStreamDepth::retriveFrame(int type, int planeIdx, cv::OutputArray frame)
{
if (!m_pxcImage.IsValid())
return false;
PXCImage::ImageInfo info;
m_pxcImage->QueryInfo(&info);
PXCImage::ImageData data;
m_pxcImage->AcquireAccess(PXCImage::ACCESS_READ, &data);
if (PXCImage::SURFACE_TYPE_SYSTEM_MEMORY != data.type)
return false;
cv::Mat temp(info.height, info.width, type, data.planes[planeIdx], data.pitches[planeIdx]);
temp.copyTo(frame);
m_pxcImage->ReleaseAccess(&data);
return true;
}
///////////////// VideoCapture_IntelPerC //////////////////
VideoCapture_IntelPerC::VideoCapture_IntelPerC()
: m_contextOpened(false)
{
pxcStatus sts = PXCSession_Create(&m_session);
if (PXC_STATUS_NO_ERROR > sts)
return;
m_contextOpened = m_imageStream.initStream(m_session);
m_contextOpened &= m_depthStream.initStream(m_session);
}
VideoCapture_IntelPerC::~VideoCapture_IntelPerC(){}
double VideoCapture_IntelPerC::getProperty(int propIdx)
{
double propValue = 0;
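// the generator bits of propIdx select the image or depth stream; masking
// them off leaves the plain property id understood by the stream itself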
int purePropIdx = propIdx & ~CV_CAP_INTELPERC_GENERATORS_MASK;
if (CV_CAP_INTELPERC_IMAGE_GENERATOR == (propIdx & CV_CAP_INTELPERC_GENERATORS_MASK))
{
propValue = m_imageStream.getProperty(purePropIdx);
}
else if (CV_CAP_INTELPERC_DEPTH_GENERATOR == (propIdx & CV_CAP_INTELPERC_GENERATORS_MASK))
{
propValue = m_depthStream.getProperty(purePropIdx);
}
else
{
propValue = m_depthStream.getProperty(purePropIdx);
}
return propValue;
}
bool VideoCapture_IntelPerC::setProperty(int propIdx, double propVal)
{
bool isSet = false;
int purePropIdx = propIdx & ~CV_CAP_INTELPERC_GENERATORS_MASK;
if (CV_CAP_INTELPERC_IMAGE_GENERATOR == (propIdx & CV_CAP_INTELPERC_GENERATORS_MASK))
{
isSet = m_imageStream.setProperty(purePropIdx, propVal);
}
else if (CV_CAP_INTELPERC_DEPTH_GENERATOR == (propIdx & CV_CAP_INTELPERC_GENERATORS_MASK))
{
isSet = m_depthStream.setProperty(purePropIdx, propVal);
}
else
{
isSet = m_depthStream.setProperty(purePropIdx, propVal);
}
return isSet;
}
bool VideoCapture_IntelPerC::grabFrame()
{
if (!isOpened())
return false;
bool isGrabbed = false;
if (m_depthStream.isValid())
isGrabbed = m_depthStream.grabFrame();
if ((m_imageStream.isValid()) && (-1 != m_imageStream.getProfileIDX()))
isGrabbed &= m_imageStream.grabFrame();
return isGrabbed;
}
bool VideoCapture_IntelPerC::retrieveFrame(int outputType, cv::OutputArray frame)
{
switch (outputType)
{
case CV_CAP_INTELPERC_DEPTH_MAP:
return m_depthStream.retrieveDepthAsOutputArray(frame);
case CV_CAP_INTELPERC_UVDEPTH_MAP:
return m_depthStream.retrieveUVAsOutputArray(frame);
case CV_CAP_INTELPERC_IR_MAP:
return m_depthStream.retrieveIRAsOutputArray(frame);
case CV_CAP_INTELPERC_IMAGE:
return m_imageStream.retrieveAsOutputArray(frame);
}
return false;
}
int VideoCapture_IntelPerC::getCaptureDomain()
{
return CV_CAP_INTELPERC;
}
bool VideoCapture_IntelPerC::isOpened() const
{
return m_contextOpened;
}
}
#endif //HAVE_INTELPERC
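
How the backend above is driven from the public API, as a minimal sketch (device index, property value and include path are illustrative): one grab() pulls both streams, retrieve() selects a stream by output type, and per-stream properties are addressed by OR-ing a generator flag onto the property id, matching the mask logic in VideoCapture_IntelPerC.

    #include "opencv2/highgui/highgui.hpp"

    int main()
    {
        cv::VideoCapture capture(CV_CAP_INTELPERC);   // routed to VideoCapture_IntelPerC
        if (!capture.isOpened())
            return -1;
        // image-stream brightness: generator flag + plain property id
        capture.set(CV_CAP_INTELPERC_IMAGE_GENERATOR | CV_CAP_PROP_BRIGHTNESS, 64.0);
        cv::Mat depth, color;
        if (capture.grab())
        {
            capture.retrieve(depth, CV_CAP_INTELPERC_DEPTH_MAP);
            capture.retrieve(color, CV_CAP_INTELPERC_IMAGE);
        }
        return 0;
    }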

View File

@@ -1,115 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2014, Itseez, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
//M*/
#ifndef _CAP_INTELPERC_HPP_
#define _CAP_INTELPERC_HPP_
#include "precomp.hpp"
#ifdef HAVE_INTELPERC
#include "pxcsession.h"
#include "pxcsmartptr.h"
#include "pxccapture.h"
namespace cv
{
class IntelPerCStreamBase
{
public:
IntelPerCStreamBase();
virtual ~IntelPerCStreamBase();
bool isValid();
bool grabFrame();
int getProfileIDX() const;
public:
virtual bool initStream(PXCSession *session) = 0;
virtual double getProperty(int propIdx);
virtual bool setProperty(int propIdx, double propVal);
protected:
PXCSmartPtr<PXCCapture::Device> m_device;
bool initDevice(PXCSession *session);
PXCSmartPtr<PXCCapture::VideoStream> m_stream;
void initStreamImpl(PXCImage::ImageType type);
protected:
std::vector<PXCCapture::VideoStream::ProfileInfo> m_profiles;
int m_profileIdx;
int m_frameIdx;
pxcU64 m_timeStampStartNS;
double m_timeStamp;
PXCSmartPtr<PXCImage> m_pxcImage;
virtual bool validProfile(const PXCCapture::VideoStream::ProfileInfo& /*pinfo*/);
void enumProfiles();
};
class IntelPerCStreamImage
: public IntelPerCStreamBase
{
public:
IntelPerCStreamImage();
virtual ~IntelPerCStreamImage();
virtual bool initStream(PXCSession *session);
virtual double getProperty(int propIdx);
virtual bool setProperty(int propIdx, double propVal);
public:
bool retrieveAsOutputArray(OutputArray image);
};
class IntelPerCStreamDepth
: public IntelPerCStreamBase
{
public:
IntelPerCStreamDepth();
virtual ~IntelPerCStreamDepth();
virtual bool initStream(PXCSession *session);
virtual double getProperty(int propIdx);
virtual bool setProperty(int propIdx, double propVal);
public:
bool retrieveDepthAsOutputArray(OutputArray image);
bool retrieveIRAsOutputArray(OutputArray image);
bool retrieveUVAsOutputArray(OutputArray image);
protected:
virtual bool validProfile(const PXCCapture::VideoStream::ProfileInfo& pinfo);
protected:
bool retriveFrame(int type, int planeIdx, OutputArray frame);
};
class VideoCapture_IntelPerC : public IVideoCapture
{
public:
VideoCapture_IntelPerC();
virtual ~VideoCapture_IntelPerC();
virtual double getProperty(int propIdx);
virtual bool setProperty(int propIdx, double propVal);
virtual bool grabFrame();
virtual bool retrieveFrame(int outputType, OutputArray frame);
virtual int getCaptureDomain();
bool isOpened() const;
protected:
bool m_contextOpened;
PXCSmartPtr<PXCSession> m_session;
IntelPerCStreamImage m_imageStream;
IntelPerCStreamDepth m_depthStream;
};
}
#endif //HAVE_INTELPERC
#endif //_CAP_INTELPERC_HPP_
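
The header ties the backend into the capture machinery through the IVideoCapture interface from precomp.hpp. A skeletal backend with the same shape, as a sketch (class name and stub bodies are illustrative, assuming the interface as used above):

    #include "precomp.hpp"

    class VideoCapture_Skeleton : public cv::IVideoCapture
    {
    public:
        VideoCapture_Skeleton() : m_opened(false) {}
        virtual ~VideoCapture_Skeleton() {}
        virtual double getProperty(int /*propIdx*/) { return 0.0; }
        virtual bool setProperty(int /*propIdx*/, double /*propVal*/) { return false; }
        virtual bool grabFrame() { return m_opened; }    // latch a frame from the device
        virtual bool retrieveFrame(int /*outputType*/, cv::OutputArray frame)
        {
            frame.release();                             // decode the latched frame here
            return false;
        }
        virtual int getCaptureDomain() { return CV_CAP_INTELPERC; } // one CV_CAP_* domain per backend
        bool isOpened() const { return m_opened; }
    protected:
        bool m_opened;
    };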

View File

@@ -1,502 +0,0 @@
/*
* cap_ios_abstract_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
#pragma mark - Private Interface
@interface CvAbstractCamera ()
@property (nonatomic, retain) AVCaptureVideoPreviewLayer* captureVideoPreviewLayer;
- (void)deviceOrientationDidChange:(NSNotification*)notification;
- (void)startCaptureSession;
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
- (void)updateSize;
@end
#pragma mark - Implementation
@implementation CvAbstractCamera
#pragma mark Public
@synthesize imageWidth;
@synthesize imageHeight;
@synthesize defaultFPS;
@synthesize defaultAVCaptureDevicePosition;
@synthesize defaultAVCaptureVideoOrientation;
@synthesize defaultAVCaptureSessionPreset;
@synthesize captureSession;
@synthesize captureVideoPreviewLayer;
@synthesize videoCaptureConnection;
@synthesize running;
@synthesize captureSessionLoaded;
@synthesize useAVCaptureVideoPreviewLayer;
@synthesize parentView;
#pragma mark - Constructors
- (id)init;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );
running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset352x288;
self.parentView = nil;
self.useAVCaptureVideoPreviewLayer = NO;
}
return self;
}
- (id)initWithParentView:(UIView*)parent;
{
self = [super init];
if (self) {
// react to device orientation notifications
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
currentDeviceOrientation = [[UIDevice currentDevice] orientation];
// check if camera available
cameraAvailable = [UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera];
NSLog(@"camera available: %@", (cameraAvailable == YES ? @"YES" : @"NO") );
running = NO;
// set camera default configuration
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
self.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationLandscapeLeft;
self.defaultFPS = 15;
self.defaultAVCaptureSessionPreset = AVCaptureSessionPreset640x480;
self.parentView = parent;
self.useAVCaptureVideoPreviewLayer = YES;
}
return self;
}
- (void)dealloc;
{
[[NSNotificationCenter defaultCenter] removeObserver:self];
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
}
#pragma mark - Public interface
- (void)start;
{
if (![NSThread isMainThread]) {
NSLog(@"[Camera] Warning: Call start only from main thread");
[self performSelectorOnMainThread:@selector(start) withObject:nil waitUntilDone:NO];
return;
}
if (running == YES) {
return;
}
running = YES;
// TODO: update image size data before actually starting (needed for recording)
[self updateSize];
if (cameraAvailable) {
[self startCaptureSession];
}
}
- (void)pause;
{
running = NO;
[self.captureSession stopRunning];
}
- (void)stop;
{
running = NO;
// Release any retained subviews of the main view.
// e.g. self.myOutlet = nil;
[self.captureSession stopRunning];
self.captureSession = nil;
self.captureVideoPreviewLayer = nil;
self.videoCaptureConnection = nil;
captureSessionLoaded = NO;
}
// use front/back camera
- (void)switchCameras;
{
BOOL was_running = self.running;
if (was_running) {
[self stop];
}
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
} else {
self.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
}
if (was_running) {
[self start];
}
}
#pragma mark - Device Orientation Changes
- (void)deviceOrientationDidChange:(NSNotification*)notification
{
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
switch (orientation)
{
case UIDeviceOrientationPortrait:
case UIDeviceOrientationPortraitUpsideDown:
case UIDeviceOrientationLandscapeLeft:
case UIDeviceOrientationLandscapeRight:
currentDeviceOrientation = orientation;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break;
}
NSLog(@"deviceOrientationDidChange: %d", orientation);
[self updateOrientation];
}
#pragma mark - Private Interface
- (void)createCaptureSession;
{
// set a av capture session preset
self.captureSession = [[AVCaptureSession alloc] init];
if ([self.captureSession canSetSessionPreset:self.defaultAVCaptureSessionPreset]) {
[self.captureSession setSessionPreset:self.defaultAVCaptureSessionPreset];
} else if ([self.captureSession canSetSessionPreset:AVCaptureSessionPresetLow]) {
[self.captureSession setSessionPreset:AVCaptureSessionPresetLow];
} else {
NSLog(@"[Camera] Error: could not set session preset");
}
}
- (void)createCaptureDevice;
{
// setup the device
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
[self setDesiredCameraPosition:self.defaultAVCaptureDevicePosition];
NSLog(@"[Camera] device connected? %@", device.connected ? @"YES" : @"NO");
NSLog(@"[Camera] device position %@", (device.position == AVCaptureDevicePositionBack) ? @"back" : @"front");
}
- (void)createVideoPreviewLayer;
{
self.captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
if ([self.captureVideoPreviewLayer respondsToSelector:@selector(connection)])
{
if ([self.captureVideoPreviewLayer.connection isVideoOrientationSupported])
{
[self.captureVideoPreviewLayer.connection setVideoOrientation:self.defaultAVCaptureVideoOrientation];
}
}
else
{
// Deprecated in 6.0; here for backward compatibility
if ([self.captureVideoPreviewLayer isOrientationSupported])
{
[self.captureVideoPreviewLayer setOrientation:self.defaultAVCaptureVideoOrientation];
}
}
if (parentView != nil) {
self.captureVideoPreviewLayer.frame = self.parentView.bounds;
self.captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.parentView.layer addSublayer:self.captureVideoPreviewLayer];
}
NSLog(@"[Camera] created AVCaptureVideoPreviewLayer");
}
- (void)setDesiredCameraPosition:(AVCaptureDevicePosition)desiredPosition;
{
for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if ([device position] == desiredPosition) {
[self.captureSession beginConfiguration];
NSError* error;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if (!input) {
NSLog(@"error creating input %@", [error localizedDescription]);
}
// support for autofocus
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocos configuration %@", [error localizedDescription]);
}
}
// detach any inputs from a previous device before attaching the new one
for (AVCaptureInput *oldInput in self.captureSession.inputs) {
[self.captureSession removeInput:oldInput];
}
[self.captureSession addInput:input];
[self.captureSession commitConfiguration];
break;
}
}
}
- (void)startCaptureSession
{
if (!cameraAvailable) {
return;
}
if (self.captureSessionLoaded == NO) {
[self createCaptureSession];
[self createCaptureDevice];
[self createCaptureOutput];
// setup preview layer
if (self.useAVCaptureVideoPreviewLayer) {
[self createVideoPreviewLayer];
} else {
[self createCustomVideoPreview];
}
captureSessionLoaded = YES;
}
[self.captureSession startRunning];
}
- (void)createCaptureOutput;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %@ in a subclass", NSStringFromSelector(_cmd)];
}
- (void)createCustomVideoPreview;
{
[NSException raise:NSInternalInconsistencyException
format:@"You must override %@ in a subclass", NSStringFromSelector(_cmd)];
}
- (void)updateOrientation;
{
// nothing to do here
}
- (void)updateSize;
{
if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetPhoto]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetHigh]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetMedium]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPresetLow]) {
//TODO: find the correct resolution
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset352x288]) {
self.imageWidth = 352;
self.imageHeight = 288;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset640x480]) {
self.imageWidth = 640;
self.imageHeight = 480;
} else if ([self.defaultAVCaptureSessionPreset isEqualToString:AVCaptureSessionPreset1280x720]) {
self.imageWidth = 1280;
self.imageHeight = 720;
} else {
self.imageWidth = 640;
self.imageHeight = 480;
}
}
- (void)lockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked focus configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockFocus;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.focusMode = AVCaptureFocusModeContinuousAutoFocus;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autofocus configuration %@", [error localizedDescription]);
}
}
}
- (void)lockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked exposure configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockExposure;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for autoexposure configuration %@", [error localizedDescription]);
}
}
}
- (void)lockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeLocked]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeLocked;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for locked white balance configuration %@", [error localizedDescription]);
}
}
}
- (void) unlockBalance;
{
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
if ([device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
NSError *error = nil;
if ([device lockForConfiguration:&error]) {
device.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
[device unlockForConfiguration];
} else {
NSLog(@"unable to lock device for auto white balance configuration %@", [error localizedDescription]);
}
}
}
@end

View File

@@ -1,165 +0,0 @@
/*
* cap_ios_photo_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
#pragma mark - Private Interface
@interface CvPhotoCamera ()
@property (nonatomic, retain) AVCaptureStillImageOutput* stillImageOutput;
@end
#pragma mark - Implementation
@implementation CvPhotoCamera
#pragma mark Public
@synthesize stillImageOutput;
@synthesize delegate;
#pragma mark - Public interface
- (void)takePicture
{
if (cameraAvailable == NO) {
return;
}
cameraAvailable = NO;
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:self.videoCaptureConnection
completionHandler:
^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
if (error == nil && imageSampleBuffer != NULL)
{
// TODO check
// NSNumber* imageOrientation = [UIImage cgImageOrientationForUIDeviceOrientation:currentDeviceOrientation];
// CMSetAttachment(imageSampleBuffer, kCGImagePropertyOrientation, imageOrientation, 1);
NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
dispatch_async(dispatch_get_main_queue(), ^{
[self.captureSession stopRunning];
// Make sure we create objects on the main thread in the main context
UIImage* newImage = [UIImage imageWithData:jpegData];
//UIImageOrientation orientation = [newImage imageOrientation];
// TODO: only apply rotation, don't scale, since we can set this directly in the camera
/*
switch (orientation) {
case UIImageOrientationUp:
case UIImageOrientationDown:
newImage = [newImage imageWithAppliedRotationAndMaxSize:CGSizeMake(640.0, 480.0)];
break;
case UIImageOrientationLeft:
case UIImageOrientationRight:
newImage = [newImage imageWithMaxSize:CGSizeMake(640.0, 480.0)];
default:
break;
}
*/
// We have captured the image, we can allow the user to take another picture
cameraAvailable = YES;
NSLog(@"CvPhotoCamera captured image");
if (self.delegate) {
[self.delegate photoCamera:self capturedImage:newImage];
}
[self.captureSession startRunning];
});
}
}];
}
- (void)stop;
{
[super stop];
self.stillImageOutput = nil;
}
#pragma mark - Private Interface
- (void)createStillImageOutput;
{
// setup still image output with jpeg codec
self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = [NSDictionary dictionaryWithObjectsAndKeys:AVVideoCodecJPEG, AVVideoCodecKey, nil];
[self.stillImageOutput setOutputSettings:outputSettings];
[self.captureSession addOutput:self.stillImageOutput];
for (AVCaptureConnection *connection in self.stillImageOutput.connections) {
for (AVCaptureInputPort *port in [connection inputPorts]) {
if ([port.mediaType isEqual:AVMediaTypeVideo]) {
self.videoCaptureConnection = connection;
break;
}
}
if (self.videoCaptureConnection) {
break;
}
}
NSLog(@"[Camera] still image output created");
}
- (void)createCaptureOutput;
{
[self createStillImageOutput];
}
- (void)createCustomVideoPreview;
{
//do nothing, always use AVCaptureVideoPreviewLayer
}
@end

View File

@@ -1,618 +0,0 @@
/*
* cap_ios_video_camera.mm
* For iOS video I/O
* by Eduard Feicho on 29/07/12
* by Alexander Shishkov on 17/07/13
* Copyright 2012. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#import "opencv2/highgui/cap_ios.h"
#include "precomp.hpp"
#import <AssetsLibrary/AssetsLibrary.h>
static CGFloat DegreesToRadians(CGFloat degrees) {return degrees * M_PI / 180;}
#pragma mark - Private Interface
@interface CvVideoCamera ()
- (void)createVideoDataOutput;
- (void)createVideoFileOutput;
@property (nonatomic, retain) CALayer *customPreviewLayer;
@property (nonatomic, retain) AVCaptureVideoDataOutput *videoDataOutput;
@end
#pragma mark - Implementation
@implementation CvVideoCamera
@synthesize delegate;
@synthesize grayscaleMode;
@synthesize customPreviewLayer;
@synthesize videoDataOutput;
@synthesize recordVideo;
@synthesize rotateVideo;
//@synthesize videoFileOutput;
@synthesize recordAssetWriterInput;
@synthesize recordPixelBufferAdaptor;
@synthesize recordAssetWriter;
#pragma mark - Constructors
- (id)initWithParentView:(UIView*)parent;
{
self = [super initWithParentView:parent];
if (self) {
self.useAVCaptureVideoPreviewLayer = NO;
self.recordVideo = NO;
self.rotateVideo = NO;
}
return self;
}
#pragma mark - Public interface
- (void)start;
{
[super start];
if (self.recordVideo == YES) {
NSError* error;
if ([[NSFileManager defaultManager] fileExistsAtPath:[self videoFileString]]) {
[[NSFileManager defaultManager] removeItemAtPath:[self videoFileString] error:&error];
}
if (error == nil) {
NSLog(@"[Camera] Delete file %@", [self videoFileString]);
}
}
}
- (void)stop;
{
[super stop];
self.videoDataOutput = nil;
if (videoDataOutputQueue) {
dispatch_release(videoDataOutputQueue);
}
if (self.recordVideo == YES) {
if (self.recordAssetWriter.status == AVAssetWriterStatusWriting) {
[self.recordAssetWriter finishWriting];
NSLog(@"[Camera] recording stopped");
} else {
NSLog(@"[Camera] Recording Error: asset writer status is not writing");
}
self.recordAssetWriter = nil;
self.recordAssetWriterInput = nil;
self.recordPixelBufferAdaptor = nil;
}
[self.customPreviewLayer removeFromSuperlayer];
self.customPreviewLayer = nil;
}
// TODO fix
- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
{
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (interfaceOrientation) {
case UIInterfaceOrientationPortrait:
NSLog(@"to Portrait");
rotation_angle = 270;
break;
case UIInterfaceOrientationPortraitUpsideDown:
rotation_angle = 90;
NSLog(@"to UpsideDown");
break;
case UIInterfaceOrientationLandscapeLeft:
rotation_angle = 0;
NSLog(@"to LandscapeLeft");
break;
case UIInterfaceOrientationLandscapeRight:
rotation_angle = 180;
NSLog(@"to LandscapeRight");
break;
default:
break; // leave the layer in its last known orientation
}
switch (defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
break;
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
// TODO fix
- (void)layoutPreviewLayer;
{
NSLog(@"layout preview layer");
if (self.parentView != nil) {
CALayer* layer = self.customPreviewLayer;
CGRect bounds = self.customPreviewLayer.bounds;
int rotation_angle = 0;
bool flip_bounds = false;
switch (currentDeviceOrientation) {
case UIDeviceOrientationPortrait:
rotation_angle = 270;
break;
case UIDeviceOrientationPortraitUpsideDown:
rotation_angle = 90;
break;
case UIDeviceOrientationLandscapeLeft:
NSLog(@"left");
rotation_angle = 180;
break;
case UIDeviceOrientationLandscapeRight:
NSLog(@"right");
rotation_angle = 0;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
default:
break; // leave the layer in its last known orientation
}
switch (defaultAVCaptureVideoOrientation) {
case AVCaptureVideoOrientationLandscapeRight:
rotation_angle += 180;
break;
case AVCaptureVideoOrientationPortraitUpsideDown:
rotation_angle += 270;
break;
case AVCaptureVideoOrientationPortrait:
rotation_angle += 90;
break;
case AVCaptureVideoOrientationLandscapeLeft:
break;
default:
break;
}
rotation_angle = rotation_angle % 360;
if (rotation_angle == 90 || rotation_angle == 270) {
flip_bounds = true;
}
if (flip_bounds) {
NSLog(@"flip bounds");
bounds = CGRectMake(0, 0, bounds.size.height, bounds.size.width);
}
layer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
layer.affineTransform = CGAffineTransformMakeRotation( DegreesToRadians(rotation_angle) );
layer.bounds = bounds;
}
}
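// The two switches above add the device-orientation angle to a capture-orientation
// offset modulo 360; a result of 90 or 270 means the preview bounds get swapped.
// The same arithmetic isolated as a helper (name illustrative, not used elsewhere):
static int previewRotationAngle(int deviceAngle, int captureOffset)
{
    int angle = (deviceAngle + captureOffset) % 360;
    return angle; // 90 or 270 => swap the layer's width/height before rotating
}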
#pragma mark - Private Interface
- (void)createVideoDataOutput;
{
// Make a video data output
self.videoDataOutput = [AVCaptureVideoDataOutput new];
// In grayscale mode we want YUV (YpCbCr 4:2:0) so we can directly access the graylevel intensity values (Y component)
// In color mode, the BGRA format is used
OSType format = self.grayscaleMode ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
// discard if the data output queue is blocked (as we process the still image)
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
[self.captureSession addOutput:self.videoDataOutput];
}
[[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
// set default FPS
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMinFrameDuration) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMinFrameDuration = CMTimeMake(1, self.defaultFPS);
}
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMaxFrameDuration) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMaxFrameDuration = CMTimeMake(1, self.defaultFPS);
}
// set video mirroring for front camera (more intuitive)
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
} else {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
}
}
// set default video orientation
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
}
// create a custom preview layer
self.customPreviewLayer = [CALayer layer];
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
// create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
// a serial dispatch queue must be used to guarantee that video frames will be delivered in order
// see the header doc for setSampleBufferDelegate:queue: for more information
videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
[self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
NSLog(@"[Camera] created AVCaptureVideoDataOutput at %d FPS", self.defaultFPS);
}
- (void)createVideoFileOutput;
{
/* Video File Output in H.264, via AVAssetWriter */
NSLog(@"Create Video with dimensions %dx%d", self.imageWidth, self.imageHeight);
NSDictionary *outputSettings
= [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:self.imageWidth], AVVideoWidthKey,
[NSNumber numberWithInt:self.imageHeight], AVVideoHeightKey,
AVVideoCodecH264, AVVideoCodecKey,
nil
];
self.recordAssetWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:outputSettings];
int pixelBufferFormat = (self.grayscaleMode == YES) ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_32BGRA;
self.recordPixelBufferAdaptor =
[[AVAssetWriterInputPixelBufferAdaptor alloc]
initWithAssetWriterInput:self.recordAssetWriterInput
sourcePixelBufferAttributes:[NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:pixelBufferFormat], kCVPixelBufferPixelFormatTypeKey, nil]];
NSError* error = nil;
NSLog(@"Create AVAssetWriter with url: %@", [self videoFileURL]);
self.recordAssetWriter = [AVAssetWriter assetWriterWithURL:[self videoFileURL]
fileType:AVFileTypeMPEG4
error:&error];
if (error != nil) {
NSLog(@"[Camera] Unable to create AVAssetWriter: %@", error);
}
[self.recordAssetWriter addInput:self.recordAssetWriterInput];
self.recordAssetWriterInput.expectsMediaDataInRealTime = YES;
NSLog(@"[Camera] created AVAssetWriter");
}
- (void)createCaptureOutput;
{
[self createVideoDataOutput];
if (self.recordVideo == YES) {
[self createVideoFileOutput];
}
}
- (void)createCustomVideoPreview;
{
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
CGSize frameSize = CGSizeMake(CGImageGetWidth(image), CGImageGetHeight(image));
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:NO], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:NO], kCVPixelBufferCGBitmapContextCompatibilityKey,
nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, frameSize.width,
frameSize.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) CFBridgingRetain(options),
&pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, frameSize.width,
frameSize.height, 8, 4*frameSize.width, rgbColorSpace,
kCGImageAlphaPremultipliedFirst);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
#pragma mark - Protocol AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
if (self.delegate) {
// convert from Core Media to Core Video
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
void* bufferAddress;
size_t width;
size_t height;
size_t bytesPerRow;
CGColorSpaceRef colorSpace;
CGContextRef context;
int format_opencv;
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
format_opencv = CV_8UC1;
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
} else { // expect kCVPixelFormatType_32BGRA
format_opencv = CV_8UC4;
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
width = CVPixelBufferGetWidth(imageBuffer);
height = CVPixelBufferGetHeight(imageBuffer);
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
}
// delegate image processing to the delegate
cv::Mat image(height, width, format_opencv, bufferAddress, bytesPerRow);
CGImage* dstImage;
if ([self.delegate respondsToSelector:@selector(processImage:)]) {
[self.delegate processImage:image];
}
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == image.rows && width == image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
// (create color space, create graphics context, render buffer)
CGBitmapInfo bitmapInfo;
// basically we decide if it's a grayscale, rgb or rgba image
if (image.channels() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone;
} else if (image.channels() == 3) {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaNone;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaPremultipliedFirst;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
}
if (iOSimage) {
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
dstImage = CGBitmapContextCreateImage(context);
CGContextRelease(context);
} else {
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
dstImage = CGImageCreate(image.cols, // width
image.rows, // height
8, // bits per component
8 * image.elemSize(), // bits per pixel
image.step, // bytesPerRow
colorSpace, // colorspace
bitmapInfo, // bitmap info
provider, // CGDataProviderRef
NULL, // decode
false, // should interpolate
kCGRenderingIntentDefault // intent
);
CGDataProviderRelease(provider);
}
// render buffer
dispatch_sync(dispatch_get_main_queue(), ^{
self.customPreviewLayer.contents = (__bridge id)dstImage;
});
if (self.recordVideo == YES) {
lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
// CMTimeShow(lastSampleTime);
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
[self.recordAssetWriter startWriting];
[self.recordAssetWriter startSessionAtSourceTime:lastSampleTime];
if (self.recordAssetWriter.status != AVAssetWriterStatusWriting) {
NSLog(@"[Camera] Recording Error: asset writer status is not writing: %@", self.recordAssetWriter.error);
return;
} else {
NSLog(@"[Camera] Video recording started");
}
}
if (self.recordAssetWriterInput.readyForMoreMediaData) {
CVImageBufferRef pixelBuffer = [self pixelBufferFromCGImage:dstImage];
if (! [self.recordPixelBufferAdaptor appendPixelBuffer:pixelBuffer
withPresentationTime:lastSampleTime] ) {
NSLog(@"Video Writing Error");
}
}
}
// cleanup
CGImageRelease(dstImage);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
}
- (void)updateOrientation;
{
if (self.rotateVideo == YES)
{
NSLog(@"rotate..");
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
[self layoutPreviewLayer];
}
}
- (void)saveVideo;
{
if (self.recordVideo == NO) {
return;
}
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
if ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:[self videoFileURL]]) {
[library writeVideoAtPathToSavedPhotosAlbum:[self videoFileURL]
completionBlock:^(NSURL *assetURL, NSError *error){}];
}
}
- (NSURL *)videoFileURL;
{
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
NSURL *outputURL = [NSURL fileURLWithPath:outputPath];
NSFileManager *fileManager = [NSFileManager defaultManager];
if ([fileManager fileExistsAtPath:outputPath]) {
NSLog(@"file exists");
}
return outputURL;
}
- (NSString *)videoFileString;
{
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"output.mov"];
return outputPath;
}
@end
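
The delegate callback above wraps the camera buffer in a cv::Mat header without copying, and afterwards checks whether the processing delegate replaced the storage. That no-copy pattern, reduced to plain C++ (function and parameter names are placeholders):

    #include "opencv2/core/core.hpp"

    // wrap a BGRA frame in a Mat header; no pixel data is copied
    bool processInPlace(void* bufferAddress, int width, int height, size_t bytesPerRow)
    {
        cv::Mat image(height, width, CV_8UC4, bufferAddress, bytesPerRow);
        // ... hand `image` to a delegate that may reallocate it ...
        // rendering straight from the buffer is only valid if the header still
        // points at the original storage with the original geometry
        return image.data == (uchar*)bufferAddress
            && image.rows == height && image.cols == width
            && image.step == bytesPerRow && image.type() == CV_8UC4;
    }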

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,545 +0,0 @@
////////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//
// The code has been contributed by Justin G. Eskesen on 2010 Jan
//
#include "precomp.hpp"
#ifdef HAVE_PVAPI
#if !defined WIN32 && !defined _WIN32 && !defined _LINUX
#define _LINUX
#endif
#if defined(_x64) || defined (__x86_64) || defined (_M_X64)
#define _x64 1
#elif defined(_x86) || defined(__i386) || defined (_M_IX86)
#define _x86 1
#endif
#include <PvApi.h>
#ifdef WIN32
# include <io.h>
#else
# include <unistd.h>
#endif
//#include <arpa/inet.h>
#define MAX_CAMERAS 10
/********************* Capturing video from camera via PvAPI *********************/
class CvCaptureCAM_PvAPI : public CvCapture
{
public:
CvCaptureCAM_PvAPI();
virtual ~CvCaptureCAM_PvAPI()
{
close();
}
virtual bool open( int index );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain()
{
return CV_CAP_PVAPI;
}
protected:
#ifndef WIN32
virtual void Sleep(unsigned int time);
#endif
void stopCapture();
bool startCapture();
bool resizeCaptureFrame (int frameWidth, int frameHeight);
typedef struct
{
unsigned long UID;
tPvHandle Handle;
tPvFrame Frame;
} tCamera;
IplImage *frame;
IplImage *grayframe;
tCamera Camera;
tPvErr Errcode;
bool monocrome;
};
CvCaptureCAM_PvAPI::CvCaptureCAM_PvAPI()
{
monocrome=false;
frame = NULL;
grayframe = NULL;
memset(&this->Camera, 0, sizeof(this->Camera));
}
#ifndef WIN32
void CvCaptureCAM_PvAPI::Sleep(unsigned int time)
{
struct timespec t,r;
t.tv_sec = time / 1000;
t.tv_nsec = (time % 1000) * 1000000;
while(nanosleep(&t,&r)==-1)
t = r;
}
#endif
void CvCaptureCAM_PvAPI::close()
{
// Stop the acquisition & free the camera
stopCapture();
PvCameraClose(Camera.Handle);
PvUnInitialize();
}
// Initialize camera input
bool CvCaptureCAM_PvAPI::open( int index )
{
tPvCameraInfo cameraList[MAX_CAMERAS];
tPvCameraInfo camInfo;
tPvIpSettings ipSettings;
// a failure from PvInitialize() is deliberately ignored; if the API did not
// come up, the camera enumeration below finds no cameras and open() fails there
if (PvInitialize()) {
}
Sleep(1000);
int numCameras=PvCameraList(cameraList, MAX_CAMERAS, NULL);
if (numCameras <= 0 || index >= numCameras)
return false;
Camera.UID = cameraList[index].UniqueId;
if (!PvCameraInfo(Camera.UID,&camInfo) && !PvCameraIpSettingsGet(Camera.UID,&ipSettings))
{
/*
struct in_addr addr;
addr.s_addr = ipSettings.CurrentIpAddress;
printf("Current address:\t%s\n",inet_ntoa(addr));
addr.s_addr = ipSettings.CurrentIpSubnet;
printf("Current subnet:\t\t%s\n",inet_ntoa(addr));
addr.s_addr = ipSettings.CurrentIpGateway;
printf("Current gateway:\t%s\n",inet_ntoa(addr));
*/
}
else
{
fprintf(stderr,"ERROR: could not retrieve camera IP settings.\n");
return false;
}
if (PvCameraOpen(Camera.UID, ePvAccessMaster, &(Camera.Handle))==ePvErrSuccess)
{
tPvUint32 frameWidth, frameHeight;
unsigned long maxSize;
// By Default, try to set the pixel format to Mono8. This can be changed later
// via calls to setProperty. Some colour cameras (e.g. the Manta line) have a default
// image mode of Bayer8, which is currently unsupported, so Mono8 is a safe bet for
// startup.
monocrome = (PvAttrEnumSet(Camera.Handle, "PixelFormat", "Mono8") == ePvErrSuccess);
PvAttrUint32Get(Camera.Handle, "Width", &frameWidth);
PvAttrUint32Get(Camera.Handle, "Height", &frameHeight);
// Determine the maximum packet size supported by the system (ethernet adapter)
// and then configure the camera to use this value. If the system's NIC only supports
// an MTU of 1500 or lower, this will automatically configure an MTU of 1500.
// 8228 is the optimal size described by the API in order to enable jumbo frames
maxSize = 8228;
//PvAttrUint32Get(Camera.Handle,"PacketSize",&maxSize);
if (PvCaptureAdjustPacketSize(Camera.Handle,maxSize)!=ePvErrSuccess)
return false;
resizeCaptureFrame(frameWidth, frameHeight);
return startCapture();
}
fprintf(stderr,"Error cannot open camera\n");
return false;
}
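// Queue the camera's frame buffer; the frame data itself is collected later in retrieveFrame().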
bool CvCaptureCAM_PvAPI::grabFrame()
{
//if(Camera.Frame.Status != ePvErrUnplugged && Camera.Frame.Status != ePvErrCancelled)
return PvCaptureQueueFrame(Camera.Handle, &(Camera.Frame), NULL) == ePvErrSuccess;
}
IplImage* CvCaptureCAM_PvAPI::retrieveFrame(int)
{
if (PvCaptureWaitForFrameDone(Camera.Handle, &(Camera.Frame), 1000) == ePvErrSuccess)
{
if (!monocrome)
{
// For mono pixel formats the data lives in grayframe and is replicated into
// three channels; for Bgr24 the data is already in frame.
if (grayframe)
cvMerge(grayframe,grayframe,grayframe,NULL,frame);
return frame;
}
return grayframe;
}
else return NULL;
}
double CvCaptureCAM_PvAPI::getProperty( int property_id )
{
tPvUint32 nTemp;
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
PvAttrUint32Get(Camera.Handle, "Width", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_FRAME_HEIGHT:
PvAttrUint32Get(Camera.Handle, "Height", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_MONOCROME:
if (monocrome)
return 1;
else
return 0;
case CV_CAP_PROP_EXPOSURE:
PvAttrUint32Get(Camera.Handle,"ExposureValue",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_FPS:
tPvFloat32 nfTemp;
PvAttrFloat32Get(Camera.Handle, "StatFrameRate", &nfTemp);
return (double)nfTemp;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
char mEnable[4]; // large enough for "Off"/"On" plus the terminator
char mIp[16]; // large enough for "255.255.255.255" plus the terminator
PvAttrEnumGet(Camera.Handle,"MulticastEnable",mEnable,sizeof(mEnable),NULL);
if (strcmp(mEnable, "Off") == 0)
{
return -1;
}
else
{
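// Pack the dotted-quad address into a single integer, ((a*256 + b)*256 + c)*256 + d.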
long int ip;
int a,b,c,d;
PvAttrStringGet(Camera.Handle, "MulticastIPAddress",mIp,sizeof(mIp),NULL);
sscanf(mIp, "%d.%d.%d.%d", &a, &b, &c, &d); ip = ((a*256 + b)*256 + c)*256 + d;
return (double)ip;
}
case CV_CAP_PROP_GAIN:
PvAttrUint32Get(Camera.Handle, "GainValue", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
char triggerMode[256];
PvAttrEnumGet(Camera.Handle, "FrameStartTriggerMode", triggerMode, 256, NULL);
if (strcmp(triggerMode, "Freerun")==0)
return 0.0;
else if (strcmp(triggerMode, "SyncIn1")==0)
return 1.0;
else if (strcmp(triggerMode, "SyncIn2")==0)
return 2.0;
else if (strcmp(triggerMode, "FixedRate")==0)
return 3.0;
else if (strcmp(triggerMode, "Software")==0)
return 4.0;
else
return -1.0;
}
return -1.0;
}
bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
{
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
{
tPvUint32 currHeight;
PvAttrUint32Get(Camera.Handle, "Height", &currHeight);
stopCapture();
// Reallocate Frames
if (!resizeCaptureFrame(value, currHeight))
{
startCapture();
return false;
}
startCapture();
break;
}
case CV_CAP_PROP_FRAME_HEIGHT:
{
tPvUint32 currWidth;
PvAttrUint32Get(Camera.Handle, "Width", &currWidth);
stopCapture();
// Reallocate Frames
if (!resizeCaptureFrame(currWidth, value))
{
startCapture();
return false;
}
startCapture();
break;
}
case CV_CAP_PROP_MONOCROME:
if (value==1)
{
char pixelFormat[256];
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
if (strcmp(pixelFormat, "Mono8")==0 || strcmp(pixelFormat, "Mono16")==0)
{
monocrome=true;
}
else
return false;
}
else
monocrome=false;
break;
case CV_CAP_PROP_EXPOSURE:
if ((PvAttrUint32Set(Camera.Handle,"ExposureValue",(tPvUint32)value)==ePvErrSuccess))
break;
else
return false;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
if (value==-1)
{
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "Off")==ePvErrSuccess))
break;
else
return false;
}
else
{
cv::String ip=cv::format("%d.%d.%d.%d", ((int)value>>24)&255, ((int)value>>16)&255, ((int)value>>8)&255, (int)value&255);
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "On")==ePvErrSuccess) &&
(PvAttrStringSet(Camera.Handle, "MulticastIPAddress", ip.c_str())==ePvErrSuccess))
break;
else
return false;
}
case CV_CAP_PROP_GAIN:
if (PvAttrUint32Set(Camera.Handle,"GainValue",(tPvUint32)value)!=ePvErrSuccess)
{
return false;
}
break;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
tPvErr error;
if (value==0)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun");
else if (value==1)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn1");
else if (value==2)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "SyncIn2");
else if (value==3)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "FixedRate");
else if (value==4)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Software");
else
error = ePvErrOutOfRange;
if(error==ePvErrSuccess)
break;
else
return false;
default:
return false;
}
return true;
}
void CvCaptureCAM_PvAPI::stopCapture()
{
PvCommandRun(Camera.Handle, "AcquisitionStop");
PvCaptureEnd(Camera.Handle);
}
bool CvCaptureCAM_PvAPI::startCapture()
{
// Start the camera
PvCaptureStart(Camera.Handle);
// Set the camera to capture continuously
if(PvAttrEnumSet(Camera.Handle, "AcquisitionMode", "Continuous")!= ePvErrSuccess)
{
fprintf(stderr,"Could not set PvAPI Acquisition Mode\n");
return false;
}
if(PvCommandRun(Camera.Handle, "AcquisitionStart")!= ePvErrSuccess)
{
fprintf(stderr,"Could not start PvAPI acquisition\n");
return false;
}
if(PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun")!= ePvErrSuccess)
{
fprintf(stderr,"Error setting PvAPI trigger to \"Freerun\"");
return false;
}
return true;
}
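// (Re)allocate the capture buffers for the requested frame size, clamped to the sensor limits.
// The buffer layout must match the camera's current pixel format.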
bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
{
char pixelFormat[256];
tPvUint32 frameSize;
tPvUint32 sensorHeight;
tPvUint32 sensorWidth;
if (grayframe)
{
cvReleaseImage(&grayframe);
grayframe = NULL;
}
if (frame)
{
cvReleaseImage(&frame);
frame = NULL;
}
if (PvAttrUint32Get(Camera.Handle, "SensorWidth", &sensorWidth) != ePvErrSuccess)
{
return false;
}
if (PvAttrUint32Get(Camera.Handle, "SensorHeight", &sensorHeight) != ePvErrSuccess)
{
return false;
}
// Cap out of bounds widths to the max supported by the sensor
if ((frameWidth < 0) || ((tPvUint32)frameWidth > sensorWidth))
{
frameWidth = sensorWidth;
}
if ((frameHeight < 0) || ((tPvUint32)frameHeight > sensorHeight))
{
frameHeight = sensorHeight;
}
if (PvAttrUint32Set(Camera.Handle, "Height", frameHeight) != ePvErrSuccess)
{
return false;
}
if (PvAttrUint32Set(Camera.Handle, "Width", frameWidth) != ePvErrSuccess)
{
return false;
}
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
PvAttrUint32Get(Camera.Handle, "TotalBytesPerFrame", &frameSize);
if (strcmp(pixelFormat, "Mono8")==0)
{
grayframe = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 1);
grayframe->widthStep = (int)frameWidth;
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 3);
frame->widthStep = (int)frameWidth*3;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = grayframe->imageData;
}
else if (strcmp(pixelFormat, "Mono16")==0)
{
grayframe = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_16U, 1);
grayframe->widthStep = (int)frameWidth*2; // 2 bytes per 16-bit pixel
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_16U, 3);
frame->widthStep = (int)frameWidth*6;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = grayframe->imageData;
}
else if (strcmp(pixelFormat, "Bgr24")==0)
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 3);
frame->widthStep = (int)frameWidth*3;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
}
else
return false;
return true;
}
CvCapture* cvCreateCameraCapture_PvAPI( int index )
{
CvCaptureCAM_PvAPI* capture = new CvCaptureCAM_PvAPI;
if ( capture->open( index ))
return capture;
delete capture;
return NULL;
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,331 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Xavier Delacour, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// 2008-04-27 Xavier Delacour <xavier.delacour@gmail.com>
#include "precomp.hpp"
#include <unistd.h>
#include <unicap.h>
extern "C" {
#include <ucil.h>
}
#ifdef NDEBUG
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
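// Capture implementation on top of the unicap/ucil libraries: frames are grabbed in the
// device's native format and, if requested, converted to BGR24 with ucil_convert_buffer().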
struct CvCapture_Unicap : public CvCapture
{
CvCapture_Unicap() { init(); }
virtual ~CvCapture_Unicap() { close(); }
virtual bool open( int index );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_UNICAP; } // Return the type of the capture object: CV_CAP_VFW, etc...
bool shutdownDevice();
bool initDevice();
void init()
{
device_initialized = false;
desired_format = 0;
desired_size = cvSize(0,0);
convert_rgb = false;
handle = 0;
memset( &device, 0, sizeof(device) );
memset( &format_spec, 0, sizeof(format_spec) );
memset( &format, 0, sizeof(format) );
memset( &raw_buffer, 0, sizeof(raw_buffer) );
memset( &buffer, 0, sizeof(buffer) );
raw_frame = frame = 0;
}
bool device_initialized;
int desired_device;
int desired_format;
CvSize desired_size;
bool convert_rgb;
unicap_handle_t handle;
unicap_device_t device;
unicap_format_t format_spec;
unicap_format_t format;
unicap_data_buffer_t raw_buffer;
unicap_data_buffer_t buffer;
IplImage *raw_frame;
IplImage *frame;
};
bool CvCapture_Unicap::shutdownDevice() {
bool result = false;
CV_FUNCNAME("CvCapture_Unicap::shutdownDevice");
__BEGIN__;
if (!SUCCESS(unicap_stop_capture(handle)))
CV_ERROR(CV_StsError, "unicap: failed to stop capture on device\n");
if (!SUCCESS(unicap_close(handle)))
CV_ERROR(CV_StsError, "unicap: failed to close the device\n");
cvReleaseImage(&raw_frame);
cvReleaseImage(&frame);
device_initialized = false;
result = true;
__END__;
return result;
}
bool CvCapture_Unicap::initDevice() {
bool result = false;
CV_FUNCNAME("CvCapture_Unicap::initDevice");
__BEGIN__;
if (device_initialized && !shutdownDevice())
return false;
if(!SUCCESS(unicap_enumerate_devices(NULL, &device, desired_device)))
CV_ERROR(CV_StsError, "unicap: failed to get info for device\n");
if(!SUCCESS(unicap_open( &handle, &device)))
CV_ERROR(CV_StsError, "unicap: failed to open device\n");
unicap_void_format(&format_spec);
if (!SUCCESS(unicap_enumerate_formats(handle, &format_spec, &format, desired_format))) {
shutdownDevice();
CV_ERROR(CV_StsError, "unicap: failed to get video format\n");
}
int i;
if (format.sizes)
{
for (i = format.size_count - 1; i > 0; i--)
if (format.sizes[i].width == desired_size.width &&
format.sizes[i].height == desired_size.height)
break;
format.size.width = format.sizes[i].width;
format.size.height = format.sizes[i].height;
}
if (!SUCCESS(unicap_set_format(handle, &format))) {
shutdownDevice();
CV_ERROR(CV_StsError, "unicap: failed to set video format\n");
}
memset(&raw_buffer, 0x0, sizeof(unicap_data_buffer_t));
raw_frame = cvCreateImage(cvSize(format.size.width,
format.size.height),
8, format.bpp / 8);
memcpy(&raw_buffer.format, &format, sizeof(raw_buffer.format));
raw_buffer.data = (unsigned char*)raw_frame->imageData;
raw_buffer.buffer_size = format.size.width *
format.size.height * format.bpp / 8;
memset(&buffer, 0x0, sizeof(unicap_data_buffer_t));
memcpy(&buffer.format, &format, sizeof(buffer.format));
buffer.format.fourcc = UCIL_FOURCC('B','G','R','3');
buffer.format.bpp = 24;
// * todo support greyscale output
// buffer.format.fourcc = UCIL_FOURCC('G','R','E','Y');
// buffer.format.bpp = 8;
frame = cvCreateImage(cvSize(buffer.format.size.width,
buffer.format.size.height),
8, buffer.format.bpp / 8);
buffer.data = (unsigned char*)frame->imageData;
buffer.buffer_size = buffer.format.size.width *
buffer.format.size.height * buffer.format.bpp / 8;
if(!SUCCESS(unicap_start_capture(handle))) {
shutdownDevice();
CV_ERROR(CV_StsError, "unicap: failed to start capture on device\n");
}
device_initialized = true;
result = true;
__END__;
return result;
}
void CvCapture_Unicap::close() {
if(device_initialized)
shutdownDevice();
}
bool CvCapture_Unicap::grabFrame() {
bool result = false;
CV_FUNCNAME("CvCapture_Unicap::grabFrame");
__BEGIN__;
unicap_data_buffer_t *returned_buffer;
int retry_count = 100;
while (retry_count--) {
if(!SUCCESS(unicap_queue_buffer(handle, &raw_buffer)))
CV_ERROR(CV_StsError, "unicap: failed to queue a buffer on device\n");
if(SUCCESS(unicap_wait_buffer(handle, &returned_buffer)))
{
result = true;
EXIT;
}
CV_WARN("unicap: failed to wait for buffer on device\n");
usleep(100 * 1000);
}
__END__;
return result;
}
IplImage * CvCapture_Unicap::retrieveFrame(int) {
if (convert_rgb) {
ucil_convert_buffer(&buffer, &raw_buffer);
return frame;
}
return raw_frame;
}
double CvCapture_Unicap::getProperty(int id) {
switch (id) {
case CV_CAP_PROP_POS_MSEC: break;
case CV_CAP_PROP_POS_FRAMES: break;
case CV_CAP_PROP_POS_AVI_RATIO: break;
case CV_CAP_PROP_FRAME_WIDTH:
return desired_size.width;
case CV_CAP_PROP_FRAME_HEIGHT:
return desired_size.height;
case CV_CAP_PROP_FPS: break;
case CV_CAP_PROP_FOURCC: break;
case CV_CAP_PROP_FRAME_COUNT: break;
case CV_CAP_PROP_FORMAT:
return desired_format;
case CV_CAP_PROP_MODE: break;
case CV_CAP_PROP_BRIGHTNESS: break;
case CV_CAP_PROP_CONTRAST: break;
case CV_CAP_PROP_SATURATION: break;
case CV_CAP_PROP_HUE: break;
case CV_CAP_PROP_GAIN: break;
case CV_CAP_PROP_CONVERT_RGB:
return convert_rgb;
}
return 0;
}
bool CvCapture_Unicap::setProperty(int id, double value) {
bool reinit = false;
switch (id) {
case CV_CAP_PROP_POS_MSEC: break;
case CV_CAP_PROP_POS_FRAMES: break;
case CV_CAP_PROP_POS_AVI_RATIO: break;
case CV_CAP_PROP_FRAME_WIDTH:
desired_size.width = (int)value;
reinit = true;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
desired_size.height = (int)value;
reinit = true;
break;
case CV_CAP_PROP_FPS: break;
case CV_CAP_PROP_FOURCC: break;
case CV_CAP_PROP_FRAME_COUNT: break;
case CV_CAP_PROP_FORMAT:
desired_format = (int)value;
reinit = true;
break;
case CV_CAP_PROP_MODE: break;
case CV_CAP_PROP_BRIGHTNESS: break;
case CV_CAP_PROP_CONTRAST: break;
case CV_CAP_PROP_SATURATION: break;
case CV_CAP_PROP_HUE: break;
case CV_CAP_PROP_GAIN: break;
case CV_CAP_PROP_CONVERT_RGB:
convert_rgb = value != 0;
break;
}
if (reinit && !initDevice())
return false;
return true;
}
bool CvCapture_Unicap::open(int index)
{
close();
device_initialized = false;
desired_device = index < 0 ? 0 : index;
desired_format = 0;
desired_size = cvSize(320, 240);
convert_rgb = true;
return initDevice();
}
CvCapture * cvCreateCameraCapture_Unicap(const int index)
{
CvCapture_Unicap *cap = new CvCapture_Unicap;
if( cap->open(index) )
return cap;
delete cap;
return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,842 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vfw.h>
#ifdef __GNUC__
#define WM_CAP_FIRSTA (WM_USER)
#define capSendMessage(hwnd,m,w,l) (IsWindow(hwnd)?SendMessage(hwnd,m,w,l):0)
#endif
#if defined _M_X64 && defined _MSC_VER
#pragma optimize("",off)
#pragma warning(disable: 4748)
#endif
/********************* Capturing video from AVI via VFW ************************/
static BITMAPINFOHEADER icvBitmapHeader( int width, int height, int bpp, int compression = BI_RGB )
{
BITMAPINFOHEADER bmih;
memset( &bmih, 0, sizeof(bmih));
bmih.biSize = sizeof(bmih);
bmih.biWidth = width;
bmih.biHeight = height;
bmih.biBitCount = (WORD)bpp;
bmih.biCompression = compression;
bmih.biPlanes = 1;
return bmih;
}
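// AVIFileInit() must be called once per process before any other AVIFile function.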
static void icvInitCapture_VFW()
{
static int isInitialized = 0;
if( !isInitialized )
{
AVIFileInit();
isInitialized = 1;
}
}
class CvCaptureAVI_VFW : public CvCapture
{
public:
CvCaptureAVI_VFW()
{
CoInitialize(NULL);
init();
}
virtual ~CvCaptureAVI_VFW()
{
close();
CoUninitialize();
}
virtual bool open( const char* filename );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_VFW; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
void init();
PAVIFILE avifile;
PAVISTREAM avistream;
PGETFRAME getframe;
AVISTREAMINFO aviinfo;
BITMAPINFOHEADER * bmih;
CvSlice film_range;
double fps;
int pos;
IplImage* frame;
CvSize size;
};
void CvCaptureAVI_VFW::init()
{
avifile = 0;
avistream = 0;
getframe = 0;
memset( &aviinfo, 0, sizeof(aviinfo) );
bmih = 0;
film_range = cvSlice(0,0);
fps = 0;
pos = 0;
frame = 0;
size = cvSize(0,0);
}
void CvCaptureAVI_VFW::close()
{
if( getframe )
AVIStreamGetFrameClose( getframe );
if( avistream )
AVIStreamRelease( avistream );
if( avifile )
AVIFileRelease( avifile );
if (frame)
cvReleaseImage( &frame );
init();
}
bool CvCaptureAVI_VFW::open( const char* filename )
{
close();
icvInitCapture_VFW();
if( !filename )
return false;
HRESULT hr = AVIFileOpen( &avifile, filename, OF_READ, NULL );
if( SUCCEEDED(hr))
{
hr = AVIFileGetStream( avifile, &avistream, streamtypeVIDEO, 0 );
if( SUCCEEDED(hr))
{
hr = AVIStreamInfo( avistream, &aviinfo, sizeof(aviinfo));
if( SUCCEEDED(hr))
{
size.width = aviinfo.rcFrame.right - aviinfo.rcFrame.left;
size.height = aviinfo.rcFrame.bottom - aviinfo.rcFrame.top;
BITMAPINFOHEADER bmihdr = icvBitmapHeader( size.width, size.height, 24 );
film_range.start_index = (int)aviinfo.dwStart;
film_range.end_index = film_range.start_index + (int)aviinfo.dwLength;
fps = (double)aviinfo.dwRate/aviinfo.dwScale;
pos = film_range.start_index;
getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
if( getframe != 0 )
return true;
// Attempt to open as 8-bit AVI.
bmihdr = icvBitmapHeader( size.width, size.height, 8);
getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
if( getframe != 0 )
return true;
}
}
}
close();
return false;
}
bool CvCaptureAVI_VFW::grabFrame()
{
if( avistream )
bmih = (BITMAPINFOHEADER*)AVIStreamGetFrame( getframe, pos++ );
return bmih != 0;
}
IplImage* CvCaptureAVI_VFW::retrieveFrame(int)
{
if( avistream && bmih )
{
bool isColor = bmih->biBitCount == 24;
int nChannels = (isColor) ? 3 : 1;
IplImage src;
cvInitImageHeader( &src, cvSize( bmih->biWidth, bmih->biHeight ),
IPL_DEPTH_8U, nChannels, IPL_ORIGIN_BL, 4 );
char* dataPtr = (char*)(bmih + 1);
// Only account for the color map size if we are an 8-bit image and the color map is used
if (!isColor)
{
static int RGBQUAD_SIZE_PER_BYTE = sizeof(RGBQUAD)/sizeof(BYTE);
int offsetFromColormapToData = (int)bmih->biClrUsed*RGBQUAD_SIZE_PER_BYTE;
dataPtr += offsetFromColormapToData;
}
cvSetData( &src, dataPtr, src.widthStep );
if( !frame || frame->width != src.width || frame->height != src.height )
{
cvReleaseImage( &frame );
frame = cvCreateImage( cvGetSize(&src), 8, nChannels );
}
cvFlip( &src, frame, 0 );
return frame;
}
return 0;
}
double CvCaptureAVI_VFW::getProperty( int property_id )
{
switch( property_id )
{
case CV_CAP_PROP_POS_MSEC:
return cvRound(pos*1000./fps);
case CV_CAP_PROP_POS_FRAMES:
return pos;
case CV_CAP_PROP_POS_AVI_RATIO:
return (pos - film_range.start_index)/
(film_range.end_index - film_range.start_index + 1e-10);
case CV_CAP_PROP_FRAME_WIDTH:
return size.width;
case CV_CAP_PROP_FRAME_HEIGHT:
return size.height;
case CV_CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_FOURCC:
return aviinfo.fccHandler;
case CV_CAP_PROP_FRAME_COUNT:
return film_range.end_index - film_range.start_index;
}
return 0;
}
bool CvCaptureAVI_VFW::setProperty( int property_id, double value )
{
switch( property_id )
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
case CV_CAP_PROP_POS_AVI_RATIO:
{
switch( property_id )
{
case CV_CAP_PROP_POS_MSEC:
pos = cvRound(value*fps*0.001);
break;
case CV_CAP_PROP_POS_AVI_RATIO:
pos = cvRound(value*(film_range.end_index -
film_range.start_index) +
film_range.start_index);
break;
default:
pos = cvRound(value);
}
if( pos < film_range.start_index )
pos = film_range.start_index;
if( pos > film_range.end_index )
pos = film_range.end_index;
}
break;
default:
return false;
}
return true;
}
CvCapture* cvCreateFileCapture_VFW (const char* filename)
{
CvCaptureAVI_VFW* capture = new CvCaptureAVI_VFW;
if( capture->open(filename) )
return capture;
delete capture;
return 0;
}
/********************* Capturing video from camera via VFW *********************/
class CvCaptureCAM_VFW : public CvCapture
{
public:
CvCaptureCAM_VFW() { init(); }
virtual ~CvCaptureCAM_VFW() { close(); }
virtual bool open( int index );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_VFW; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
void init();
void closeHIC();
static LRESULT PASCAL frameCallback( HWND hWnd, VIDEOHDR* hdr );
CAPDRIVERCAPS caps;
HWND capWnd;
VIDEOHDR* hdr;
DWORD fourcc;
int width, height;
int widthSet, heightSet;
HIC hic;
IplImage* frame;
};
void CvCaptureCAM_VFW::init()
{
memset( &caps, 0, sizeof(caps) );
capWnd = 0;
hdr = 0;
fourcc = 0;
hic = 0;
frame = 0;
width = height = -1;
widthSet = heightSet = 0;
}
void CvCaptureCAM_VFW::closeHIC()
{
if( hic )
{
ICDecompressEnd( hic );
ICClose( hic );
hic = 0;
}
}
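// Frame callback invoked by VFW for every captured frame; it only stores the frame header,
// which is consumed by the next retrieveFrame() call.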
LRESULT PASCAL CvCaptureCAM_VFW::frameCallback( HWND hWnd, VIDEOHDR* hdr )
{
CvCaptureCAM_VFW* capture = 0;
if (!hWnd) return FALSE;
capture = (CvCaptureCAM_VFW*)capGetUserData(hWnd);
capture->hdr = hdr;
return (LRESULT)TRUE;
}
// Initialize camera input
bool CvCaptureCAM_VFW::open( int wIndex )
{
char szDeviceName[80];
char szDeviceVersion[80];
HWND hWndC = 0;
close();
if( (unsigned)wIndex >= 10 )
wIndex = 0;
for( ; wIndex < 10; wIndex++ )
{
if( capGetDriverDescription( wIndex, szDeviceName,
sizeof (szDeviceName), szDeviceVersion,
sizeof (szDeviceVersion)))
{
hWndC = capCreateCaptureWindow ( "My Own Capture Window",
WS_POPUP | WS_CHILD, 0, 0, 320, 240, 0, 0);
if( capDriverConnect (hWndC, wIndex))
break;
DestroyWindow( hWndC );
hWndC = 0;
}
}
if( hWndC )
{
capWnd = hWndC;
hdr = 0;
hic = 0;
fourcc = (DWORD)-1;
memset( &caps, 0, sizeof(caps));
capDriverGetCaps( hWndC, &caps, sizeof(caps));
CAPSTATUS status = {};
capGetStatus(hWndC, &status, sizeof(status));
::SetWindowPos(hWndC, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
capSetUserData( hWndC, (size_t)this );
capSetCallbackOnFrame( hWndC, frameCallback );
CAPTUREPARMS p;
capCaptureGetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
p.dwRequestMicroSecPerFrame = 66667/2; // 33333us per frame ~ 30 FPS
capCaptureSetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
//capPreview( hWndC, 1 );
capPreviewScale(hWndC,FALSE);
capPreviewRate(hWndC,1);
// Get frame initial parameters.
const DWORD size = capGetVideoFormatSize(capWnd);
if( size > 0 )
{
unsigned char *pbi = new unsigned char[size];
if( pbi )
{
if( capGetVideoFormat(capWnd, pbi, size) == size )
{
BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
widthSet = vfmt.biWidth;
heightSet = vfmt.biHeight;
fourcc = vfmt.biCompression;
}
delete []pbi;
}
}
// An alternative way in case of failure.
if( widthSet == 0 || heightSet == 0 )
{
widthSet = status.uiImageWidth;
heightSet = status.uiImageHeight;
}
}
return capWnd != 0;
}
void CvCaptureCAM_VFW::close()
{
if( capWnd )
{
capSetCallbackOnFrame( capWnd, NULL );
capDriverDisconnect( capWnd );
DestroyWindow( capWnd );
closeHIC();
}
cvReleaseImage( &frame );
init();
}
bool CvCaptureCAM_VFW::grabFrame()
{
if( capWnd )
return capGrabFrameNoStop(capWnd) == TRUE;
return false;
}
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
BITMAPINFO vfmt;
memset( &vfmt, 0, sizeof(vfmt));
BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;
if( !capWnd )
return 0;
const DWORD sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
const int prevWidth = frame ? frame->width : 0;
const int prevHeight = frame ? frame->height : 0;
if( !hdr || hdr->lpData == 0 || sz == 0 )
return 0;
if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
{
cvReleaseImage( &frame );
frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
}
if( vfmt0.biCompression != BI_RGB ||
vfmt0.biBitCount != 24 )
{
BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );
if( hic == 0 || fourcc != vfmt0.biCompression ||
prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
{
closeHIC();
hic = ICOpen( MAKEFOURCC('V','I','D','C'),
vfmt0.biCompression, ICMODE_DECOMPRESS );
if( hic )
{
if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
{
closeHIC();
return 0;
}
}
}
if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
&vfmt1, frame->imageData ) != ICERR_OK )
{
closeHIC();
return 0;
}
cvFlip( frame, frame, 0 );
}
else
{
IplImage src;
cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
cvSetData( &src, hdr->lpData, src.widthStep );
cvFlip( &src, frame, 0 );
}
return frame;
}
double CvCaptureCAM_VFW::getProperty( int property_id )
{
switch( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
return widthSet;
case CV_CAP_PROP_FRAME_HEIGHT:
return heightSet;
case CV_CAP_PROP_FOURCC:
return fourcc;
case CV_CAP_PROP_FPS:
{
CAPTUREPARMS params = {};
if( capCaptureGetSetup(capWnd, &params, sizeof(params)) )
return 1e6 / params.dwRequestMicroSecPerFrame;
}
break;
default:
break;
}
return 0;
}
bool CvCaptureCAM_VFW::setProperty(int property_id, double value)
{
bool handledSize = false;
switch( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
width = cvRound(value);
handledSize = true;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
height = cvRound(value);
handledSize = true;
break;
case CV_CAP_PROP_FOURCC:
break;
case CV_CAP_PROP_FPS:
if( value > 0 )
{
CAPTUREPARMS params;
if( capCaptureGetSetup(capWnd, &params, sizeof(params)) )
{
params.dwRequestMicroSecPerFrame = cvRound(1e6/value);
return capCaptureSetSetup(capWnd, &params, sizeof(params)) == TRUE;
}
}
break;
default:
break;
}
if ( handledSize )
{
// If both width and height are set then change frame size.
if( width > 0 && height > 0 )
{
const DWORD size = capGetVideoFormatSize(capWnd);
if( size == 0 )
return false;
unsigned char *pbi = new unsigned char[size];
if( !pbi )
return false;
if( capGetVideoFormat(capWnd, pbi, size) != size )
{
delete []pbi;
return false;
}
BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
bool success = true;
if( width != vfmt.biWidth || height != vfmt.biHeight )
{
// Change frame size.
vfmt.biWidth = width;
vfmt.biHeight = height;
vfmt.biSizeImage = height * ((width * vfmt.biBitCount + 31) / 32) * 4;
vfmt.biCompression = BI_RGB;
success = capSetVideoFormat(capWnd, pbi, size) == TRUE;
}
if( success )
{
// Adjust capture window size.
CAPSTATUS status = {};
capGetStatus(capWnd, &status, sizeof(status));
::SetWindowPos(capWnd, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
// Store frame size.
widthSet = width;
heightSet = height;
}
delete []pbi;
width = height = -1;
return success;
}
return true;
}
return false;
}
CvCapture* cvCreateCameraCapture_VFW( int index )
{
CvCaptureCAM_VFW* capture = new CvCaptureCAM_VFW;
if( capture->open( index ))
return capture;
delete capture;
return 0;
}
/*************************** writing AVIs ******************************/
class CvVideoWriter_VFW : public CvVideoWriter
{
public:
CvVideoWriter_VFW() { init(); }
virtual ~CvVideoWriter_VFW() { close(); }
virtual bool open( const char* filename, int fourcc,
double fps, CvSize frameSize, bool isColor );
virtual void close();
virtual bool writeFrame( const IplImage* );
protected:
void init();
bool createStreams( CvSize frameSize, bool isColor );
PAVIFILE avifile;
PAVISTREAM compressed;
PAVISTREAM uncompressed;
double fps;
IplImage* tempFrame;
long pos;
int fourcc;
};
void CvVideoWriter_VFW::init()
{
avifile = 0;
compressed = uncompressed = 0;
fps = 0;
tempFrame = 0;
pos = 0;
fourcc = 0;
}
void CvVideoWriter_VFW::close()
{
if( uncompressed )
AVIStreamRelease( uncompressed );
if( compressed )
AVIStreamRelease( compressed );
if( avifile )
AVIFileRelease( avifile );
cvReleaseImage( &tempFrame );
init();
}
// philipg: made this code capable of writing 8bpp grayscale bitmaps
struct BITMAPINFO_8Bit
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[256];
};
bool CvVideoWriter_VFW::open( const char* filename, int _fourcc, double _fps, CvSize frameSize, bool isColor )
{
close();
icvInitCapture_VFW();
if( AVIFileOpen( &avifile, filename, OF_CREATE | OF_WRITE, 0 ) == AVIERR_OK )
{
fourcc = _fourcc;
fps = _fps;
if( frameSize.width > 0 && frameSize.height > 0 &&
!createStreams( frameSize, isColor ) )
{
close();
return false;
}
return true;
}
else
return false;
}
bool CvVideoWriter_VFW::createStreams( CvSize frameSize, bool isColor )
{
if( !avifile )
return false;
AVISTREAMINFO aviinfo;
BITMAPINFO_8Bit bmih;
bmih.bmiHeader = icvBitmapHeader( frameSize.width, frameSize.height, isColor ? 24 : 8 );
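// Fill in an identity grayscale palette; it is only used by 8bpp (grayscale) streams.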
for( int i = 0; i < 256; i++ )
{
bmih.bmiColors[i].rgbBlue = (BYTE)i;
bmih.bmiColors[i].rgbGreen = (BYTE)i;
bmih.bmiColors[i].rgbRed = (BYTE)i;
bmih.bmiColors[i].rgbReserved = 0;
}
memset( &aviinfo, 0, sizeof(aviinfo));
aviinfo.fccType = streamtypeVIDEO;
aviinfo.fccHandler = 0;
// use highest possible accuracy for dwRate/dwScale
aviinfo.dwScale = (DWORD)((double)0x7FFFFFFF / fps);
aviinfo.dwRate = cvRound(fps * aviinfo.dwScale);
aviinfo.rcFrame.top = aviinfo.rcFrame.left = 0;
aviinfo.rcFrame.right = frameSize.width;
aviinfo.rcFrame.bottom = frameSize.height;
if( AVIFileCreateStream( avifile, &uncompressed, &aviinfo ) == AVIERR_OK )
{
AVICOMPRESSOPTIONS copts, *pcopts = &copts;
copts.fccType = streamtypeVIDEO;
copts.fccHandler = fourcc != -1 ? fourcc : 0;
copts.dwKeyFrameEvery = 1;
copts.dwQuality = 10000;
copts.dwBytesPerSecond = 0;
copts.dwFlags = AVICOMPRESSF_VALID;
copts.lpFormat = &bmih;
copts.cbFormat = (isColor ? sizeof(BITMAPINFOHEADER) : sizeof(bmih));
copts.lpParms = 0;
copts.cbParms = 0;
copts.dwInterleaveEvery = 0;
if( fourcc != -1 || AVISaveOptions( 0, 0, 1, &uncompressed, &pcopts ) == TRUE )
{
if( AVIMakeCompressedStream( &compressed, uncompressed, pcopts, 0 ) == AVIERR_OK &&
AVIStreamSetFormat( compressed, 0, &bmih, sizeof(bmih)) == AVIERR_OK )
{
fourcc = (int)copts.fccHandler;
tempFrame = cvCreateImage( frameSize, 8, (isColor ? 3 : 1) );
return true;
}
}
}
return false;
}
bool CvVideoWriter_VFW::writeFrame( const IplImage* image )
{
bool result = false;
CV_FUNCNAME( "CvVideoWriter_VFW::writeFrame" );
__BEGIN__;
if( !image )
EXIT;
if( !compressed && !createStreams( cvGetSize(image), image->nChannels > 1 ))
EXIT;
if( image->width != tempFrame->width || image->height != tempFrame->height )
CV_ERROR( CV_StsUnmatchedSizes,
"image size is different from the currently set frame size" );
if( image->nChannels != tempFrame->nChannels ||
image->depth != tempFrame->depth ||
image->origin == 0 ||
image->widthStep != cvAlign(image->width*image->nChannels*((image->depth & 255)/8), 4))
{
cvConvertImage( image, tempFrame, image->origin == 0 ? CV_CVTIMG_FLIP : 0 );
image = (const IplImage*)tempFrame;
}
result = AVIStreamWrite( compressed, pos++, 1, image->imageData,
image->imageSize, AVIIF_KEYFRAME, 0, 0 ) == AVIERR_OK;
__END__;
return result;
}
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int isColor )
{
CvVideoWriter_VFW* writer = new CvVideoWriter_VFW;
if( writer->open( filename, fourcc, fps, frameSize, isColor != 0 ))
return writer;
delete writer;
return 0;
}

View File

@@ -1,352 +0,0 @@
#include "precomp.hpp"
#include "xiApi.h"
#include "xiExt.h"
#include "m3Api.h"
/**********************************************************************************/
class CvCaptureCAM_XIMEA : public CvCapture
{
public:
CvCaptureCAM_XIMEA() { init(); }
virtual ~CvCaptureCAM_XIMEA() { close(); }
virtual bool open( int index );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_XIAPI; } // Return the type of the capture object: CV_CAP_VFW, etc...
private:
void init();
void errMsg(const char* msg, int errNum);
void resetCvImage();
int getBpp();
IplImage* frame;
HANDLE hmv;
DWORD numDevices;
int timeout;
XI_IMG image;
};
/**********************************************************************************/
CvCapture* cvCreateCameraCapture_XIMEA( int index )
{
CvCaptureCAM_XIMEA* capture = new CvCaptureCAM_XIMEA;
if( capture->open( index ))
return capture;
delete capture;
return 0;
}
/**********************************************************************************/
// Enumerate connected devices
void CvCaptureCAM_XIMEA::init()
{
xiGetNumberDevices( &numDevices);
hmv = NULL;
frame = NULL;
timeout = 0;
memset(&image, 0, sizeof(XI_IMG));
}
/**********************************************************************************/
// Initialize camera input
bool CvCaptureCAM_XIMEA::open( int wIndex )
{
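// Convenience macro: jump to the shared error handler whenever a xiApi call fails.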
#define HandleXiResult(res) if (res!=XI_OK) goto error;
int mvret = XI_OK;
if(numDevices == 0)
return false;
if((mvret = xiOpenDevice( wIndex, &hmv)) != XI_OK)
{
errMsg("Open XI_DEVICE failed", mvret);
return false;
}
int width = 0;
int height = 0;
int isColor = 0;
// always use auto exposure/gain
mvret = xiSetParamInt( hmv, XI_PRM_AEAG, 1);
HandleXiResult(mvret);
mvret = xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
HandleXiResult(mvret);
mvret = xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
HandleXiResult(mvret);
mvret = xiGetParamInt(hmv, XI_PRM_IMAGE_IS_COLOR, &isColor);
HandleXiResult(mvret);
if(isColor) // for color cameras
{
// default image format RGB24
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_RGB24);
HandleXiResult(mvret);
// always use auto white balance for color cameras
mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, 1);
HandleXiResult(mvret);
// allocate frame buffer for RGB24 image
frame = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
}
else // for mono cameras
{
// default image format MONO8
mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, XI_MONO8);
HandleXiResult(mvret);
// allocate frame buffer for MONO8 image
frame = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
}
//default capture timeout 10s
timeout = 10000;
mvret = xiStartAcquisition(hmv);
if(mvret != XI_OK)
{
errMsg("StartAcquisition XI_DEVICE failed", mvret);
goto error;
}
return true;
error:
errMsg("Open XI_DEVICE failed", mvret);
xiCloseDevice(hmv);
hmv = NULL;
return false;
}
/**********************************************************************************/
void CvCaptureCAM_XIMEA::close()
{
if(frame)
cvReleaseImage(&frame);
if(hmv)
{
xiStopAcquisition(hmv);
xiCloseDevice(hmv);
}
hmv = NULL;
}
/**********************************************************************************/
bool CvCaptureCAM_XIMEA::grabFrame()
{
memset(&image, 0, sizeof(XI_IMG));
image.size = sizeof(XI_IMG);
int mvret = xiGetImage( hmv, timeout, &image);
if(mvret == MM40_ACQUISITION_STOPED)
{
xiStartAcquisition(hmv);
mvret = xiGetImage(hmv, timeout, &image);
}
if(mvret != XI_OK)
{
errMsg("Error during GetImage", mvret);
return false;
}
return true;
}
/**********************************************************************************/
IplImage* CvCaptureCAM_XIMEA::retrieveFrame(int)
{
// update cvImage after format has changed
resetCvImage();
// copy pixel data
switch( image.frm)
{
case XI_MONO8 :
case XI_RAW8 : memcpy( frame->imageData, image.bp, image.width*image.height); break;
case XI_MONO16 :
case XI_RAW16 : memcpy( frame->imageData, image.bp, image.width*image.height*sizeof(WORD)); break;
case XI_RGB24 :
case XI_RGB_PLANAR : memcpy( frame->imageData, image.bp, image.width*image.height*3); break;
case XI_RGB32 : memcpy( frame->imageData, image.bp, image.width*image.height*4); break;
default: break;
}
return frame;
}
/**********************************************************************************/
void CvCaptureCAM_XIMEA::resetCvImage()
{
int width = 0, height = 0, format = 0;
xiGetParamInt( hmv, XI_PRM_WIDTH, &width);
xiGetParamInt( hmv, XI_PRM_HEIGHT, &height);
xiGetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, &format);
if( (int)image.width != width || (int)image.height != height || image.frm != (XI_IMG_FORMAT)format)
{
if(frame) cvReleaseImage(&frame);
frame = NULL;
switch( image.frm)
{
case XI_MONO8 :
case XI_RAW8 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 1); break;
case XI_MONO16 :
case XI_RAW16 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_16U, 1); break;
case XI_RGB24 :
case XI_RGB_PLANAR : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 3); break;
case XI_RGB32 : frame = cvCreateImage(cvSize( image.width, image.height), IPL_DEPTH_8U, 4); break;
default :
return;
}
}
cvZero(frame);
}
/**********************************************************************************/
double CvCaptureCAM_XIMEA::getProperty( int property_id )
{
if(hmv == NULL)
return 0;
int ival = 0;
float fval = 0;
switch( property_id )
{
// OCV parameters
case CV_CAP_PROP_POS_FRAMES : return (double) image.nframe;
case CV_CAP_PROP_FRAME_WIDTH : xiGetParamInt( hmv, XI_PRM_WIDTH, &ival); return ival;
case CV_CAP_PROP_FRAME_HEIGHT : xiGetParamInt( hmv, XI_PRM_HEIGHT, &ival); return ival;
case CV_CAP_PROP_FPS : xiGetParamFloat( hmv, XI_PRM_FRAMERATE, &fval); return fval;
case CV_CAP_PROP_GAIN : xiGetParamFloat( hmv, XI_PRM_GAIN, &fval); return fval;
case CV_CAP_PROP_EXPOSURE : xiGetParamInt( hmv, XI_PRM_EXPOSURE, &ival); return ival;
// XIMEA camera properties
case CV_CAP_PROP_XI_DOWNSAMPLING : xiGetParamInt( hmv, XI_PRM_DOWNSAMPLING, &ival); return ival;
case CV_CAP_PROP_XI_DATA_FORMAT : xiGetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, &ival); return ival;
case CV_CAP_PROP_XI_OFFSET_X : xiGetParamInt( hmv, XI_PRM_OFFSET_X, &ival); return ival;
case CV_CAP_PROP_XI_OFFSET_Y : xiGetParamInt( hmv, XI_PRM_OFFSET_Y, &ival); return ival;
case CV_CAP_PROP_XI_TRG_SOURCE : xiGetParamInt( hmv, XI_PRM_TRG_SOURCE, &ival); return ival;
case CV_CAP_PROP_XI_GPI_SELECTOR : xiGetParamInt( hmv, XI_PRM_GPI_SELECTOR, &ival); return ival;
case CV_CAP_PROP_XI_GPI_MODE : xiGetParamInt( hmv, XI_PRM_GPI_MODE, &ival); return ival;
case CV_CAP_PROP_XI_GPI_LEVEL : xiGetParamInt( hmv, XI_PRM_GPI_LEVEL, &ival); return ival;
case CV_CAP_PROP_XI_GPO_SELECTOR : xiGetParamInt( hmv, XI_PRM_GPO_SELECTOR, &ival); return ival;
case CV_CAP_PROP_XI_GPO_MODE : xiGetParamInt( hmv, XI_PRM_GPO_MODE, &ival); return ival;
case CV_CAP_PROP_XI_LED_SELECTOR : xiGetParamInt( hmv, XI_PRM_LED_SELECTOR, &ival); return ival;
case CV_CAP_PROP_XI_LED_MODE : xiGetParamInt( hmv, XI_PRM_LED_MODE, &ival); return ival;
case CV_CAP_PROP_XI_AUTO_WB : xiGetParamInt( hmv, XI_PRM_AUTO_WB, &ival); return ival;
case CV_CAP_PROP_XI_AEAG : xiGetParamInt( hmv, XI_PRM_AEAG, &ival); return ival;
case CV_CAP_PROP_XI_EXP_PRIORITY : xiGetParamFloat( hmv, XI_PRM_EXP_PRIORITY, &fval); return fval;
case CV_CAP_PROP_XI_AE_MAX_LIMIT : xiGetParamInt( hmv, XI_PRM_AE_MAX_LIMIT, &ival); return ival;
case CV_CAP_PROP_XI_AG_MAX_LIMIT : xiGetParamFloat( hmv, XI_PRM_AG_MAX_LIMIT, &fval); return fval;
case CV_CAP_PROP_XI_AEAG_LEVEL : xiGetParamInt( hmv, XI_PRM_AEAG_LEVEL, &ival); return ival;
case CV_CAP_PROP_XI_TIMEOUT : return timeout;
}
return 0;
}
/**********************************************************************************/
bool CvCaptureCAM_XIMEA::setProperty( int property_id, double value )
{
int ival = (int) value;
float fval = (float) value;
int mvret = XI_OK;
switch(property_id)
{
// OCV parameters
case CV_CAP_PROP_FRAME_WIDTH : mvret = xiSetParamInt( hmv, XI_PRM_WIDTH, ival); break;
case CV_CAP_PROP_FRAME_HEIGHT : mvret = xiSetParamInt( hmv, XI_PRM_HEIGHT, ival); break;
case CV_CAP_PROP_FPS : mvret = xiSetParamFloat( hmv, XI_PRM_FRAMERATE, fval); break;
case CV_CAP_PROP_GAIN : mvret = xiSetParamFloat( hmv, XI_PRM_GAIN, fval); break;
case CV_CAP_PROP_EXPOSURE : mvret = xiSetParamInt( hmv, XI_PRM_EXPOSURE, ival); break;
// XIMEA camera properties
case CV_CAP_PROP_XI_DOWNSAMPLING : mvret = xiSetParamInt( hmv, XI_PRM_DOWNSAMPLING, ival); break;
case CV_CAP_PROP_XI_DATA_FORMAT : mvret = xiSetParamInt( hmv, XI_PRM_IMAGE_DATA_FORMAT, ival); break;
case CV_CAP_PROP_XI_OFFSET_X : mvret = xiSetParamInt( hmv, XI_PRM_OFFSET_X, ival); break;
case CV_CAP_PROP_XI_OFFSET_Y : mvret = xiSetParamInt( hmv, XI_PRM_OFFSET_Y, ival); break;
case CV_CAP_PROP_XI_TRG_SOURCE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOURCE, ival); break;
case CV_CAP_PROP_XI_GPI_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_GPI_SELECTOR, ival); break;
case CV_CAP_PROP_XI_TRG_SOFTWARE : mvret = xiSetParamInt( hmv, XI_PRM_TRG_SOURCE, 1); break;
case CV_CAP_PROP_XI_GPI_MODE : mvret = xiSetParamInt( hmv, XI_PRM_GPI_MODE, ival); break;
case CV_CAP_PROP_XI_GPI_LEVEL : mvret = xiSetParamInt( hmv, XI_PRM_GPI_LEVEL, ival); break;
case CV_CAP_PROP_XI_GPO_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_GPO_SELECTOR, ival); break;
case CV_CAP_PROP_XI_GPO_MODE : mvret = xiSetParamInt( hmv, XI_PRM_GPO_MODE, ival); break;
case CV_CAP_PROP_XI_LED_SELECTOR : mvret = xiSetParamInt( hmv, XI_PRM_LED_SELECTOR, ival); break;
case CV_CAP_PROP_XI_LED_MODE : mvret = xiSetParamInt( hmv, XI_PRM_LED_MODE, ival); break;
case CV_CAP_PROP_XI_AUTO_WB : mvret = xiSetParamInt( hmv, XI_PRM_AUTO_WB, ival); break;
case CV_CAP_PROP_XI_MANUAL_WB : mvret = xiSetParamInt( hmv, XI_PRM_MANUAL_WB, ival); break;
case CV_CAP_PROP_XI_AEAG : mvret = xiSetParamInt( hmv, XI_PRM_AEAG, ival); break;
case CV_CAP_PROP_XI_EXP_PRIORITY : mvret = xiSetParamFloat( hmv, XI_PRM_EXP_PRIORITY, fval); break;
case CV_CAP_PROP_XI_AE_MAX_LIMIT : mvret = xiSetParamInt( hmv, XI_PRM_AE_MAX_LIMIT, ival); break;
case CV_CAP_PROP_XI_AG_MAX_LIMIT : mvret = xiSetParamFloat( hmv, XI_PRM_AG_MAX_LIMIT, fval); break;
case CV_CAP_PROP_XI_AEAG_LEVEL : mvret = xiSetParamInt( hmv, XI_PRM_AEAG_LEVEL, ival); break;
case CV_CAP_PROP_XI_TIMEOUT : timeout = ival; break;
}
if(mvret != XI_OK)
{
errMsg("Set parameter error", mvret);
return false;
}
else
return true;
}
/**********************************************************************************/
void CvCaptureCAM_XIMEA::errMsg(const char* msg, int errNum)
{
#if defined WIN32 || defined _WIN32
char buf[512]="";
sprintf( buf, "%s : %d\n", msg, errNum);
OutputDebugString(buf);
#else
fprintf(stderr, "%s : %d\n", msg, errNum);
#endif
}
/**********************************************************************************/
int CvCaptureCAM_XIMEA::getBpp()
{
switch( image.frm)
{
case XI_MONO8 :
case XI_RAW8 : return 1;
case XI_MONO16 :
case XI_RAW16 : return 2;
case XI_RGB24 :
case XI_RGB_PLANAR : return 3;
case XI_RGB32 : return 4;
default :
return 0;
}
}
/**********************************************************************************/

View File

@@ -1,846 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Authors: Konstantin Dols <dols@ient.rwth-aachen.de>
// Mark Asbach <asbach@ient.rwth-aachen.de>
//
// Institute of Communications Engineering
// RWTH Aachen University
#include "precomp.hpp"
// required to enable some functions used here...
#define XINE_ENABLE_EXPERIMENTAL_FEATURES
#include <cassert>
extern "C"
{
#include <xine.h>
//#include <xine/xineutils.h>
// forward declaration from <xine/xineutils.h>
const char *xine_get_homedir( void );
}
typedef struct CvCaptureAVI_XINE
{
/// method call table
xine_t * xine;
xine_stream_t * stream;
xine_video_port_t * vo_port;
/// frame returned by xine_get_next_video_frame()
xine_video_frame_t xine_frame;
IplImage * yuv_frame;
IplImage * bgr_frame;
/// image dimensions of the input stream.
CvSize size;
/// framenumber of the last frame received from xine_get_next_video_frame().
/// note: always keep this value updated !!!!
int frame_number;
/// framerate of the opened stream
double frame_rate;
/// duration of a frame in stream
double frame_duration;
/// indicated if input is seekable
bool seekable;
}
CvCaptureAVI_XINE;
// 4:2:2 interleaved -> BGR
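// YUY2 packs pixel pairs as Y0 U Y1 V, so each chroma sample is shared by two
// horizontally adjacent pixels before the YCrCb -> BGR conversion below.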
static void icvYUY2toBGR( CvCaptureAVI_XINE * capture )
{
uint8_t * v = capture->xine_frame.data;
int offset;
for ( int y = 0; y < capture->yuv_frame->height; y++ )
{
offset = y * capture->yuv_frame->widthStep;
for ( int x = 0; x < capture->yuv_frame->width; x++, offset += 3 )
{
capture->yuv_frame->imageData[ offset + 1 ] = v[ 3 ];
capture->yuv_frame->imageData[ offset + 2 ] = v[ 1 ];
if ( x & 1 )
{
capture->yuv_frame->imageData[ offset ] = v[ 2 ];
v += 4;
}
else
{
capture->yuv_frame->imageData[ offset ] = v[ 0 ];
}
}
}
// convert to BGR
cvCvtColor( capture->yuv_frame, capture->bgr_frame, CV_YCrCb2BGR );
}
// 4:2:0 planar -> BGR
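// YV12 stores a full-resolution Y plane followed by two quarter-resolution chroma planes;
// each chroma sample is shared by a 2x2 block of luma pixels.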
static void icvYV12toBGR( CvCaptureAVI_XINE * capture )
{
IplImage * yuv = capture->yuv_frame;
int w_Y = capture->size.width;
int h_Y = capture->size.height;
int w_UV = w_Y >> 1;
int size_Y = w_Y * h_Y;
int size_UV = size_Y / 4;
int line = yuv->widthStep;
uint8_t * addr_Y = capture->xine_frame.data;
uint8_t * addr_U = addr_Y + size_Y;
uint8_t * addr_V = addr_U + size_UV;
// YYYY..UU.VV. -> BGRBGRBGR...
for ( int y = 0; y < h_Y; y++ )
{
int offset = y * line;
for ( int x = 0; x < w_Y; x++, offset += 3 )
{
/*
if ( x&1 )
{
addr_U++; addr_V++;
}
*/
int one_zero = x & 1;
addr_U += one_zero;
addr_V += one_zero;
yuv->imageData[ offset ] = *( addr_Y++ );
yuv->imageData[ offset + 1 ] = *addr_U;
yuv->imageData[ offset + 2 ] = *addr_V;
}
if ( y & 1 )
{
addr_U -= w_UV;
addr_V -= w_UV;
}
}
/* convert to BGR */
cvCvtColor( capture->yuv_frame, capture->bgr_frame, CV_YCrCb2BGR );
}
static void icvCloseAVI_XINE( CvCaptureAVI_XINE* capture )
{
xine_free_video_frame( capture->vo_port, &capture->xine_frame );
if ( capture->yuv_frame ) cvReleaseImage( &capture->yuv_frame );
if ( capture->bgr_frame ) cvReleaseImage( &capture->bgr_frame );
xine_close( capture->stream );
// xine_dispose( capture->stream );
if ( capture->vo_port ) xine_close_video_driver( capture->xine, capture->vo_port );
xine_exit( capture->xine );
}
/**
* CHECKS IF THE STREAM IN * capture IS SEEKABLE.
**/
static void icvCheckSeekAVI_XINE( CvCaptureAVI_XINE * capture )
{
OPENCV_ASSERT ( capture, "icvCheckSeekAVI_XINE( CvCaptureAVI_XINE* )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvCheckSeekAVI_XINE( CvCaptureAVI_XINE* )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvCheckSeekAVI_XINE( CvCaptureAVI_XINE* )", "illegal capture->vo_port");
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvCheckSeekAVI_XINE ... start\n" );
#endif
// temp. frame for testing.
xine_video_frame_t tmp;
// try to seek to a future frame...
xine_play( capture->stream, 0, 300 ); /* 300msec */
// try to receive the frame...
xine_get_next_video_frame( capture->vo_port, &tmp );
// if the framenumber is still 0, we can't use the xine seek functionality
capture->seekable = ( tmp.frame_number != 0 );
// reset stream
xine_play( capture->stream, 0, 0 );
// release xine_frame
xine_free_video_frame( capture->vo_port, &tmp );
#ifndef NDEBUG
if ( capture->seekable )
fprintf( stderr, "(DEBUG) icvCheckSeekAVI_XINE: Input is seekable, using XINE seek implementation.\n" );
else
fprintf( stderr, "(DEBUG) icvCheckSeekAVI_XINE: Input is NOT seekable, using fallback function.\n" );
fprintf( stderr, "(DEBUG) icvCheckSeekAVI_XINE ... end\n" );
#endif
}
static int icvOpenAVI_XINE( CvCaptureAVI_XINE* capture, const char* filename )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOpenAVI_XINE ... start\n" );
#endif
char configfile[ 2048 ];
capture->xine = xine_new();
sprintf( configfile, "%s%s", xine_get_homedir(), "/.xine/config" );
xine_config_load( capture->xine, configfile );
xine_init( capture->xine );
xine_engine_set_param( capture->xine, 0, 0 );
capture->vo_port = xine_new_framegrab_video_port( capture->xine );
if ( capture->vo_port == NULL )
{
printf( "(ERROR)icvOpenAVI_XINE(): Unable to initialize video driver.\n" );
return 0;
}
capture->stream = xine_stream_new( capture->xine, NULL, capture->vo_port );
if ( !xine_open( capture->stream, filename ) )
{
printf( "(ERROR)icvOpenAVI_XINE(): Unable to open source '%s'\n", filename );
return 0;
}
// reset stream...
xine_play( capture->stream, 0, 0 );
// initialize some internals...
capture->frame_number = 0;
if ( !xine_get_next_video_frame( capture->vo_port, &capture->xine_frame ) )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOpenAVI_XINE ... failed!\n" );
#endif
return 0;
}
capture->size = cvSize( capture->xine_frame.width, capture->xine_frame.height );
capture->yuv_frame = cvCreateImage( capture->size, IPL_DEPTH_8U, 3 );
capture->bgr_frame = cvCreateImage( capture->size, IPL_DEPTH_8U, 3 );
xine_free_video_frame( capture->vo_port, &capture->xine_frame );
capture->xine_frame.data[ 0 ] = 0;
icvCheckSeekAVI_XINE( capture );
capture->frame_duration = xine_get_stream_info( capture->stream, XINE_STREAM_INFO_FRAME_DURATION ) / 90.;
capture->frame_rate = 1000 / capture->frame_duration;
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) frame_duration = %f, framerate = %f\n", capture->frame_duration, capture->frame_rate );
#endif
OPENCV_ASSERT ( capture->yuv_frame,
"icvOpenAVI_XINE( CvCaptureAVI_XINE *, const char *)", "couldn't create yuv frame");
OPENCV_ASSERT ( capture->bgr_frame,
"icvOpenAVI_XINE( CvCaptureAVI_XINE *, const char *)", "couldn't create bgr frame");
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOpenAVI_XINE ... end\n" );
#endif
return 1;
}
static int icvGrabFrameAVI_XINE( CvCaptureAVI_XINE* capture )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvGrabFrameAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvGrabFrameAVI_XINE( CvCaptureAVI_XINE * )", "illegal capture");
OPENCV_ASSERT ( capture->vo_port,
"icvGrabFrameAVI_XINE( CvCaptureAVI_XINE * )", "illegal capture->vo_port");
int res = xine_get_next_video_frame( capture->vo_port, &capture->xine_frame );
/* always keep internal framenumber updated !!! */
if ( res ) capture->frame_number++;
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvGrabFrameAVI_XINE ... end\n" );
#endif
return res;
}
static const IplImage* icvRetrieveFrameAVI_XINE( CvCaptureAVI_XINE* capture, int )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvRetrieveFrameAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvRetrieveFrameAVI_XINE( CvCaptureAVI_XINE * )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvRetrieveFrameAVI_XINE( CvCaptureAVI_XINE * )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvRetrieveFrameAVI_XINE( CvCaptureAVI_XINE * )", "illegal capture->vo_port");
/* no frame grabbed yet? so let's do it now! */
int res = 0;
if ( capture->xine_frame.data == 0 )
{
res = icvGrabFrameAVI_XINE( capture );
}
else
{
res = 1;
}
if ( res )
{
switch ( capture->xine_frame.colorspace )
{
case XINE_IMGFMT_YV12: icvYV12toBGR( capture );
#ifndef NDEBUG
printf( "(DEBUG)icvRetrieveFrameAVI_XINE: converted YV12 to BGR.\n" );
#endif
break;
case XINE_IMGFMT_YUY2: icvYUY2toBGR( capture );
#ifndef NDEBUG
printf( "(DEBUG)icvRetrieveFrameAVI_XINE: converted YUY2 to BGR.\n" );
#endif
break;
case XINE_IMGFMT_XVMC: printf( "(ERROR)icvRetrieveFrameAVI_XINE: XVMC format not supported!\n" );
break;
case XINE_IMGFMT_XXMC: printf( "(ERROR)icvRetrieveFrameAVI_XINE: XXMC format not supported!\n" );
break;
default: printf( "(ERROR)icvRetrieveFrameAVI_XINE: unknown color/pixel format!\n" );
}
/* always release last xine_frame, not needed anymore, but store its frame_number in *capture ! */
xine_free_video_frame( capture->vo_port, &capture->xine_frame );
capture->xine_frame.data = 0;
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvRetrieveFrameAVI_XINE ... end\n" );
#endif
return capture->bgr_frame;
}
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvRetrieveFrameAVI_XINE ... failed!\n" );
#endif
return 0;
}
/**
* THIS FUNCTION IS A FALLBACK FOR THE CASE THAT THE XINE SEEK IMPLEMENTATION
* DOESN'T WORK WITH THE CURRENT INPUT. IT IS ONLY USED AS A LAST RESORT,
* BECAUSE IT IS VERY SLOW !
**/
static int icvOldSeekFrameAVI_XINE( CvCaptureAVI_XINE* capture, int f )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOldSeekFrameAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvRetricvOldSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvOldSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvOldSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->vo_port");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream ) return 0;
// no need to seek if we are already there...
if ( f == capture->frame_number )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOldSeekFrameAVI_XINE ... end\n" );
#endif
return 1;
}
// if the requested position is ahead of our current position,
// we just need to read the remaining frames until we get there.
else if ( f > capture->frame_number )
{
for ( ;capture->frame_number < f;capture->frame_number++ )
/// un-increment framenumber if grabbing failed
if ( !xine_get_next_video_frame( capture->vo_port, &capture->xine_frame ) )
{
capture->frame_number--;
break;
}
else
{
xine_free_video_frame( capture->vo_port, &capture->xine_frame );
}
}
// otherwise we need to reset the stream and
// start reading frames from the beginning.
else // f < capture->frame_number
{
/// reset stream, should also work with non-seekable input
xine_play( capture->stream, 0, 0 );
/// read frames until we are at the requested frame
for ( capture->frame_number = 0; capture->frame_number < f; capture->frame_number++ )
/// un-increment last framenumber if grabbing failed
if ( !xine_get_next_video_frame( capture->vo_port, &capture->xine_frame ) )
{
capture->frame_number--;
break;
}
else
{
xine_free_video_frame( capture->vo_port, &capture->xine_frame );
}
}
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvOldSeekFrameAVI_XINE ... end\n" );
#endif
return ( f == capture->frame_number ) ? 1 : 0;
}
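// Editorial note (not in the original source): with this fallback, a forward
// seek costs (f - frame_number) decodes and a backward seek restarts from
// frame 0, i.e. up to f decodes; this is why it is used only when xine's
// native seek fails.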
static int icvSeekFrameAVI_XINE( CvCaptureAVI_XINE* capture, int f )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvSeekFrameAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->vo_port");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream ) return 0;
if ( capture->seekable )
{
/// use xinelib's seek functionality
int new_time = ( int ) ( ( f + 1 ) * ( float ) capture->frame_duration );
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) calling xine_play()" );
#endif
if ( xine_play( capture->stream, 0, new_time ) )
{
#ifndef NDEBUG
fprintf( stderr, "ok\n" );
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... end\n" );
#endif
capture->frame_number = f;
return 1;
}
else
{
#ifndef NDEBUG
fprintf( stderr, "failed\n" );
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... failed\n" );
#endif
return 0;
}
}
else
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... end\n" );
#endif
return icvOldSeekFrameAVI_XINE( capture, f );
}
}
static int icvSeekTimeAVI_XINE( CvCaptureAVI_XINE* capture, int t )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekTimeAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvSeekTimeAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvSeekTimeAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvSeekTimeAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->vo_port");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream ) return 0;
if ( capture->seekable )
{
/// use xinelib's seek functionality
if ( xine_play( capture->stream, 0, t ) )
{
capture->frame_number = ( int ) ( ( float ) t * capture->frame_rate / 1000 );
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... end\n" );
#endif
return 1;
}
else
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ... failed!\n" );
#endif
return 0;
}
}
else
{
int new_frame = ( int ) ( ( float ) t * capture->frame_rate / 1000 );
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekFrameAVI_XINE ....end\n" );
#endif
return icvOldSeekFrameAVI_XINE( capture, new_frame );
}
}
static int icvSeekRatioAVI_XINE( CvCaptureAVI_XINE* capture, double ratio )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekRatioAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvSeekRatioAVI_XINE( CvCaptureAVI_XINE *, double )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvSeekRatioAVI_XINE( CvCaptureAVI_XINE *, double )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvSeekRatioAVI_XINE( CvCaptureAVI_XINE *, double )", "illegal capture->vo_port");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream ) return 0;
/// ratio must be [0..1]
if ( ratio > 1 || ratio < 0 ) return 0;
if ( capture->seekable )
{
int pos_t, pos_l, length;
xine_get_pos_length( capture->stream, &pos_l, &pos_t, &length );
fprintf( stderr, "ratio on SeekRatio(): %d\n", pos_l );
/// use xinelib's seek functionality; note that xine_play() expects the
/// start position as an integer in the range [0..65535], not a time value
if ( xine_play( capture->stream, (int)(ratio*65535), 0 ) )
{
capture->frame_number = ( int ) ( ratio*length / capture->frame_duration );
}
else
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekRatioAVI_XINE ... failed!\n" );
#endif
return 0;
}
}
else
{
/// TODO: fill it !
fprintf( stderr, "icvSeekRatioAVI_XINE(): Seek not supported by stream !\n" );
fprintf( stderr, "icvSeekRatioAVI_XINE(): (seek in stream with NO seek support NOT implemented...yet!)\n" );
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekRatioAVI_XINE ... failed!\n" );
#endif
return 0;
}
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSeekRatioAVI_XINE ... end!\n" );
#endif
return 1;
}
static double icvGetPropertyAVI_XINE( CvCaptureAVI_XINE* capture, int property_id )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvGetPropertyAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvGetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvGetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvGetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->vo_port");
OPENCV_ASSERT ( capture->xine,
"icvGetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->xine");
OPENCV_ASSERT ( capture->bgr_frame,
"icvGetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->bgr_frame");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream || !capture->bgr_frame || !capture->xine || !capture->vo_port ) return 0
int pos_t, pos_l, length;
xine_get_pos_length( capture->stream, &pos_l, &pos_t, &length );
fprintf( stderr, "ratio on GetProperty(): %i\n", pos_l );
switch ( property_id )
{
/// return actual position in msec
case CV_CAP_PROP_POS_MSEC:
if ( !capture->seekable )
{
fprintf( stderr, "(ERROR) GetPropertyAVI_XINE(CV_CAP_PROP_POS_MSEC:\n" );
fprintf( stderr, " Stream is NOT seekable, so position info may NOT be valid !!\n" );
}
return pos_t;
/// return actual frame number
case CV_CAP_PROP_POS_FRAMES:
/// we rely on capture->frame_number being kept up to date
return capture->frame_number;
/// return actual position ratio in the range [0..1] depending on
/// the total length of the stream and the actual position
case CV_CAP_PROP_POS_AVI_RATIO:
if ( !capture->seekable )
{
fprintf( stderr, "(ERROR) GetPropertyAVI_XINE(CV_CAP_PROP_POS_AVI_RATIO:\n" );
fprintf( stderr, " Stream is NOT seekable, so ratio info may NOT be valid !!\n" );
}
if ( length == 0 ) break;
else return pos_l / 65535.;  // pos_l is reported in the range [0..65535]
/// return width of image source
case CV_CAP_PROP_FRAME_WIDTH:
return capture->size.width;
/// return height of image source
case CV_CAP_PROP_FRAME_HEIGHT:
return capture->size.height;
/// return framerate of stream
case CV_CAP_PROP_FPS:
if ( !capture->seekable )
{
fprintf( stderr, "(ERROR) GetPropertyAVI_XINE(CV_CAP_PROP_FPS:\n" );
fprintf( stderr, " Stream is NOT seekable, so FPS info may NOT be valid !!\n" );
}
return capture->frame_rate;
/// return four-character-code (FOURCC) of source's codec
case CV_CAP_PROP_FOURCC:
return ( double ) xine_get_stream_info( capture->stream, XINE_STREAM_INFO_VIDEO_FOURCC );
}
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvGetPropertyAVI_XINE ... failed!\n" );
#endif
return 0;
}
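// Editorial sketch (not part of the original file): the double returned for
// CV_CAP_PROP_FOURCC unpacks into four characters, lowest byte first:
//   int fcc = (int) icvGetPropertyAVI_XINE( capture, CV_CAP_PROP_FOURCC );
//   char a = fcc & 255, b = (fcc >> 8) & 255,
//        c = (fcc >> 16) & 255, d = (fcc >> 24) & 255;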
static int icvSetPropertyAVI_XINE( CvCaptureAVI_XINE* capture,
int property_id, double value )
{
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSetPropertyAVI_XINE ... start\n" );
#endif
OPENCV_ASSERT ( capture,
"icvSetPropertyAVI_XINE( CvCaptureAVI_XINE *, int, double )", "illegal capture");
OPENCV_ASSERT ( capture->stream,
"icvGetPropericvSetPropertyAVI_XINE( CvCaptureAVI_XINE *, int )", "illegal capture->stream");
OPENCV_ASSERT ( capture->vo_port,
"icvSetPropertyAVI_XINE( CvCaptureAVI_XINE *, int, double )", "illegal capture->vo_port");
// not needed thanks to the asserts...
// we need a valid capture context and its stream to seek through
// if ( !capture || !capture->stream || !capture->bgr_frame || !capture->xine || !capture->vo_port ) return 0
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSetPropertyAVI_XINE: seeking to value %f ... ", value );
#endif
switch ( property_id )
{
/// set (seek to) position in msec
case CV_CAP_PROP_POS_MSEC:
return icvSeekTimeAVI_XINE( capture, ( int ) value );
/// set (seek to) frame number
case CV_CAP_PROP_POS_FRAMES:
return icvSeekFrameAVI_XINE( capture, ( int ) value );
/// set (seek to) position ratio in the range [0..1] depending on
/// the total length of the stream and the actual position
case CV_CAP_PROP_POS_AVI_RATIO:
return icvSeekRatioAVI_XINE( capture, value );
default:
#ifndef NDEBUG
fprintf( stderr, "(DEBUG) icvSetPropertyAVI_XINE ... failed!\n" );
#endif
return 0;
}
}
static CvCaptureAVI_XINE* icvCaptureFromFile_XINE( const char* filename )
{
// construct capture struct
CvCaptureAVI_XINE * capture = ( CvCaptureAVI_XINE* ) cvAlloc ( sizeof ( CvCaptureAVI_XINE ) );
memset( capture, 0, sizeof ( CvCaptureAVI_XINE ) );
// initialize XINE
if ( !icvOpenAVI_XINE( capture, filename ) )
{
cvFree( &capture );  // don't leak the capture struct when opening fails
return 0;
}
OPENCV_ASSERT ( capture,
"cvCaptureFromFile_XINE( const char * )", "couldn't create capture");
return capture;
}
class CvCaptureAVI_XINE_CPP : public CvCapture
{
public:
CvCaptureAVI_XINE_CPP() { captureXINE = 0; }
virtual ~CvCaptureAVI_XINE_CPP() { close(); }
virtual bool open( const char* filename );
virtual void close();
virtual double getProperty(int);
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
protected:
CvCaptureAVI_XINE* captureXINE;
};
bool CvCaptureAVI_XINE_CPP::open( const char* filename )
{
close();
captureXINE = icvCaptureFromFile_XINE(filename);
return captureXINE != 0;
}
void CvCaptureAVI_XINE_CPP::close()
{
if( captureXINE )
{
icvCloseAVI_XINE( captureXINE );
cvFree( &captureXINE );
}
}
bool CvCaptureAVI_XINE_CPP::grabFrame()
{
return captureXINE ? icvGrabFrameAVI_XINE( captureXINE ) != 0 : false;
}
IplImage* CvCaptureAVI_XINE_CPP::retrieveFrame(int)
{
return captureXINE ? (IplImage*)icvRetrieveFrameAVI_XINE( captureXINE, 0 ) : 0;
}
double CvCaptureAVI_XINE_CPP::getProperty( int propId )
{
return captureXINE ? icvGetPropertyAVI_XINE( captureXINE, propId ) : 0;
}
bool CvCaptureAVI_XINE_CPP::setProperty( int propId, double value )
{
return captureXINE ? icvSetPropertyAVI_XINE( captureXINE, propId, value ) != 0 : false;
}
CvCapture* cvCreateFileCapture_XINE(const char* filename)
{
CvCaptureAVI_XINE_CPP* capture = new CvCaptureAVI_XINE_CPP;
if( capture->open(filename))
return capture;
delete capture;
return 0;
}
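// Editorial usage sketch (hypothetical file name, not part of the original
// file): a client drives the XINE backend purely through the generic
// CvCapture interface returned by the factory above.
static void exampleUseXineCapture()
{
    CvCapture* cap = cvCreateFileCapture_XINE( "clip.avi" );
    if ( !cap )
        return;                       // backend unavailable or open failed
    while ( cap->grabFrame() )        // decode the next frame
    {
        IplImage* frame = cap->retrieveFrame( 0 );  // valid until the next grab
        if ( !frame )
            break;
        // ... process frame ...
    }
    delete cap;                       // the virtual destructor closes the stream
}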
#undef NDEBUG

View File

@@ -1,253 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(WIN32) || defined(__MINGW32__)
// some versions of FFMPEG assume a C99 compiler, and don't define INT64_C
#include <stdint.h>
#ifndef INT64_C
#define INT64_C(c) (c##LL)
#endif
#ifndef UINT64_C
#define UINT64_C(c) (c##ULL)
#endif
#include <errno.h>
#endif
// if the header path is not specified explicitly, let's deduce it
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
#if defined(HAVE_GENTOO_FFMPEG)
#define HAVE_LIBAVFORMAT_AVFORMAT_H 1
#elif defined HAVE_FFMPEG
#define HAVE_FFMPEG_AVFORMAT_H 1
#endif
#if defined(HAVE_FFMPEG_AVFORMAT_H)
#include <ffmpeg/avformat.h>
#endif
#if defined(HAVE_LIBAVFORMAT_AVFORMAT_H) || defined(WIN32)
#include <libavformat/avformat.h>
#endif
#endif
#ifdef __cplusplus
}
#endif
#ifndef MKTAG
#define MKTAG(a,b,c,d) (a | (b << 8) | (c << 16) | (d << 24))
#endif
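// Worked example (editorial, not in the original file): MKTAG packs four
// ASCII bytes little-endian, first character in the lowest byte:
//   MKTAG('D','I','V','X') == 0x44 | (0x49 << 8) | (0x56 << 16) | (0x58 << 24)
//                          == 0x58564944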
// required to look up the correct codec ID depending on the FOURCC code,
// this is just a snipped from the file riff.c from ffmpeg/libavformat
typedef struct AVCodecTag {
int id;
unsigned int tag;
} AVCodecTag;
const AVCodecTag codec_bmp_tags[] = {
{ CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
{ CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
{ CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
{ CODEC_ID_H263P, MKTAG('H', '2', '6', '3') },
{ CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */
{ CODEC_ID_H261, MKTAG('H', '2', '6', '1') },
/* added based on MPlayer */
{ CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
{ CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') },
{ CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4') },
{ CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') },
{ CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0') },
{ CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
{ CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
{ CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
{ CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
/* added based on MPlayer */
{ CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') },
{ CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') },
{ CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
{ CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') },
{ CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
{ CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') },
{ CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
{ CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') }, /* default signature when using MSMPEG4 */
{ CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
/* added based on MPlayer */
{ CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
{ CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
{ CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
{ CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
{ CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
{ CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
{ CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
{ CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
/* added based on MPlayer */
{ CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
{ CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
{ CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
/* added based on MPlayer */
{ CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
{ CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
{ CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
{ CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
{ CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') },
{ CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
{ CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
{ CODEC_ID_MPEG1VIDEO, 0x10000001 },
{ CODEC_ID_MPEG2VIDEO, 0x10000002 },
{ CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
{ CODEC_ID_MPEG2VIDEO, MKTAG('M', 'M', 'E', 'S') },
{ CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
{ CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
{ CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
{ CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */
{ CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */
{ CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') },
{ CODEC_ID_MJPEG, MKTAG('I', 'J', 'P', 'G') },
{ CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') },
{ CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
{ CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
{ CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') },
{ CODEC_ID_RAWVIDEO, 0 },
{ CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') },
{ CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') },
{ CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') },
{ CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') },
{ CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') },
{ CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') },
{ CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') },
{ CODEC_ID_RAWVIDEO, MKTAG('H', 'D', 'Y', 'C') },
{ CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') },
{ CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') },
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
{ CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
{ CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
{ CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') },
{ CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') },
{ CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') },
{ CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') },
{ CODEC_ID_MSRLE, MKTAG(0x1, 0x0, 0x0, 0x0) },
{ CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') },
{ CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') },
{ CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') },
{ CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') },
{ CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') },
{ CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') },
{ CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') },
{ CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') },
{ CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') },
{ CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') },
{ CODEC_ID_SNOW, MKTAG('S', 'N', 'O', 'W') },
{ CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') },
{ CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') },
{ CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') },
{ CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') },
{ CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') },
{ CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') },
{ CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') },
{ CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
{ CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
{ CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
{ CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
{ CODEC_ID_THEORA, MKTAG('t', 'h', 'e', 'o') },
#if LIBAVCODEC_VERSION_INT>0x000409
{ CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
{ CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
{ CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') },
{ CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') },
{ CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') },
#endif
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
{ CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') },
{ CODEC_ID_JPEGLS,MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */
{ CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
{ CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') },
{ CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') },
{ CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') },
{ CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') },
#endif
#if LIBAVCODEC_VERSION_INT>((51<<16)+(11<<8)+0)
{ CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
{ CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
{ CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
{ CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
{ CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') },
{ CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
{ CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
#endif
#if LIBAVCODEC_VERSION_INT>=((51<<16)+(49<<8)+0)
// this tag seems not to exist in older versions of FFMPEG
{ CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') },
#endif
{ CODEC_ID_NONE, 0 },
};
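// Illustrative helper (an editorial assumption, not part of the original
// file): the table is meant to be scanned linearly, with the terminating
// { CODEC_ID_NONE, 0 } entry acting as a sentinel. A tag of 0 legitimately
// resolves to CODEC_ID_RAWVIDEO, which precedes the sentinel.
static int codecIdFromFourcc( unsigned int tag )
{
    for ( int i = 0; codec_bmp_tags[i].id != CODEC_ID_NONE; i++ )
        if ( codec_bmp_tags[i].tag == tag )
            return codec_bmp_tags[i].id;
    return CODEC_ID_NONE;
}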

View File

@@ -95,90 +95,6 @@
#define CV_WINDOW_MAGIC_VAL 0x00420042
#define CV_TRACKBAR_MAGIC_VAL 0x00420043
/***************************** CvCapture structure ******************************/
struct CvCapture
{
virtual ~CvCapture() {}
virtual double getProperty(int) { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool grabFrame() { return true; }
virtual IplImage* retrieveFrame(int) { return 0; }
virtual int getCaptureDomain() { return CV_CAP_ANY; } // Return the type of the capture object: CV_CAP_VFW, etc...
};
/*************************** CvVideoWriter structure ****************************/
struct CvVideoWriter
{
virtual ~CvVideoWriter() {}
virtual bool writeFrame(const IplImage*) { return false; }
};
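// Minimal sketch (hypothetical class, not one of the real backends) of what a
// CvVideoWriter implementation provides: only writeFrame() has to be defined.
class CvFrameCountingWriter : public CvVideoWriter
{
    int frames;
public:
    CvFrameCountingWriter() : frames(0) {}
    virtual bool writeFrame(const IplImage* img)
    {
        if (!img) return false;
        frames++;      // a real backend would encode and store img here
        return true;
    }
};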
CvCapture * cvCreateCameraCapture_V4L( int index );
CvCapture * cvCreateCameraCapture_DC1394( int index );
CvCapture * cvCreateCameraCapture_DC1394_2( int index );
CvCapture* cvCreateCameraCapture_MIL( int index );
CvCapture* cvCreateCameraCapture_Giganetix( int index );
CvCapture * cvCreateCameraCapture_CMU( int index );
CV_IMPL CvCapture * cvCreateCameraCapture_TYZX( int index );
CvCapture* cvCreateFileCapture_Win32( const char* filename );
CvCapture* cvCreateCameraCapture_VFW( int index );
CvCapture* cvCreateFileCapture_VFW( const char* filename );
CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_MSMF( int index );
CvCapture* cvCreateFileCapture_MSMF (const char* filename);
CvVideoWriter* cvCreateVideoWriter_MSMF( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateFileCapture_OpenNI( const char* filename );
CvCapture* cvCreateCameraCapture_Android( int index );
CvCapture* cvCreateCameraCapture_XIMEA( int index );
CvCapture* cvCreateCameraCapture_AVFoundation(int index);
CVAPI(int) cvHaveImageReader(const char* filename);
CVAPI(int) cvHaveImageWriter(const char* filename);
CvCapture* cvCreateFileCapture_Images(const char* filename);
CvVideoWriter* cvCreateVideoWriter_Images(const char* filename);
CvCapture* cvCreateFileCapture_XINE (const char* filename);
#define CV_CAP_GSTREAMER_1394 0
#define CV_CAP_GSTREAMER_V4L 1
#define CV_CAP_GSTREAMER_V4L2 2
#define CV_CAP_GSTREAMER_FILE 3
CvCapture* cvCreateCapture_GStreamer(int type, const char *filename);
CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char* filename);
CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture * cvCreateFileCapture_QT (const char * filename);
CvCapture * cvCreateCameraCapture_QT (const int index);
CvVideoWriter* cvCreateVideoWriter_QT ( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateFileCapture_AVFoundation (const char * filename);
CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture * cvCreateCameraCapture_Unicap (const int index);
CvCapture * cvCreateCameraCapture_PvAPI (const int index);
CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
//Yannick Verdie 2010
void cvSetModeWindow_W32(const char* name, double prop_value);
void cvSetModeWindow_GTK(const char* name, double prop_value);
@@ -199,20 +115,6 @@ double cvGetRatioWindow_GTK(const char* name);
double cvGetOpenGlProp_W32(const char* name);
double cvGetOpenGlProp_GTK(const char* name);
namespace cv
{
class IVideoCapture
{
public:
virtual ~IVideoCapture() {}
virtual double getProperty(int) { return 0; }
virtual bool setProperty(int, double) { return false; }
virtual bool grabFrame() = 0;
virtual bool retrieveFrame(int, cv::OutputArray) = 0;
virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc...
};
};
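// Minimal sketch (hypothetical class name, not part of the original header)
// of a backend implementing the new C++ IVideoCapture interface: only the
// two pure virtuals, grabFrame() and retrieveFrame(), must be provided.
namespace cv
{
class SolidColorCapture : public IVideoCapture
{
    Mat frame;
public:
    SolidColorCapture() : frame(480, 640, CV_8UC3, Scalar(0, 255, 0)) {}
    virtual bool grabFrame() { return true; }        // nothing to decode
    virtual bool retrieveFrame(int, OutputArray dst)
    {
        frame.copyTo(dst);                           // hand out a copy
        return true;
    }
};
}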
//for QT
#if defined (HAVE_QT)
double cvGetModeWindow_QT(const char* name);

View File

@@ -1,400 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
#ifdef HAVE_FFMPEG
using namespace std;
class CV_FFmpegWriteBigVideoTest : public cvtest::BaseTest
{
public:
void run(int)
{
const int img_r = 4096;
const int img_c = 4096;
const double fps0 = 15;
const double time_sec = 1;
const int tags[] = {
0,
//VideoWriter::fourcc('D', 'I', 'V', '3'),
//VideoWriter::fourcc('D', 'I', 'V', 'X'),
VideoWriter::fourcc('D', 'X', '5', '0'),
VideoWriter::fourcc('F', 'L', 'V', '1'),
VideoWriter::fourcc('H', '2', '6', '1'),
VideoWriter::fourcc('H', '2', '6', '3'),
VideoWriter::fourcc('I', '4', '2', '0'),
//VideoWriter::fourcc('j', 'p', 'e', 'g'),
VideoWriter::fourcc('M', 'J', 'P', 'G'),
VideoWriter::fourcc('m', 'p', '4', 'v'),
VideoWriter::fourcc('M', 'P', 'E', 'G'),
//VideoWriter::fourcc('W', 'M', 'V', '1'),
//VideoWriter::fourcc('W', 'M', 'V', '2'),
VideoWriter::fourcc('X', 'V', 'I', 'D'),
//VideoWriter::fourcc('Y', 'U', 'Y', '2'),
};
const size_t n = sizeof(tags)/sizeof(tags[0]);
bool created = false;
for (size_t j = 0; j < n; ++j)
{
int tag = tags[j];
stringstream s;
s << tag;
const string filename = tempfile((s.str()+".avi").c_str());
try
{
double fps = fps0;
Size frame_s = Size(img_c, img_r);
if( tag == VideoWriter::fourcc('H', '2', '6', '1') )
frame_s = Size(352, 288);
else if( tag == VideoWriter::fourcc('H', '2', '6', '3') )
frame_s = Size(704, 576);
/*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') ||
tag == CV_FOURCC('j', 'p', 'e', 'g') )
frame_s = Size(1920, 1080);*/
if( tag == VideoWriter::fourcc('M', 'P', 'E', 'G') )
{
frame_s = Size(720, 576);
fps = 25;
}
VideoWriter writer(filename, tag, fps, frame_s);
if (writer.isOpened() == false)
{
ts->printf(ts->LOG, "\n\nFile name: %s\n", filename.c_str());
ts->printf(ts->LOG, "Codec id: %d Codec tag: %c%c%c%c\n", j,
tag & 255, (tag >> 8) & 255, (tag >> 16) & 255, (tag >> 24) & 255);
ts->printf(ts->LOG, "Error: cannot create video file.");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
}
else
{
Mat img(frame_s, CV_8UC3, Scalar::all(0));
const int coeff = cvRound(min(frame_s.width, frame_s.height)/(fps0 * time_sec));
for (int i = 0 ; i < static_cast<int>(fps * time_sec); i++ )
{
//circle(img, Point2i(img_c / 2, img_r / 2), min(img_r, img_c) / 2 * (i + 1), Scalar(255, 0, 0, 0), 2);
rectangle(img, Point2i(coeff * i, coeff * i), Point2i(coeff * (i + 1), coeff * (i + 1)),
Scalar::all(255 * (1.0 - static_cast<double>(i) / (fps * time_sec * 2) )), -1);
writer << img;
}
if (!created) created = true;
else remove(filename.c_str());
}
}
catch(...)
{
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
}
ts->set_failed_test_info(cvtest::TS::OK);
}
}
};
TEST(Highgui_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); }
class CV_FFmpegReadImageTest : public cvtest::BaseTest
{
public:
void run(int)
{
try
{
string filename = ts->get_data_path() + "readwrite/ordinary.bmp";
VideoCapture cap(filename);
Mat img0 = imread(filename, 1);
Mat img, img_next;
cap >> img;
cap >> img_next;
CV_Assert( !img0.empty() && !img.empty() && img_next.empty() );
double diff = cvtest::norm(img0, img, CV_C);
CV_Assert( diff == 0 );
}
catch(...)
{
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
}
ts->set_failed_test_info(cvtest::TS::OK);
}
};
TEST(Highgui_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); }
#endif
#if defined(HAVE_FFMPEG)
//////////////////////////////// Parallel VideoWriters and VideoCaptures ////////////////////////////////////
class CreateVideoWriterInvoker :
public ParallelLoopBody
{
public:
const static Size FrameSize;
static std::string TmpDirectory;
CreateVideoWriterInvoker(std::vector<VideoWriter*>& _writers, std::vector<std::string>& _files) :
ParallelLoopBody(), writers(&_writers), files(&_files)
{
}
virtual void operator() (const Range& range) const
{
for (int i = range.start; i != range.end; ++i)
{
std::ostringstream stream;
stream << i << ".avi";
std::string fileName = tempfile(stream.str().c_str());
files->operator[](i) = fileName;
writers->operator[](i) = new VideoWriter(fileName, VideoWriter::fourcc('X','V','I','D'), 25.0f, FrameSize);
CV_Assert(writers->operator[](i)->isOpened());
}
}
private:
std::vector<VideoWriter*>* writers;
std::vector<std::string>* files;
};
std::string CreateVideoWriterInvoker::TmpDirectory;
const Size CreateVideoWriterInvoker::FrameSize(1020, 900);
class WriteVideo_Invoker :
public ParallelLoopBody
{
public:
enum { FrameCount = 300 };
static const Scalar ObjectColor;
static const Point Center;
WriteVideo_Invoker(const std::vector<VideoWriter*>& _writers) :
ParallelLoopBody(), writers(&_writers)
{
}
static void GenerateFrame(Mat& frame, unsigned int i)
{
frame = Scalar::all(i % 255);
std::string text = to_string(i);
putText(frame, text, Point(50, Center.y), FONT_HERSHEY_SIMPLEX, 5.0, ObjectColor, 5, CV_AA);
circle(frame, Center, i + 2, ObjectColor, 2, CV_AA);
}
virtual void operator() (const Range& range) const
{
for (int j = range.start; j < range.end; ++j)
{
VideoWriter* writer = writers->operator[](j);
CV_Assert(writer != NULL);
CV_Assert(writer->isOpened());
Mat frame(CreateVideoWriterInvoker::FrameSize, CV_8UC3);
for (unsigned int i = 0; i < FrameCount; ++i)
{
GenerateFrame(frame, i);
writer->operator<< (frame);
}
}
}
protected:
static std::string to_string(unsigned int i)
{
std::stringstream stream(std::ios::out);
stream << "frame #" << i;
return stream.str();
}
private:
const std::vector<VideoWriter*>* writers;
};
const Scalar WriteVideo_Invoker::ObjectColor(Scalar::all(0));
const Point WriteVideo_Invoker::Center(CreateVideoWriterInvoker::FrameSize.height / 2,
CreateVideoWriterInvoker::FrameSize.width / 2);
class CreateVideoCaptureInvoker :
public ParallelLoopBody
{
public:
CreateVideoCaptureInvoker(std::vector<VideoCapture*>& _readers, const std::vector<std::string>& _files) :
ParallelLoopBody(), readers(&_readers), files(&_files)
{
}
virtual void operator() (const Range& range) const
{
for (int i = range.start; i != range.end; ++i)
{
readers->operator[](i) = new VideoCapture(files->operator[](i));
CV_Assert(readers->operator[](i)->isOpened());
}
}
private:
std::vector<VideoCapture*>* readers;
const std::vector<std::string>* files;
};
class ReadImageAndTest :
public ParallelLoopBody
{
public:
ReadImageAndTest(const std::vector<VideoCapture*>& _readers, cvtest::TS* _ts) :
ParallelLoopBody(), readers(&_readers), ts(_ts)
{
}
virtual void operator() (const Range& range) const
{
for (int j = range.start; j < range.end; ++j)
{
VideoCapture* capture = readers->operator[](j);
CV_Assert(capture != NULL);
CV_Assert(capture->isOpened());
const static double eps = 23.0;
unsigned int frameCount = static_cast<unsigned int>(capture->get(CAP_PROP_FRAME_COUNT));
CV_Assert(frameCount == WriteVideo_Invoker::FrameCount);
Mat reference(CreateVideoWriterInvoker::FrameSize, CV_8UC3);
for (unsigned int i = 0; i < frameCount && next; ++i)
{
Mat actual;
(*capture) >> actual;
WriteVideo_Invoker::GenerateFrame(reference, i);
EXPECT_EQ(reference.cols, actual.cols);
EXPECT_EQ(reference.rows, actual.rows);
EXPECT_EQ(reference.depth(), actual.depth());
EXPECT_EQ(reference.channels(), actual.channels());
double psnr = cvtest::PSNR(actual, reference);
if (psnr < eps)
{
#define SUM cvtest::TS::SUMMARY
ts->printf(SUM, "\nPSNR: %lf\n", psnr);
ts->printf(SUM, "Video #: %d\n", range.start);
ts->printf(SUM, "Frame #: %d\n", i);
#undef SUM
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->set_gtest_status();
Mat diff;
absdiff(actual, reference, diff);
EXPECT_EQ(countNonZero(diff.reshape(1) > 1), 0);
next = false;
}
}
}
}
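// shared early-abort flag: any worker that detects a bad frame clears it,
// so the remaining iterations stop comparing (a benign data race in a test)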
static bool next;
private:
const std::vector<VideoCapture*>* readers;
cvtest::TS* ts;
};
bool ReadImageAndTest::next;
TEST(Highgui_Video_parallel_writers_and_readers, accuracy)
{
const unsigned int threadsCount = 4;
cvtest::TS* ts = cvtest::TS::ptr();
// creating VideoWriters
std::vector<VideoWriter*> writers(threadsCount);
Range range(0, threadsCount);
std::vector<std::string> files(threadsCount);
CreateVideoWriterInvoker invoker1(writers, files);
parallel_for_(range, invoker1);
// write a video
parallel_for_(range, WriteVideo_Invoker(writers));
// deleting the writers
for (std::vector<VideoWriter*>::iterator i = writers.begin(), end = writers.end(); i != end; ++i)
delete *i;
writers.clear();
std::vector<VideoCapture*> readers(threadsCount);
CreateVideoCaptureInvoker invoker2(readers, files);
parallel_for_(range, invoker2);
ReadImageAndTest::next = true;
parallel_for_(range, ReadImageAndTest(readers, ts));
// deleting tmp video files
for (std::vector<std::string>::const_iterator i = files.begin(), end = files.end(); i != end; ++i)
{
int code = remove(i->c_str());
if (code != 0)
std::cerr << "Couldn't delete " << *i << std::endl;
}
}
#endif

View File

@@ -1,115 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
#undef DEFINE_GUID
#define DEFINE_GUID(n, fourcc, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) fourcc,
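// Redefining DEFINE_GUID to expand to just its second argument turns the GUID
// declarations below into a plain initializer list for allfourcc[].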
unsigned long allfourcc[] = {
DEFINE_GUID(MEDIASUBTYPE_GREY, 0x59455247, 0x0000, 0x0010, 0x80, 0x00,
0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71)
DEFINE_GUID(MEDIASUBTYPE_Y8, 0x20203859, 0x0000, 0x0010, 0x80, 0x00,
0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71)
DEFINE_GUID(MEDIASUBTYPE_Y800, 0x30303859, 0x0000, 0x0010, 0x80, 0x00,
0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71)
DEFINE_GUID(CLSID_CaptureGraphBuilder2,0xbf87b6e1,0x8c27,0x11d0,0xb3,0xf0,0x00,0xaa,0x00,0x37,0x61,0xc5)
DEFINE_GUID(CLSID_FilterGraph,0xe436ebb3,0x524f,0x11ce,0x9f,0x53,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(CLSID_NullRenderer,0xc1f400a4,0x3f08,0x11d3,0x9f,0x0b,0x00,0x60,0x08,0x03,0x9e,0x37)
DEFINE_GUID(CLSID_SampleGrabber,0xc1f400a0,0x3f08,0x11d3,0x9f,0x0b,0x00,0x60,0x08,0x03,0x9e,0x37)
DEFINE_GUID(CLSID_SystemDeviceEnum,0x62be5d10,0x60eb,0x11d0,0xbd,0x3b,0x00,0xa0,0xc9,0x11,0xce,0x86)
DEFINE_GUID(CLSID_VideoInputDeviceCategory,0x860bb310,0x5d01,0x11d0,0xbd,0x3b,0x00,0xa0,0xc9,0x11,0xce,0x86)
DEFINE_GUID(FORMAT_VideoInfo,0x05589f80,0xc356,0x11ce,0xbf,0x01,0x00,0xaa,0x00,0x55,0x59,0x5a)
DEFINE_GUID(IID_IAMAnalogVideoDecoder,0xc6e13350,0x30ac,0x11d0,0xa1,0x8c,0x00,0xa0,0xc9,0x11,0x89,0x56)
DEFINE_GUID(IID_IAMCameraControl,0xc6e13370,0x30ac,0x11d0,0xa1,0x8c,0x00,0xa0,0xc9,0x11,0x89,0x56)
DEFINE_GUID(IID_IAMCrossbar,0xc6e13380,0x30ac,0x11d0,0xa1,0x8c,0x00,0xa0,0xc9,0x11,0x89,0x56)
DEFINE_GUID(IID_IAMStreamConfig,0xc6e13340,0x30ac,0x11d0,0xa1,0x8c,0x00,0xa0,0xc9,0x11,0x89,0x56)
DEFINE_GUID(IID_IAMVideoProcAmp,0xc6e13360,0x30ac,0x11d0,0xa1,0x8c,0x00,0xa0,0xc9,0x11,0x89,0x56)
DEFINE_GUID(IID_IBaseFilter,0x56a86895,0x0ad4,0x11ce,0xb0,0x3a,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(IID_ICaptureGraphBuilder2,0x93e5a4e0,0x2d50,0x11d2,0xab,0xfa,0x00,0xa0,0xc9,0xc6,0xe3,0x8d)
DEFINE_GUID(IID_ICreateDevEnum,0x29840822,0x5b84,0x11d0,0xbd,0x3b,0x00,0xa0,0xc9,0x11,0xce,0x86)
DEFINE_GUID(IID_IGraphBuilder,0x56a868a9,0x0ad4,0x11ce,0xb0,0x3a,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(IID_IMPEG2PIDMap,0xafb6c2a1,0x2c41,0x11d3,0x8a,0x60,0x00,0x00,0xf8,0x1e,0x0e,0x4a)
DEFINE_GUID(IID_IMediaControl,0x56a868b1,0x0ad4,0x11ce,0xb0,0x3a,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(IID_IMediaFilter,0x56a86899,0x0ad4,0x11ce,0xb0,0x3a,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(IID_ISampleGrabber,0x6b652fff,0x11fe,0x4fce,0x92,0xad,0x02,0x66,0xb5,0xd7,0xc7,0x8f)
DEFINE_GUID(LOOK_UPSTREAM_ONLY,0xac798be0,0x98e3,0x11d1,0xb3,0xf1,0x00,0xaa,0x00,0x37,0x61,0xc5)
DEFINE_GUID(MEDIASUBTYPE_AYUV,0x56555941,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_IYUV,0x56555949,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_RGB24,0xe436eb7d,0x524f,0x11ce,0x9f,0x53,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(MEDIASUBTYPE_RGB32,0xe436eb7e,0x524f,0x11ce,0x9f,0x53,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(MEDIASUBTYPE_RGB555,0xe436eb7c,0x524f,0x11ce,0x9f,0x53,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(MEDIASUBTYPE_RGB565,0xe436eb7b,0x524f,0x11ce,0x9f,0x53,0x00,0x20,0xaf,0x0b,0xa7,0x70)
DEFINE_GUID(MEDIASUBTYPE_I420,0x49343230,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_UYVY,0x59565955,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_Y211,0x31313259,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_Y411,0x31313459,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_Y41P,0x50313459,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_YUY2,0x32595559,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_YUYV,0x56595559,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_YV12,0x32315659,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_YVU9,0x39555659,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_YVYU,0x55595659,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIASUBTYPE_MJPG,0x47504A4D, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71) // MGB
DEFINE_GUID(MEDIATYPE_Interleaved,0x73766169,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(MEDIATYPE_Video,0x73646976,0x0000,0x0010,0x80,0x00,0x00,0xaa,0x00,0x38,0x9b,0x71)
DEFINE_GUID(PIN_CATEGORY_CAPTURE,0xfb6c4281,0x0353,0x11d1,0x90,0x5f,0x00,0x00,0xc0,0xcc,0x16,0xba)
DEFINE_GUID(PIN_CATEGORY_PREVIEW,0xfb6c4282,0x0353,0x11d1,0x90,0x5f,0x00,0x00,0xc0,0xcc,0x16,0xba)
0};
TEST(Highgui_dshow, fourcc_conversion)
{
for(int i = 0; allfourcc[i]; ++i)
{
unsigned long fourcc = allfourcc[i];
double paramValue = fourcc;
int fourccFromParam = (int)(unsigned long)(paramValue);
EXPECT_EQ(fourcc, (unsigned long)(unsigned)fourccFromParam);
}
}
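// Why the round trip works (editorial note): the property channel between a
// caller and a backend is a double, whose 53-bit mantissa represents every
// 32-bit tag exactly; the test checks exactly this round trip:
//   unsigned long fourcc = 0xe436eb7d;    // MEDIASUBTYPE_RGB24's Data1
//   double d = (double) fourcc;           // as carried by get/setProperty
//   int asParam = (int)(unsigned long) d; // may wrap to a negative int
//   assert( (unsigned long)(unsigned) asParam == fourcc );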

View File

@@ -1,114 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
using namespace std;
class CV_FramecountTest: public cvtest::BaseTest
{
public:
void run(int);
};
void CV_FramecountTest::run(int)
{
const int time_sec = 5, fps = 25;
const string ext[] = {"avi", "mov", "mp4"};
const size_t n = sizeof(ext)/sizeof(ext[0]);
const string src_dir = ts->get_data_path();
ts->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
Ptr<CvCapture> cap;
for (size_t i = 0; i < n; ++i)
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
cap.reset(cvCreateFileCapture(file_path.c_str()));
if (!cap)
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
ts->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return;
}
//cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0);
IplImage* frame; int FrameCount = 0;
for(;;)
{
frame = cvQueryFrame(cap);
if( !frame )
break;
FrameCount++;
}
int framecount = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n"\
"\nName: big_buck_bunny.%s\nActual frame count: %d\n"\
"Frame count computed in the cycle of queries of frames: %d\n"\
"Frame count returned by cvGetCaptureProperty function: %d\n",
i+1, ext[i].c_str(), time_sec*fps, FrameCount, framecount);
if( (FrameCount != cvRound(time_sec*fps) ||
FrameCount != framecount) && ext[i] != "mpg" )
{
ts->printf(cvtest::TS::LOG, "FAILED\n");
ts->printf(cvtest::TS::LOG, "\nError: actual frame count and returned frame count are not matched.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return;
}
}
}
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG
TEST(Highgui_Video, framecount) {CV_FramecountTest test; test.safe_run();}
#endif

View File

@@ -1,223 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
using namespace std;
class CV_VideoPositioningTest: public cvtest::BaseTest
{
public:
enum {PROGRESSIVE, RANDOM};
CV_VideoPositioningTest();
~CV_VideoPositioningTest();
virtual void run(int) = 0;
protected:
vector <int> idx;
void run_test(int method);
private:
void generate_idx_seq(CvCapture *cap, int method);
};
class CV_VideoProgressivePositioningTest: public CV_VideoPositioningTest
{
public:
CV_VideoProgressivePositioningTest() : CV_VideoPositioningTest() { }
~CV_VideoProgressivePositioningTest();
void run(int);
};
class CV_VideoRandomPositioningTest: public CV_VideoPositioningTest
{
public:
CV_VideoRandomPositioningTest(): CV_VideoPositioningTest() { }
~CV_VideoRandomPositioningTest();
void run(int);
};
CV_VideoPositioningTest::CV_VideoPositioningTest() {}
CV_VideoPositioningTest::~CV_VideoPositioningTest() {}
CV_VideoProgressivePositioningTest::~CV_VideoProgressivePositioningTest() {}
CV_VideoRandomPositioningTest::~CV_VideoRandomPositioningTest() {}
void CV_VideoPositioningTest::generate_idx_seq(CvCapture* cap, int method)
{
idx.clear();
int N = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
switch(method)
{
case PROGRESSIVE:
{
int pos = 1, step = 20;
do
{
idx.push_back(pos);
pos += step;
}
while (pos <= N);
break;
}
case RANDOM:
{
RNG rng(N);
idx.clear();
for( int i = 0; i < N-1; i++ )
idx.push_back(rng.uniform(0, N));
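// always include the very last frame, then swap it into a random slot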
idx.push_back(N-1);
std::swap(idx.at(rng.uniform(0, N-1)), idx.at(N-1));
break;
}
default:break;
}
}
void CV_VideoPositioningTest::run_test(int method)
{
const string& src_dir = ts->get_data_path();
ts->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
const string ext[] = {"avi", "mov", "mp4", "mpg"};
int n = (int)(sizeof(ext)/sizeof(ext[0]));
int failed_videos = 0;
for (int i = 0; i < n; ++i)
{
// skip random positioning test in plain mpegs
if( method == RANDOM && ext[i] == "mpg" )
continue;
string file_path = src_dir + "video/big_buck_bunny." + ext[i];
ts->printf(cvtest::TS::LOG, "\nReading video file in %s...\n", file_path.c_str());
CvCapture* cap = cvCreateFileCapture(file_path.c_str());
if (!cap)
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
ts->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
failed_videos++; continue;
}
cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, 0);
generate_idx_seq(cap, method);
int N = (int)idx.size(), failed_frames = 0, failed_positions = 0, failed_iterations = 0;
for (int j = 0; j < N; ++j)
{
bool flag = false;
cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, idx.at(j));
/* IplImage* frame = cvRetrieveFrame(cap);
if (!frame)
{
if (!failed_frames)
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\n", i+1, ext[i].c_str());
}
failed_frames++;
ts->printf(cvtest::TS::LOG, "\nIteration: %d\n\nError: cannot read a frame with index %d.\n", j, idx.at(j));
ts->set_failed_test_info(cvtest::TS::FAIL_EXCEPTION);
flag = !flag;
} */
int val = (int)cvGetCaptureProperty(cap, CAP_PROP_POS_FRAMES);
if (idx.at(j) != val)
{
if (!(failed_frames||failed_positions))
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\n", i+1, ext[i].c_str());
}
failed_positions++;
if (!failed_frames)
{
ts->printf(cvtest::TS::LOG, "\nIteration: %d\n", j);
}
ts->printf(cvtest::TS::LOG, "Required pos: %d\nReturned pos: %d\n", idx.at(j), val);
ts->printf(cvtest::TS::LOG, "Error: required and returned positions are not matched.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
flag = true;
}
if (flag)
{
failed_iterations++;
failed_videos++;
break;
}
}
cvReleaseCapture(&cap);
}
ts->printf(cvtest::TS::LOG, "\nSuccessfull experiments: %d (%d%%)\n", n-failed_videos, 100*(n-failed_videos)/n);
ts->printf(cvtest::TS::LOG, "Failed experiments: %d (%d%%)\n", failed_videos, 100*failed_videos/n);
}
void CV_VideoProgressivePositioningTest::run(int)
{
run_test(PROGRESSIVE);
}
void CV_VideoRandomPositioningTest::run(int)
{
run_test(RANDOM);
}
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && defined HAVE_FFMPEG
TEST (Highgui_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); }
TEST (Highgui_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); }
#endif

View File

@@ -11,81 +11,11 @@
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
//#include "opencv2/imgproc.hpp"
//#include "opencv2/imgcodecs.hpp"
//#include "opencv2/highgui.hpp"
//#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/private.hpp"
#if defined(HAVE_DSHOW) || \
defined(HAVE_TYZX) || \
defined(HAVE_VFW) || \
defined(HAVE_LIBV4L) || \
(defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_DC1394_2) || \
defined(HAVE_DC1394) || \
defined(HAVE_CMU1394) || \
defined(HAVE_MIL) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_UNICAP) || \
defined(HAVE_PVAPI) || \
defined(HAVE_OPENNI) || \
defined(HAVE_XIMEA) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_GIGE_API) || \
defined(HAVE_INTELPERC) || \
(0)
//defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193
# define BUILD_WITH_CAMERA_SUPPORT 1
#else
# define BUILD_WITH_CAMERA_SUPPORT 0
#endif
#if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
/*defined(HAVE_OPENNI) || too specialized */ \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF)
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
#endif
#if /*defined(HAVE_XINE) || */\
defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF)
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0
#endif
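// These BUILD_WITH_* macros let the TEST() registrations in the test sources
// compile away cleanly when no matching capture or writer backend is configured.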
namespace cvtest
{
string fourccToString(int fourcc);
struct VideoFormat
{
VideoFormat() { fourcc = -1; }
VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
bool empty() const { return ext.empty(); }
string ext;
int fourcc;
};
extern const VideoFormat g_specific_fmt_list[];
}
//#include "opencv2/core/private.hpp"
#endif

View File

@@ -1,579 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui_c.h"
using namespace cv;
using namespace std;
namespace cvtest
{
string fourccToString(int fourcc)
{
return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
}
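// e.g. fourccToString(VideoWriter::fourcc('X', 'V', 'I', 'D')) yields "XVID":
// fourcc() packs the four characters little-endian, and this unpacks them.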
#ifdef HAVE_MSMF
const VideoFormat g_specific_fmt_list[] =
{
/*VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '2', '5')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', '5', '0')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'c', ' ')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 'h', 'd')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'd')),
VideoFormat("wmv", CV_FOURCC_MACRO('d', 'v', 's', 'l')),
VideoFormat("wmv", CV_FOURCC_MACRO('H', '2', '6', '3')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', '4', 'S', '2')),
VideoFormat("avi", CV_FOURCC_MACRO('M', 'J', 'P', 'G')),
VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'S')),
VideoFormat("mp4", CV_FOURCC_MACRO('M', 'P', '4', 'V')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', '4', '3')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'P', 'G', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('M', 'S', 'S', '2')),*/
#if !defined(_M_ARM)
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '1')),
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '2')),
#endif
VideoFormat("wmv", CV_FOURCC_MACRO('W', 'M', 'V', '3')),
VideoFormat("avi", CV_FOURCC_MACRO('H', '2', '6', '4')),
//VideoFormat("wmv", CV_FOURCC_MACRO('W', 'V', 'C', '1')),
VideoFormat()
};
#else
const VideoFormat g_specific_fmt_list[] =
{
VideoFormat("avi", VideoWriter::fourcc('X', 'V', 'I', 'D')),
VideoFormat("avi", VideoWriter::fourcc('M', 'P', 'E', 'G')),
VideoFormat("avi", VideoWriter::fourcc('M', 'J', 'P', 'G')),
//VideoFormat("avi", VideoWriter::fourcc('I', 'Y', 'U', 'V')),
VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')),
VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')),
VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')),
VideoFormat()
};
#endif
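// Each list ends with a default-constructed VideoFormat; its empty ext makes
// empty() return true, which is the sentinel the test loops below stop on.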
}
class CV_HighGuiTest : public cvtest::BaseTest
{
protected:
void ImageTest (const string& dir);
void VideoTest (const string& dir, const cvtest::VideoFormat& fmt);
void SpecificImageTest (const string& dir);
void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt);
CV_HighGuiTest() {}
~CV_HighGuiTest() {}
virtual void run(int) = 0;
};
class CV_ImageTest : public CV_HighGuiTest
{
public:
CV_ImageTest() {}
~CV_ImageTest() {}
void run(int);
};
class CV_SpecificImageTest : public CV_HighGuiTest
{
public:
CV_SpecificImageTest() {}
~CV_SpecificImageTest() {}
void run(int);
};
class CV_VideoTest : public CV_HighGuiTest
{
public:
CV_VideoTest() {}
~CV_VideoTest() {}
void run(int);
};
class CV_SpecificVideoTest : public CV_HighGuiTest
{
public:
CV_SpecificVideoTest() {}
~CV_SpecificVideoTest() {}
void run(int);
};
void CV_HighGuiTest::ImageTest(const string& dir)
{
string _name = dir + string("../cv/shared/baboon.png");
ts->printf(ts->LOG, "reading image : %s\n", _name.c_str());
Mat image = imread(_name);
image.convertTo(image, CV_8UC3);
if (image.empty())
{
ts->set_failed_test_info(ts->FAIL_MISSING_TEST_DATA);
return;
}
const string exts[] = {
#ifdef HAVE_PNG
"png",
#endif
#ifdef HAVE_TIFF
"tiff",
#endif
#ifdef HAVE_JPEG
"jpg",
#endif
#ifdef HAVE_JASPER
"jp2",
#endif
#if 0 /*defined HAVE_OPENEXR && !defined __APPLE__*/
"exr",
#endif
"bmp",
"ppm",
"ras"
};
const size_t ext_num = sizeof(exts)/sizeof(exts[0]);
for(size_t i = 0; i < ext_num; ++i)
{
string ext = exts[i];
string full_name = cv::tempfile(ext.c_str());
ts->printf(ts->LOG, " full_name : %s\n", full_name.c_str());
imwrite(full_name, image);
Mat loaded = imread(full_name);
if (loaded.empty())
{
ts->printf(ts->LOG, "Reading failed at fmt=%s\n", ext.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
const double thresDbell = 20;
double psnr = cvtest::PSNR(loaded, image);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "Reading image from file: too big difference (=%g) with fmt=%s\n", psnr, ext.c_str());
ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
continue;
}
vector<uchar> from_file;
FILE *f = fopen(full_name.c_str(), "rb");
if (!f)
{
ts->printf(ts->LOG, "Cannot reopen the written file %s\n", full_name.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
fseek(f, 0, SEEK_END);
long len = ftell(f);
from_file.resize((size_t)len);
fseek(f, 0, SEEK_SET);
from_file.resize(fread(&from_file[0], 1, from_file.size(), f));
fclose(f);
vector<uchar> buf;
imencode("." + exts[i], image, buf);
if (buf != from_file)
{
ts->printf(ts->LOG, "Encoding failed with fmt=%s\n", ext.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
Mat buf_loaded = imdecode(Mat(buf), 1);
if (buf_loaded.empty())
{
ts->printf(ts->LOG, "Decoding failed with fmt=%s\n", ext.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
psnr = cvtest::PSNR(buf_loaded, image);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "Decoding image from memory: too small PSNR (=%gdb) with fmt=%s\n", psnr, ext.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
}
ts->printf(ts->LOG, "end test function : ImagesTest \n");
ts->set_failed_test_info(ts->OK);
}
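// A minimal illustrative sketch (not part of this commit) of the in-memory half
// of the round trip above, assuming an 8-bit BGR input and the imgcodecs header.
#include <vector>
#include "opencv2/imgcodecs.hpp"
static bool pngRoundTripIsLossless(const cv::Mat& image)
{
    std::vector<uchar> buf;
    if (!cv::imencode(".png", image, buf))                  // compress into memory
        return false;
    cv::Mat decoded = cv::imdecode(buf, cv::IMREAD_COLOR);  // and decode it back
    // PNG is lossless, so the decoded pixels must match exactly.
    return !decoded.empty() && cv::norm(decoded, image, cv::NORM_INF) == 0;
}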
void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
string src_file = dir + "../cv/shared/video_for_test.avi";
string tmp_name = cv::tempfile((cvtest::fourccToString(fmt.fourcc) + "." + fmt.ext).c_str());
ts->printf(ts->LOG, "reading video : %s and converting it to %s\n", src_file.c_str(), tmp_name.c_str());
CvCapture* cap = cvCaptureFromFile(src_file.c_str());
if (!cap)
{
ts->set_failed_test_info(ts->FAIL_MISMATCH);
return;
}
CvVideoWriter* writer = 0;
vector<Mat> frames;
for(;;)
{
IplImage* img = cvQueryFrame( cap );
if (!img)
break;
frames.push_back(cv::cvarrToMat(img, true));
if (writer == NULL)
{
writer = cvCreateVideoWriter(tmp_name.c_str(), fmt.fourcc, 24, cvGetSize(img));
if (writer == NULL)
{
ts->printf(ts->LOG, "can't create writer (with fourcc : %s)\n",
cvtest::fourccToString(fmt.fourcc).c_str());
cvReleaseCapture( &cap );
ts->set_failed_test_info(ts->FAIL_MISMATCH);
return;
}
}
cvWriteFrame(writer, img);
}
cvReleaseVideoWriter( &writer );
cvReleaseCapture( &cap );
CvCapture *saved = cvCaptureFromFile(tmp_name.c_str());
if (!saved)
{
ts->set_failed_test_info(ts->FAIL_MISMATCH);
return;
}
const double thresDbell = 20;
for(int i = 0;; i++)
{
IplImage* ipl1 = cvQueryFrame( saved );
if (!ipl1)
break;
Mat img = frames[i];
Mat img1 = cv::cvarrToMat(ipl1);
double psnr = cvtest::PSNR(img1, img);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "Too low frame %d psnr = %gdb\n", i, psnr);
ts->set_failed_test_info(ts->FAIL_MISMATCH);
//imwrite("original.png", img);
//imwrite("after_test.png", img1);
//Mat diff;
//absdiff(img, img1, diff);
//imwrite("diff.png", diff);
break;
}
}
cvReleaseCapture( &saved );
ts->printf(ts->LOG, "end test function : ImagesVideo \n");
}
void CV_HighGuiTest::SpecificImageTest(const string& dir)
{
const size_t IMAGE_COUNT = 10;
for (size_t i = 0; i < IMAGE_COUNT; ++i)
{
stringstream s; s << i;
string file_path = dir+"../python/images/QCIF_0"+s.str()+".bmp";
Mat image = imread(file_path);
if (image.empty())
{
ts->set_failed_test_info(ts->FAIL_MISSING_TEST_DATA);
return;
}
resize(image, image, Size(968, 757), 0.0, 0.0, INTER_CUBIC);
string full_name = cv::tempfile((s.str() + ".bmp").c_str());
ts->printf(ts->LOG, " full_name : %s\n", full_name.c_str());
imwrite(full_name, image);
Mat loaded = imread(full_name);
if (loaded.empty())
{
ts->printf(ts->LOG, "Reading failed at fmt=bmp\n");
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
const double thresDbell = 20;
double psnr = cvtest::PSNR(loaded, image);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "Reading image from file: too big difference (=%g) with fmt=bmp\n", psnr);
ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
continue;
}
vector<uchar> from_file;
FILE *f = fopen(full_name.c_str(), "rb");
if (!f)
{
ts->printf(ts->LOG, "Cannot reopen the written file %s\n", full_name.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
fseek(f, 0, SEEK_END);
long len = ftell(f);
from_file.resize((size_t)len);
fseek(f, 0, SEEK_SET);
from_file.resize(fread(&from_file[0], 1, from_file.size(), f));
fclose(f);
vector<uchar> buf;
imencode(".bmp", image, buf);
if (buf != from_file)
{
ts->printf(ts->LOG, "Encoding failed with fmt=bmp\n");
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
Mat buf_loaded = imdecode(Mat(buf), 1);
if (buf_loaded.empty())
{
ts->printf(ts->LOG, "Decoding failed with fmt=bmp\n");
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
psnr = cvtest::PSNR(buf_loaded, image);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "Decoding image from memory: too small PSNR (=%gdb) with fmt=bmp\n", psnr);
ts->set_failed_test_info(ts->FAIL_MISMATCH);
continue;
}
}
ts->printf(ts->LOG, "end test function : SpecificImageTest \n");
ts->set_failed_test_info(ts->OK);
}
void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
string ext = fmt.ext;
int fourcc = fmt.fourcc;
string fourcc_str = cvtest::fourccToString(fourcc);
const string video_file = cv::tempfile((fourcc_str + "." + ext).c_str());
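// "& -2" clears the lowest bit, rounding each dimension down to an even value;
// many codecs accept only even frame sizes.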
Size frame_size(968 & -2, 757 & -2);
VideoWriter writer(video_file, fourcc, 25, frame_size, true);
if (!writer.isOpened())
{
// call it repeatedly for easier debugging
VideoWriter writer2(video_file, fourcc, 25, frame_size, true);
ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
ts->printf(ts->LOG, "Cannot create VideoWriter object with codec %s.\n", fourcc_str.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
return;
}
const size_t IMAGE_COUNT = 30;
vector<Mat> images;
for( size_t i = 0; i < IMAGE_COUNT; ++i )
{
string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), (int)i);
Mat img = imread(file_path, IMREAD_COLOR);
if (img.empty())
{
ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
ts->printf(ts->LOG, "Error: cannot read frame from %s.\n", file_path.c_str());
ts->printf(ts->LOG, "Continue creating the video file...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
break;
}
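// Recolour every pixel to saturated green (originally black) or red, so the
// frames stay high-contrast for the PSNR comparison after lossy encoding.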
for (int k = 0; k < img.rows; ++k)
for (int l = 0; l < img.cols; ++l)
if (img.at<Vec3b>(k, l) == Vec3b::all(0))
img.at<Vec3b>(k, l) = Vec3b(0, 255, 0);
else img.at<Vec3b>(k, l) = Vec3b(0, 0, 255);
resize(img, img, frame_size, 0.0, 0.0, INTER_CUBIC);
images.push_back(img);
writer << img;
}
writer.release();
VideoCapture cap(video_file);
size_t FRAME_COUNT = (size_t)cap.get(CAP_PROP_FRAME_COUNT);
size_t allowed_extra_frames = 0;
// Hack! Newer FFmpeg versions in this combination produce a file
// whose reported duration is one frame longer than needed, and so
// the calculated frame count is also off by one. Ideally, we'd want
// to fix both writing (to produce the correct duration) and reading
// (to correctly report frame count for such files), but I don't know
// how to do either, so this is a workaround for now.
// See also the same hack in CV_PositioningTest::run.
if (fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && ext == "mkv")
allowed_extra_frames = 1;
if (FRAME_COUNT < IMAGE_COUNT || FRAME_COUNT > IMAGE_COUNT + allowed_extra_frames)
{
ts->printf(ts->LOG, "\nFrame count checking for video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
if (allowed_extra_frames != 0)
ts->printf(ts->LOG, "Required frame count: %d-%d; Returned frame count: %d\n",
IMAGE_COUNT, IMAGE_COUNT + allowed_extra_frames, FRAME_COUNT);
else
ts->printf(ts->LOG, "Required frame count: %d; Returned frame count: %d\n", IMAGE_COUNT, FRAME_COUNT);
ts->printf(ts->LOG, "Error: Incorrect frame count in the video.\n");
ts->printf(ts->LOG, "Continue checking...\n");
ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
return;
}
for (int i = 0; (size_t)i < IMAGE_COUNT; i++)
{
Mat frame; cap >> frame;
if (frame.empty())
{
ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
ts->printf(ts->LOG, "File name: video_%s.%s\n", fourcc_str.c_str(), ext.c_str());
ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
ts->printf(ts->LOG, "Error: cannot read the next frame with index %d.\n", i+1);
ts->set_failed_test_info(ts->FAIL_MISSING_TEST_DATA);
break;
}
Mat img = images[i];
const double thresDbell = 40;
double psnr = cvtest::PSNR(img, frame);
if (psnr < thresDbell)
{
ts->printf(ts->LOG, "\nReading frame from the file video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
ts->printf(ts->LOG, "Frame index: %d\n", i+1);
ts->printf(ts->LOG, "PSNR between saved and original images: %g\n", psnr);
ts->printf(ts->LOG, "Minimum allowed PSNR: %g\n", thresDbell);
ts->printf(ts->LOG, "Error: too big difference between saved and original images.\n");
ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
break;
}
}
}
void CV_ImageTest::run(int)
{
ImageTest(ts->get_data_path());
}
void CV_SpecificImageTest::run(int)
{
SpecificImageTest(ts->get_data_path());
}
void CV_VideoTest::run(int)
{
for (int i = 0; ; ++i)
{
const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[i];
if( fmt.empty() )
break;
VideoTest(ts->get_data_path(), fmt);
}
}
void CV_SpecificVideoTest::run(int)
{
for (int i = 0; ; ++i)
{
const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[i];
if( fmt.empty() )
break;
SpecificVideoTest(ts->get_data_path(), fmt);
}
}
#ifdef HAVE_JPEG
TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && !defined(__APPLE__)
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
#endif
TEST(Highgui_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); }

View File

@@ -1,183 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
class CV_PositioningTest : public cvtest::BaseTest
{
public:
CV_PositioningTest()
{
framesize = Size(640, 480);
}
Mat drawFrame(int i)
{
Mat mat = Mat::zeros(framesize, CV_8UC3);
mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);
putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);
return mat;
}
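// Each synthetic frame encodes its own index, both in the colour ramp and as
// large text, so a frame decoded after a seek can be compared (via PSNR)
// against a freshly drawn reference.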
string getFilename(const cvtest::VideoFormat& fmt)
{
return cv::tempfile((cvtest::fourccToString(fmt.fourcc) + "." + fmt.ext).c_str());
}
bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount, string filename)
{
VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);
if( !writer.isOpened() )
return false;
for (int i = 0; i < framecount; ++i)
{
Mat img = drawFrame(i);
writer << img;
}
return true;
}
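// The VideoWriter is finalized by its destructor as it goes out of scope,
// so the file is complete when the function returns.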
void run(int)
{
int n_frames = 100;
for( int testcase = 0; ; testcase++ )
{
const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];
if( fmt.empty() )
break;
string filename = getFilename(fmt);
ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());
if( !CreateTestVideo(fmt, n_frames, filename) )
{
ts->printf(ts->LOG, "\nError: cannot create video file");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
VideoCapture cap(filename);
if (!cap.isOpened())
{
ts->printf(ts->LOG, "\nError: cannot read video file.");
ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
return;
}
int N0 = (int)cap.get(CAP_PROP_FRAME_COUNT);
cap.set(CAP_PROP_POS_FRAMES, 0);
int N = (int)cap.get(CAP_PROP_FRAME_COUNT);
// See the same hack in CV_HighGuiTest::SpecificVideoTest for explanation.
int allowed_extra_frames = 0;
if (fmt.fourcc == VideoWriter::fourcc('M', 'P', 'E', 'G') && fmt.ext == "mkv")
allowed_extra_frames = 1;
if (N < n_frames || N > n_frames + allowed_extra_frames || N != N0)
{
ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
for (int k = 0; k < n_frames; ++k)
{
int idx = theRNG().uniform(0, n_frames);
if( !cap.set(CAP_PROP_POS_FRAMES, idx) )
{
ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
int idx1 = (int)cap.get(CAP_PROP_POS_FRAMES);
Mat img; cap >> img;
Mat img0 = drawFrame(idx);
if( idx != idx1 )
{
ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",
idx1, idx);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
// imwrite("opencv_test_highgui_postest_actual.png", img);
// imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}
if (img.empty())
{
ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
double err = cvtest::PSNR(img, img0);
if( err < 20 )
{
ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
ts->printf(ts->LOG, "Saving both frames ...\n");
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
// imwrite("opencv_test_highgui_postest_actual.png", img);
// imwrite("opencv_test_highgui_postest_expected.png", img0);
return;
}
}
}
}
Size framesize;
};
#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT && defined HAVE_FFMPEG
TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
#endif