Merge pull request from jet47:gpucodec-refactoring

This commit is contained in:
Roman Donchenko 2013-06-14 16:24:23 +04:00 committed by OpenCV Buildbot
commit 71db862dc2
26 changed files with 1381 additions and 1615 deletions

@ -5,20 +5,37 @@ Video Decoding
gpu::VideoReader_GPU gpucodec::VideoReader
-------------------- ---------------------
Video reader class. Video reader interface.
.. ocv:class:: gpu::VideoReader_GPU .. ocv:class:: gpucodec::VideoReader
gpu::VideoReader_GPU::Codec gpucodec::VideoReader::nextFrame
--------------------------- --------------------------------
Grabs, decodes and returns the next video frame.
Video codecs supported by :ocv:class:`gpu::VideoReader_GPU` . .. ocv:function:: bool gpucodec::VideoReader::nextFrame(OutputArray frame)
.. ocv:enum:: gpu::VideoReader_GPU::Codec If no frame has been grabbed (there are no more frames in the video file), the method returns ``false`` . The method throws :ocv:class:`Exception` if an error occurs.
gpucodec::VideoReader::format
-----------------------------
Returns information about video file format.
.. ocv:function:: FormatInfo gpucodec::VideoReader::format() const
gpucodec::Codec
---------------
Video codecs supported by :ocv:class:`gpucodec::VideoReader` .
.. ocv:enum:: gpucodec::Codec
.. ocv:emember:: MPEG1 = 0 .. ocv:emember:: MPEG1 = 0
.. ocv:emember:: MPEG2 .. ocv:emember:: MPEG2
@ -50,12 +67,12 @@ Video codecs supported by :ocv:class:`gpu::VideoReader_GPU` .
UYVY (4:2:2) UYVY (4:2:2)
gpu::VideoReader_GPU::ChromaFormat
----------------------------------
Chroma formats supported by :ocv:class:`gpu::VideoReader_GPU` . gpucodec::ChromaFormat
----------------------
Chroma formats supported by :ocv:class:`gpucodec::VideoReader` .
.. ocv:enum:: gpu::VideoReader_GPU::ChromaFormat .. ocv:enum:: gpucodec::ChromaFormat
.. ocv:emember:: Monochrome = 0 .. ocv:emember:: Monochrome = 0
.. ocv:emember:: YUV420 .. ocv:emember:: YUV420
@ -63,9 +80,10 @@ Chroma formats supported by :ocv:class:`gpu::VideoReader_GPU` .
.. ocv:emember:: YUV444 .. ocv:emember:: YUV444
gpu::VideoReader_GPU::FormatInfo
-------------------------------- gpucodec::FormatInfo
.. ocv:struct:: gpu::VideoReader_GPU::FormatInfo --------------------
.. ocv:struct:: gpucodec::FormatInfo
Struct providing information about video file format. :: Struct providing information about video file format. ::
@ -78,157 +96,58 @@ Struct providing information about video file format. ::
}; };
gpu::VideoReader_GPU::VideoReader_GPU
-------------------------------------
Constructors.
.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU() gpucodec::createVideoReader
.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const String& filename) ---------------------------
.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const cv::Ptr<VideoSource>& source) Creates video reader.
.. ocv:function:: Ptr<VideoReader> gpucodec::createVideoReader(const String& filename)
.. ocv:function:: Ptr<VideoReader> gpucodec::createVideoReader(const Ptr<RawVideoSource>& source)
:param filename: Name of the input video file. :param filename: Name of the input video file.
:param source: Video file parser implemented by user. :param source: RAW video source implemented by user.
The constructors initialize the video reader. FFMPEG is used to read videos. The user can implement custom demultiplexing with :ocv:class:`gpu::VideoReader_GPU::VideoSource` . FFMPEG is used to read videos. The user can implement custom demultiplexing with :ocv:class:`gpucodec::RawVideoSource` .
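For illustration, a minimal usage sketch of the new factory-based reader API follows; the file name ``video.avi`` and the processing body are placeholders, not part of the patch ::

    #include <opencv2/core/gpu.hpp>
    #include <opencv2/gpucodec.hpp>

    void readAllFrames()
    {
        // createVideoReader() throws cv::Exception if the file cannot be opened
        cv::Ptr<cv::gpucodec::VideoReader> reader = cv::gpucodec::createVideoReader("video.avi");

        cv::gpu::GpuMat frame;

        // nextFrame() returns false once the stream is exhausted
        while (reader->nextFrame(frame))
        {
            // ... process frame on the GPU ...
        }
    }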
gpu::VideoReader_GPU::open gpucodec::RawVideoSource
-------------------------- ------------------------
Initializes or reinitializes video reader. .. ocv:class:: gpucodec::RawVideoSource
.. ocv:function:: void gpu::VideoReader_GPU::open(const String& filename)
.. ocv:function:: void gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>& source)
The method opens video reader. Parameters are the same as in the constructor :ocv:func:`gpu::VideoReader_GPU::VideoReader_GPU` . The method throws :ocv:class:`Exception` if error occurs.
gpu::VideoReader_GPU::isOpened
------------------------------
Returns true if video reader has been successfully initialized.
.. ocv:function:: bool gpu::VideoReader_GPU::isOpened() const
gpu::VideoReader_GPU::close
---------------------------
Releases the video reader.
.. ocv:function:: void gpu::VideoReader_GPU::close()
gpu::VideoReader_GPU::read
--------------------------
Grabs, decodes and returns the next video frame.
.. ocv:function:: bool gpu::VideoReader_GPU::read(GpuMat& image)
If no frame has been grabbed (there are no more frames in the video file), the method returns ``false`` . The method throws :ocv:class:`Exception` if an error occurs.
gpu::VideoReader_GPU::format
----------------------------
Returns information about video file format.
.. ocv:function:: FormatInfo gpu::VideoReader_GPU::format() const
The method throws :ocv:class:`Exception` if video reader wasn't initialized.
gpu::VideoReader_GPU::dumpFormat
--------------------------------
Dump information about video file format to specified stream.
.. ocv:function:: void gpu::VideoReader_GPU::dumpFormat(std::ostream& st)
:param st: Output stream.
The method throws :ocv:class:`Exception` if video reader wasn't initialized.
gpu::VideoReader_GPU::VideoSource
-----------------------------------
.. ocv:class:: gpu::VideoReader_GPU::VideoSource
Interface for video demultiplexing. :: Interface for video demultiplexing. ::
class VideoSource class RawVideoSource
{ {
public: public:
VideoSource(); virtual ~RawVideoSource() {}
virtual ~VideoSource() {}
virtual bool getNextPacket(unsigned char** data, int* size, bool* endOfFile) = 0;
virtual FormatInfo format() const = 0; virtual FormatInfo format() const = 0;
virtual void start() = 0;
virtual void stop() = 0;
virtual bool isStarted() const = 0;
virtual bool hasError() const = 0;
protected:
bool parseVideoData(const unsigned char* data, size_t size, bool endOfStream = false);
}; };
The user can provide custom demultiplexing by implementing this interface. The user can provide custom demultiplexing by implementing this interface.
gpu::VideoReader_GPU::VideoSource::format gpucodec::RawVideoSource::getNextPacket
-----------------------------------------
Returns information about video file format.
.. ocv:function:: virtual FormatInfo gpu::VideoReader_GPU::VideoSource::format() const = 0
gpu::VideoReader_GPU::VideoSource::start
----------------------------------------
Starts processing.
.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::start() = 0
The implementation must create its own video-processing thread and periodically call :ocv:func:`gpu::VideoReader_GPU::VideoSource::parseVideoData` .
gpu::VideoReader_GPU::VideoSource::stop
--------------------------------------- ---------------------------------------
Stops processing. Returns the next packet with a RAW video frame.
.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::stop() = 0 .. ocv:function:: bool gpucodec::RawVideoSource::getNextPacket(unsigned char** data, int* size, bool* endOfFile) = 0
:param data: Pointer to frame data.
gpu::VideoReader_GPU::VideoSource::isStarted
--------------------------------------------
Returns ``true`` if processing was successfully started.
.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::isStarted() const = 0
gpu::VideoReader_GPU::VideoSource::hasError
-------------------------------------------
Returns ``true`` if an error occurred during processing.
.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::hasError() const = 0
gpu::VideoReader_GPU::VideoSource::parseVideoData
-------------------------------------------------
Parses the next video frame. The implementation must call this method after a new frame has been grabbed.
.. ocv:function:: bool gpu::VideoReader_GPU::VideoSource::parseVideoData(const uchar* data, size_t size, bool endOfStream = false)
:param data: Pointer to frame data. Can be ``NULL`` if ``endOfStream`` is ``true`` .
:param size: Size in bytes of current frame. :param size: Size in bytes of current frame.
:param endOfStream: Indicates that it is end of stream. :param endOfStream: Indicates that it is end of stream.
gpucodec::RawVideoSource::format
--------------------------------
Returns information about video file format.
.. ocv:function:: virtual FormatInfo gpucodec::RawVideoSource::format() const = 0
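A hedged sketch of a user-defined source built on this interface is shown below; the in-memory packet queue is purely illustrative, and the format description is assumed to be known up front ::

    #include <deque>
    #include <vector>
    #include <opencv2/gpucodec.hpp>

    class QueueVideoSource : public cv::gpucodec::RawVideoSource
    {
    public:
        explicit QueueVideoSource(const cv::gpucodec::FormatInfo& fmt) : fmt_(fmt) {}

        // hand the decoder one demuxed packet per call
        bool getNextPacket(unsigned char** data, int* size, bool* endOfFile)
        {
            if (packets_.empty())
            {
                *endOfFile = true;
                return false;
            }
            current_ = packets_.front();
            packets_.pop_front();
            *data = &current_[0];
            *size = static_cast<int>(current_.size());
            *endOfFile = false;
            return true;
        }

        cv::gpucodec::FormatInfo format() const { return fmt_; }

    private:
        cv::gpucodec::FormatInfo fmt_;
        std::deque< std::vector<unsigned char> > packets_;
        std::vector<unsigned char> current_;
    };

Such a source would then be wrapped in a ``Ptr`` and passed to ``gpucodec::createVideoReader(const Ptr<RawVideoSource>&)``.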

@ -5,80 +5,25 @@ Video Encoding
gpu::VideoWriter_GPU gpucodec::VideoWriter
--------------------- ---------------------
Video writer class. Video writer interface.
.. ocv:class:: gpu::VideoWriter_GPU .. ocv:class:: gpucodec::VideoWriter
The class uses the H264 video codec. The implementation uses the H264 video codec.
.. note:: Currently only Windows platform is supported. .. note:: Currently only Windows platform is supported.
gpu::VideoWriter_GPU::VideoWriter_GPU gpucodec::VideoWriter::write
------------------------------------- ----------------------------
Constructors.
.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU()
.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
:param fileName: Name of the output video file. Only AVI file format is supported.
:param frameSize: Size of the input video frames.
:param fps: Framerate of the created video stream.
:param params: Encoder parameters. See :ocv:struct:`gpu::VideoWriter_GPU::EncoderParams` .
:param format: Surface format of input frames ( ``SF_UYVY`` , ``SF_YUY2`` , ``SF_YV12`` , ``SF_NV12`` , ``SF_IYUV`` , ``SF_BGR`` or ``SF_GRAY``). BGR or gray frames will be converted to YV12 format before encoding, frames with other formats will be used as is.
:param encoderCallback: Callbacks for video encoder. See :ocv:class:`gpu::VideoWriter_GPU::EncoderCallBack` . Use it if you want to work with raw video stream.
The constructors initialize the video writer. FFMPEG is used to write videos. The user can implement custom multiplexing with :ocv:class:`gpu::VideoWriter_GPU::EncoderCallBack` .
gpu::VideoWriter_GPU::open
--------------------------
Initializes or reinitializes video writer.
.. ocv:function:: void gpu::VideoWriter_GPU::open(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: void gpu::VideoWriter_GPU::open(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
The method opens video writer. Parameters are the same as in the constructor :ocv:func:`gpu::VideoWriter_GPU::VideoWriter_GPU` . The method throws :ocv:class:`Exception` if error occurs.
gpu::VideoWriter_GPU::isOpened
------------------------------
Returns true if video writer has been successfully initialized.
.. ocv:function:: bool gpu::VideoWriter_GPU::isOpened() const
gpu::VideoWriter_GPU::close
---------------------------
Releases the video writer.
.. ocv:function:: void gpu::VideoWriter_GPU::close()
gpu::VideoWriter_GPU::write
---------------------------
Writes the next video frame. Writes the next video frame.
.. ocv:function:: void gpu::VideoWriter_GPU::write(const cv::gpu::GpuMat& image, bool lastFrame = false) .. ocv:function:: void gpucodec::VideoWriter::write(InputArray frame, bool lastFrame = false) = 0
:param image: The written frame. :param frame: The written frame.
:param lastFrame: Indicates that it is end of stream. The parameter can be ignored. :param lastFrame: Indicates that it is end of stream. The parameter can be ignored.
@ -86,9 +31,34 @@ The method writes the specified image to the video file. The image must have the same
gpu::VideoWriter_GPU::EncoderParams gpucodec::createVideoWriter
----------------------------------- ---------------------------
.. ocv:struct:: gpu::VideoWriter_GPU::EncoderParams Creates video writer.
.. ocv:function:: Ptr<gpucodec::VideoWriter> gpucodec::createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: Ptr<gpucodec::VideoWriter> gpucodec::createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
.. ocv:function:: Ptr<gpucodec::VideoWriter> gpucodec::createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR)
.. ocv:function:: Ptr<gpucodec::VideoWriter> gpucodec::createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR)
:param fileName: Name of the output video file. Only AVI file format is supported.
:param frameSize: Size of the input video frames.
:param fps: Framerate of the created video stream.
:param params: Encoder parameters. See :ocv:struct:`gpucodec::EncoderParams` .
:param format: Surface format of input frames ( ``SF_UYVY`` , ``SF_YUY2`` , ``SF_YV12`` , ``SF_NV12`` , ``SF_IYUV`` , ``SF_BGR`` or ``SF_GRAY``). BGR or gray frames will be converted to YV12 format before encoding, frames with other formats will be used as is.
:param encoderCallback: Callbacks for video encoder. See :ocv:class:`gpucodec::EncoderCallBack` . Use it if you want to work with raw video stream.
These functions initialize the video writer. FFMPEG is used to write videos. The user can implement custom multiplexing with :ocv:class:`gpucodec::EncoderCallBack` .
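A short, hedged sketch of driving the writer through the factory function; the output file name, frame rate and frame source are placeholders (and, as noted above, the writer currently works only on Windows) ::

    #include <vector>
    #include <opencv2/core/gpu.hpp>
    #include <opencv2/gpucodec.hpp>

    void encode(const std::vector<cv::gpu::GpuMat>& frames, double fps)
    {
        CV_Assert( !frames.empty() );

        // only AVI output is supported; the surface format defaults to SF_BGR
        cv::Ptr<cv::gpucodec::VideoWriter> writer =
            cv::gpucodec::createVideoWriter("output.avi", frames[0].size(), fps);

        for (size_t i = 0; i < frames.size(); ++i)
            writer->write(frames[i], i + 1 == frames.size()); // flag the last frame
    }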
gpucodec::EncoderParams
-----------------------
.. ocv:struct:: gpucodec::EncoderParams
Different parameters for CUDA video encoder. :: Different parameters for CUDA video encoder. ::
@ -123,12 +93,12 @@ Different parameters for CUDA video encoder. ::
gpu::VideoWriter_GPU::EncoderParams::EncoderParams gpucodec::EncoderParams::EncoderParams
-------------------------------------------------- --------------------------------------
Constructors. Constructors.
.. ocv:function:: gpu::VideoWriter_GPU::EncoderParams::EncoderParams() .. ocv:function:: gpucodec::EncoderParams::EncoderParams()
.. ocv:function:: gpu::VideoWriter_GPU::EncoderParams::EncoderParams(const String& configFile) .. ocv:function:: gpucodec::EncoderParams::EncoderParams(const String& configFile)
:param configFile: Config file name. :param configFile: Config file name.
@ -136,29 +106,29 @@ Creates default parameters or reads parameters from config file.
gpu::VideoWriter_GPU::EncoderParams::load gpucodec::EncoderParams::load
----------------------------------------- -----------------------------
Reads parameters from config file. Reads parameters from config file.
.. ocv:function:: void gpu::VideoWriter_GPU::EncoderParams::load(const String& configFile) .. ocv:function:: void gpucodec::EncoderParams::load(const String& configFile)
:param configFile: Config file name. :param configFile: Config file name.
gpu::VideoWriter_GPU::EncoderParams::save gpucodec::EncoderParams::save
----------------------------------------- -----------------------------
Saves parameters to config file. Saves parameters to config file.
.. ocv:function:: void gpu::VideoWriter_GPU::EncoderParams::save(const String& configFile) const .. ocv:function:: void gpucodec::EncoderParams::save(const String& configFile) const
:param configFile: Config file name. :param configFile: Config file name.
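A brief sketch of round-tripping encoder parameters through a config file; the file name and the tweaked field are illustrative only ::

    cv::gpucodec::EncoderParams params;                 // default parameters
    params.P_Interval = 1;                              // NVVE_P_INTERVAL, chosen here only as an example
    params.save("encoder.cfg");                         // write to config file

    cv::gpucodec::EncoderParams loaded("encoder.cfg");  // read back (equivalent to loaded.load("encoder.cfg"))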
gpu::VideoWriter_GPU::EncoderCallBack gpucodec::EncoderCallBack
------------------------------------- -------------------------
.. ocv:class:: gpu::VideoWriter_GPU::EncoderCallBack .. ocv:class:: gpucodec::EncoderCallBack
Callbacks for CUDA video encoder. :: Callbacks for CUDA video encoder. ::
@ -182,38 +152,38 @@ Callbacks for CUDA video encoder. ::
gpu::VideoWriter_GPU::EncoderCallBack::acquireBitStream gpucodec::EncoderCallBack::acquireBitStream
------------------------------------------------------- -------------------------------------------
Callback function to signal the start of bitstream that is to be encoded. Callback function to signal the start of bitstream that is to be encoded.
.. ocv:function:: virtual uchar* gpu::VideoWriter_GPU::EncoderCallBack::acquireBitStream(int* bufferSize) = 0 .. ocv:function:: virtual uchar* gpucodec::EncoderCallBack::acquireBitStream(int* bufferSize) = 0
Callback must allocate buffer for CUDA encoder and return pointer to it and its size. Callback must allocate buffer for CUDA encoder and return pointer to it and its size.
gpu::VideoWriter_GPU::EncoderCallBack::releaseBitStream gpucodec::EncoderCallBack::releaseBitStream
------------------------------------------------------- -------------------------------------------
Callback function to signal that the encoded bitstream is ready to be written to file. Callback function to signal that the encoded bitstream is ready to be written to file.
.. ocv:function:: virtual void gpu::VideoWriter_GPU::EncoderCallBack::releaseBitStream(unsigned char* data, int size) = 0 .. ocv:function:: virtual void gpucodec::EncoderCallBack::releaseBitStream(unsigned char* data, int size) = 0
gpu::VideoWriter_GPU::EncoderCallBack::onBeginFrame gpucodec::EncoderCallBack::onBeginFrame
--------------------------------------------------- ---------------------------------------
Callback function to signal that the encoding operation on the frame has started. Callback function to signal that the encoding operation on the frame has started.
.. ocv:function:: virtual void gpu::VideoWriter_GPU::EncoderCallBack::onBeginFrame(int frameNumber, PicType picType) = 0 .. ocv:function:: virtual void gpucodec::EncoderCallBack::onBeginFrame(int frameNumber, PicType picType) = 0
:param picType: Specify frame type (I-Frame, P-Frame or B-Frame). :param picType: Specify frame type (I-Frame, P-Frame or B-Frame).
gpu::VideoWriter_GPU::EncoderCallBack::onEndFrame gpucodec::EncoderCallBack::onEndFrame
------------------------------------------------- -------------------------------------
Callback function signals that the encoding operation on the frame has finished. Callback function signals that the encoding operation on the frame has finished.
.. ocv:function:: virtual void gpu::VideoWriter_GPU::EncoderCallBack::onEndFrame(int frameNumber, PicType picType) = 0 .. ocv:function:: virtual void gpucodec::EncoderCallBack::onEndFrame(int frameNumber, PicType picType) = 0
:param picType: Specify frame type (I-Frame, P-Frame or B-Frame). :param picType: Specify frame type (I-Frame, P-Frame or B-Frame).
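A hedged sketch of a callback sink that collects the encoded bitstream into an in-memory buffer; the buffer size is arbitrary, and the per-frame hooks are left empty ::

    #include <vector>
    #include <opencv2/gpucodec.hpp>

    class BufferCallBack : public cv::gpucodec::EncoderCallBack
    {
    public:
        BufferCallBack() : buf_(4 * 1024 * 1024) {}

        // give the encoder a host buffer to write the bitstream into
        uchar* acquireBitStream(int* bufferSize)
        {
            *bufferSize = static_cast<int>(buf_.size());
            return &buf_[0];
        }

        // copy out the portion that was actually filled
        void releaseBitStream(unsigned char* data, int size)
        {
            stream_.insert(stream_.end(), data, data + size);
        }

        void onBeginFrame(int /*frameNumber*/, PicType /*picType*/) {}
        void onEndFrame(int /*frameNumber*/, PicType /*picType*/) {}

    private:
        std::vector<uchar> buf_;
        std::vector<uchar> stream_;
    };

Such a callback would be passed to ``gpucodec::createVideoWriter(const Ptr<EncoderCallBack>&, ...)`` when a custom container or streaming sink is needed.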

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -47,23 +48,14 @@
# error gpucodec.hpp header must be compiled as C++ # error gpucodec.hpp header must be compiled as C++
#endif #endif
#include <iosfwd>
#include "opencv2/core/gpu.hpp" #include "opencv2/core/gpu.hpp"
namespace cv { namespace gpu { namespace cv { namespace gpucodec {
////////////////////////////////// Video Encoding ////////////////////////////////// ////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows // Works only under Windows.
// Supports only H264 video codec and AVI files // Supports only H264 video codec and AVI files.
class CV_EXPORTS VideoWriter_GPU
{
public:
struct EncoderParams;
// Callbacks for video encoder, use it if you want to work with raw video stream
class EncoderCallBack;
enum SurfaceFormat enum SurfaceFormat
{ {
@ -76,24 +68,6 @@ public:
SF_GRAY = SF_BGR SF_GRAY = SF_BGR
}; };
VideoWriter_GPU();
VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
~VideoWriter_GPU();
// all methods throws cv::Exception if error occurs
void open(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
void open(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
bool isOpened() const;
void close();
void write(const cv::gpu::GpuMat& image, bool lastFrame = false);
struct CV_EXPORTS EncoderParams struct CV_EXPORTS EncoderParams
{ {
int P_Interval; // NVVE_P_INTERVAL, int P_Interval; // NVVE_P_INTERVAL,
@ -123,8 +97,6 @@ public:
void save(const String& configFile) const; void save(const String& configFile) const;
}; };
EncoderParams getParams() const;
class CV_EXPORTS EncoderCallBack class CV_EXPORTS EncoderCallBack
{ {
public: public:
@ -137,37 +109,41 @@ public:
virtual ~EncoderCallBack() {} virtual ~EncoderCallBack() {}
// callback function to signal the start of bitstream that is to be encoded //! callback function to signal the start of bitstream that is to be encoded
// must return pointer to buffer //! callback must allocate host buffer for CUDA encoder and return pointer to it and its size
virtual uchar* acquireBitStream(int* bufferSize) = 0; virtual uchar* acquireBitStream(int* bufferSize) = 0;
// callback function to signal that the encoded bitstream is ready to be written to file //! callback function to signal that the encoded bitstream is ready to be written to file
virtual void releaseBitStream(unsigned char* data, int size) = 0; virtual void releaseBitStream(unsigned char* data, int size) = 0;
// callback function to signal that the encoding operation on the frame has started //! callback function to signal that the encoding operation on the frame has started
virtual void onBeginFrame(int frameNumber, PicType picType) = 0; virtual void onBeginFrame(int frameNumber, PicType picType) = 0;
// callback function signals that the encoding operation on the frame has finished //! callback function signals that the encoding operation on the frame has finished
virtual void onEndFrame(int frameNumber, PicType picType) = 0; virtual void onEndFrame(int frameNumber, PicType picType) = 0;
}; };
class Impl; class CV_EXPORTS VideoWriter
{
public:
virtual ~VideoWriter() {}
private: //! writes the next frame from GPU memory
cv::Ptr<Impl> impl_; virtual void write(InputArray frame, bool lastFrame = false) = 0;
virtual EncoderParams getEncoderParams() const = 0;
}; };
//! create VideoWriter for specified output file (only AVI file format is supported)
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
//! create VideoWriter for user-defined callbacks
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format = SF_BGR);
CV_EXPORTS Ptr<VideoWriter> createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
////////////////////////////////// Video Decoding ////////////////////////////////////////// ////////////////////////////////// Video Decoding //////////////////////////////////////////
namespace detail
{
class FrameQueue;
class VideoParser;
}
class CV_EXPORTS VideoReader_GPU
{
public:
enum Codec enum Codec
{ {
MPEG1 = 0, MPEG1 = 0,
@ -183,7 +159,7 @@ public:
Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0) Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0)
Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0) Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0)
Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2) Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2)
Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')), // UYVY (4:2:2) Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) // UYVY (4:2:2)
}; };
enum ChromaFormat enum ChromaFormat
@ -191,7 +167,7 @@ public:
Monochrome = 0, Monochrome = 0,
YUV420, YUV420,
YUV422, YUV422,
YUV444, YUV444
}; };
struct FormatInfo struct FormatInfo
@ -202,64 +178,29 @@ public:
int height; int height;
}; };
class VideoSource; class CV_EXPORTS VideoReader
VideoReader_GPU();
explicit VideoReader_GPU(const String& filename);
explicit VideoReader_GPU(const cv::Ptr<VideoSource>& source);
~VideoReader_GPU();
void open(const String& filename);
void open(const cv::Ptr<VideoSource>& source);
bool isOpened() const;
void close();
bool read(GpuMat& image);
FormatInfo format() const;
void dumpFormat(std::ostream& st);
class CV_EXPORTS VideoSource
{ {
public: public:
VideoSource() : frameQueue_(0), videoParser_(0) {} virtual ~VideoReader() {}
virtual ~VideoSource() {}
virtual bool nextFrame(OutputArray frame) = 0;
virtual FormatInfo format() const = 0; virtual FormatInfo format() const = 0;
virtual void start() = 0;
virtual void stop() = 0;
virtual bool isStarted() const = 0;
virtual bool hasError() const = 0;
void setFrameQueue(detail::FrameQueue* frameQueue) { frameQueue_ = frameQueue; }
void setVideoParser(detail::VideoParser* videoParser) { videoParser_ = videoParser; }
protected:
bool parseVideoData(const uchar* data, size_t size, bool endOfStream = false);
private:
VideoSource(const VideoSource&);
VideoSource& operator =(const VideoSource&);
detail::FrameQueue* frameQueue_;
detail::VideoParser* videoParser_;
}; };
class Impl; class CV_EXPORTS RawVideoSource
{
public:
virtual ~RawVideoSource() {}
private: virtual bool getNextPacket(unsigned char** data, int* size, bool* endOfFile) = 0;
cv::Ptr<Impl> impl_;
virtual FormatInfo format() const = 0;
}; };
}} // namespace cv { namespace gpu { CV_EXPORTS Ptr<VideoReader> createVideoReader(const String& filename);
CV_EXPORTS Ptr<VideoReader> createVideoReader(const Ptr<RawVideoSource>& source);
namespace cv { }} // namespace cv { namespace gpucodec {
template <> CV_EXPORTS void Ptr<cv::gpu::VideoWriter_GPU::Impl>::delete_obj();
template <> CV_EXPORTS void Ptr<cv::gpu::VideoReader_GPU::Impl>::delete_obj();
}
#endif /* __OPENCV_GPUCODEC_HPP__ */ #endif /* __OPENCV_GPUCODEC_HPP__ */

@ -74,12 +74,11 @@ PERF_TEST_P(FileName, VideoReader, Values("gpu/video/768x576.avi", "gpu/video/19
if (PERF_RUN_GPU()) if (PERF_RUN_GPU())
{ {
cv::gpu::VideoReader_GPU d_reader(inputFile); cv::Ptr<cv::gpucodec::VideoReader> d_reader = cv::gpucodec::createVideoReader(inputFile);
ASSERT_TRUE( d_reader.isOpened() );
cv::gpu::GpuMat frame; cv::gpu::GpuMat frame;
TEST_CYCLE_N(10) d_reader.read(frame); TEST_CYCLE_N(10) d_reader->nextFrame(frame);
GPU_SANITY_CHECK(frame); GPU_SANITY_CHECK(frame);
} }
@ -119,7 +118,7 @@ PERF_TEST_P(FileName, VideoWriter, Values("gpu/video/768x576.avi", "gpu/video/19
if (PERF_RUN_GPU()) if (PERF_RUN_GPU())
{ {
cv::gpu::VideoWriter_GPU d_writer; cv::Ptr<cv::gpucodec::VideoWriter> d_writer;
cv::gpu::GpuMat d_frame; cv::gpu::GpuMat d_frame;
@ -130,11 +129,11 @@ PERF_TEST_P(FileName, VideoWriter, Values("gpu/video/768x576.avi", "gpu/video/19
d_frame.upload(frame); d_frame.upload(frame);
if (!d_writer.isOpened()) if (d_writer.empty())
d_writer.open(outputFile, frame.size(), FPS); d_writer = cv::gpucodec::createVideoWriter(outputFile, frame.size(), FPS);
startTimer(); next(); startTimer(); next();
d_writer.write(d_frame); d_writer->write(d_frame);
stopTimer(); stopTimer();
} }
} }

@ -51,12 +51,7 @@
namespace cv { namespace gpu { namespace cudev namespace cv { namespace gpu { namespace cudev
{ {
__constant__ float constHueColorSpaceMat[9]; __constant__ float constHueColorSpaceMat[9] = {1.1644f, 0.0f, 1.596f, 1.1644f, -0.3918f, -0.813f, 1.1644f, 2.0172f, 0.0f};
void loadHueCSC(float hueCSC[9])
{
cudaSafeCall( cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, 9 * sizeof(float)) );
}
__device__ void YUV2RGB(const uint* yuvi, float* red, float* green, float* blue) __device__ void YUV2RGB(const uint* yuvi, float* red, float* green, float* blue)
{ {
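The hard-coded matrix above appears to correspond to the usual ITU-R BT.601 limited-range YCbCr-to-RGB conversion; stated here as an assumption for reference rather than taken from the patch:

.. math::

    R &= 1.1644\,(Y - 16) + 1.5960\,(V - 128) \\
    G &= 1.1644\,(Y - 16) - 0.3918\,(U - 128) - 0.8130\,(V - 128) \\
    B &= 1.1644\,(Y - 16) + 2.0172\,(U - 128)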

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,7 +45,11 @@
#ifdef HAVE_NVCUVID #ifdef HAVE_NVCUVID
cv::gpu::detail::CuvidVideoSource::CuvidVideoSource(const String& fname) using namespace cv;
using namespace cv::gpucodec;
using namespace cv::gpucodec::detail;
cv::gpucodec::detail::CuvidVideoSource::CuvidVideoSource(const String& fname)
{ {
CUVIDSOURCEPARAMS params; CUVIDSOURCEPARAMS params;
std::memset(&params, 0, sizeof(CUVIDSOURCEPARAMS)); std::memset(&params, 0, sizeof(CUVIDSOURCEPARAMS));
@ -55,51 +60,51 @@ cv::gpu::detail::CuvidVideoSource::CuvidVideoSource(const String& fname)
params.pfnAudioDataHandler = 0; params.pfnAudioDataHandler = 0;
// now create the actual source // now create the actual source
CUresult res = cuvidCreateVideoSource(&videoSource_, fname.c_str(), &params); CUresult cuRes = cuvidCreateVideoSource(&videoSource_, fname.c_str(), &params);
if (res == CUDA_ERROR_INVALID_SOURCE) if (cuRes == CUDA_ERROR_INVALID_SOURCE)
throw std::runtime_error("Unsupported video source"); throw std::runtime_error("");
cuSafeCall( res ); cuSafeCall( cuRes );
CUVIDEOFORMAT vidfmt; CUVIDEOFORMAT vidfmt;
cuSafeCall( cuvidGetSourceVideoFormat(videoSource_, &vidfmt, 0) ); cuSafeCall( cuvidGetSourceVideoFormat(videoSource_, &vidfmt, 0) );
format_.codec = static_cast<VideoReader_GPU::Codec>(vidfmt.codec); format_.codec = static_cast<Codec>(vidfmt.codec);
format_.chromaFormat = static_cast<VideoReader_GPU::ChromaFormat>(vidfmt.chroma_format); format_.chromaFormat = static_cast<ChromaFormat>(vidfmt.chroma_format);
format_.width = vidfmt.coded_width; format_.width = vidfmt.coded_width;
format_.height = vidfmt.coded_height; format_.height = vidfmt.coded_height;
} }
cv::gpu::detail::CuvidVideoSource::~CuvidVideoSource() cv::gpucodec::detail::CuvidVideoSource::~CuvidVideoSource()
{ {
cuvidDestroyVideoSource(videoSource_); cuvidDestroyVideoSource(videoSource_);
} }
cv::gpu::VideoReader_GPU::FormatInfo cv::gpu::detail::CuvidVideoSource::format() const FormatInfo cv::gpucodec::detail::CuvidVideoSource::format() const
{ {
return format_; return format_;
} }
void cv::gpu::detail::CuvidVideoSource::start() void cv::gpucodec::detail::CuvidVideoSource::start()
{ {
cuSafeCall( cuvidSetVideoSourceState(videoSource_, cudaVideoState_Started) ); cuSafeCall( cuvidSetVideoSourceState(videoSource_, cudaVideoState_Started) );
} }
void cv::gpu::detail::CuvidVideoSource::stop() void cv::gpucodec::detail::CuvidVideoSource::stop()
{ {
cuSafeCall( cuvidSetVideoSourceState(videoSource_, cudaVideoState_Stopped) ); cuSafeCall( cuvidSetVideoSourceState(videoSource_, cudaVideoState_Stopped) );
} }
bool cv::gpu::detail::CuvidVideoSource::isStarted() const bool cv::gpucodec::detail::CuvidVideoSource::isStarted() const
{ {
return (cuvidGetVideoSourceState(videoSource_) == cudaVideoState_Started); return (cuvidGetVideoSourceState(videoSource_) == cudaVideoState_Started);
} }
bool cv::gpu::detail::CuvidVideoSource::hasError() const bool cv::gpucodec::detail::CuvidVideoSource::hasError() const
{ {
return (cuvidGetVideoSourceState(videoSource_) == cudaVideoState_Error); return (cuvidGetVideoSourceState(videoSource_) == cudaVideoState_Error);
} }
int CUDAAPI cv::gpu::detail::CuvidVideoSource::HandleVideoData(void* userData, CUVIDSOURCEDATAPACKET* packet) int CUDAAPI cv::gpucodec::detail::CuvidVideoSource::HandleVideoData(void* userData, CUVIDSOURCEDATAPACKET* packet)
{ {
CuvidVideoSource* thiz = static_cast<CuvidVideoSource*>(userData); CuvidVideoSource* thiz = static_cast<CuvidVideoSource*>(userData);

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,25 +41,25 @@
// //
//M*/ //M*/
#ifndef __CUVUD_VIDEO_SOURCE_H__ #ifndef __CUVID_VIDEO_SOURCE_HPP__
#define __CUVUD_VIDEO_SOURCE_H__ #define __CUVID_VIDEO_SOURCE_HPP__
#include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp"
#include "thread.h"
#include <nvcuvid.h> #include <nvcuvid.h>
namespace cv { namespace gpu { namespace detail #include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp"
#include "video_source.hpp"
namespace cv { namespace gpucodec { namespace detail
{ {
class CuvidVideoSource : public VideoReader_GPU::VideoSource class CuvidVideoSource : public VideoSource
{ {
public: public:
explicit CuvidVideoSource(const String& fname); explicit CuvidVideoSource(const String& fname);
~CuvidVideoSource(); ~CuvidVideoSource();
VideoReader_GPU::FormatInfo format() const; FormatInfo format() const;
void start(); void start();
void stop(); void stop();
bool isStarted() const; bool isStarted() const;
@ -78,9 +79,9 @@ private:
static int CUDAAPI HandleVideoData(void* pUserData, CUVIDSOURCEDATAPACKET* pPacket); static int CUDAAPI HandleVideoData(void* pUserData, CUVIDSOURCEDATAPACKET* pPacket);
CUvideosource videoSource_; CUvideosource videoSource_;
VideoReader_GPU::FormatInfo format_; FormatInfo format_;
}; };
}}} }}}
#endif // __CUVUD_VIDEO_SOURCE_H__ #endif // __CUVID_VIDEO_SOURCE_HPP__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -48,6 +49,10 @@
#include "../src/cap_ffmpeg_impl.hpp" #include "../src/cap_ffmpeg_impl.hpp"
#endif #endif
using namespace cv;
using namespace cv::gpucodec;
using namespace cv::gpucodec::detail;
namespace namespace
{ {
Create_InputMediaStream_FFMPEG_Plugin create_InputMediaStream_FFMPEG_p = 0; Create_InputMediaStream_FFMPEG_Plugin create_InputMediaStream_FFMPEG_p = 0;
@ -94,7 +99,7 @@ namespace
} }
} }
cv::gpu::detail::FFmpegVideoSource::FFmpegVideoSource(const String& fname) : cv::gpucodec::detail::FFmpegVideoSource::FFmpegVideoSource(const String& fname) :
stream_(0) stream_(0)
{ {
CV_Assert( init_MediaStream_FFMPEG() ); CV_Assert( init_MediaStream_FFMPEG() );
@ -106,75 +111,33 @@ cv::gpu::detail::FFmpegVideoSource::FFmpegVideoSource(const String& fname) :
stream_ = create_InputMediaStream_FFMPEG_p(fname.c_str(), &codec, &chroma_format, &width, &height); stream_ = create_InputMediaStream_FFMPEG_p(fname.c_str(), &codec, &chroma_format, &width, &height);
if (!stream_) if (!stream_)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source"); CV_Error(Error::StsUnsupportedFormat, "Unsupported video source");
format_.codec = static_cast<VideoReader_GPU::Codec>(codec); format_.codec = static_cast<Codec>(codec);
format_.chromaFormat = static_cast<VideoReader_GPU::ChromaFormat>(chroma_format); format_.chromaFormat = static_cast<ChromaFormat>(chroma_format);
format_.width = width; format_.width = width;
format_.height = height; format_.height = height;
} }
cv::gpu::VideoReader_GPU::FormatInfo cv::gpu::detail::FFmpegVideoSource::format() const cv::gpucodec::detail::FFmpegVideoSource::~FFmpegVideoSource()
{
if (stream_)
release_InputMediaStream_FFMPEG_p(stream_);
}
FormatInfo cv::gpucodec::detail::FFmpegVideoSource::format() const
{ {
return format_; return format_;
} }
void cv::gpu::detail::FFmpegVideoSource::start() bool cv::gpucodec::detail::FFmpegVideoSource::getNextPacket(unsigned char** data, int* size, bool* bEndOfFile)
{ {
stop_ = false;
hasError_ = false;
thread_ = new Thread(readLoop, this);
}
void cv::gpu::detail::FFmpegVideoSource::stop()
{
stop_ = true;
thread_->wait();
thread_.release();
}
bool cv::gpu::detail::FFmpegVideoSource::isStarted() const
{
return !stop_;
}
bool cv::gpu::detail::FFmpegVideoSource::hasError() const
{
return hasError_;
}
void cv::gpu::detail::FFmpegVideoSource::readLoop(void* userData)
{
FFmpegVideoSource* thiz = static_cast<FFmpegVideoSource*>(userData);
for (;;)
{
unsigned char* data;
int size;
int endOfFile; int endOfFile;
if (!read_InputMediaStream_FFMPEG_p(thiz->stream_, &data, &size, &endOfFile)) int res = read_InputMediaStream_FFMPEG_p(stream_, data, size, &endOfFile);
{
thiz->hasError_ = !endOfFile;
break;
}
if (!thiz->parseVideoData(data, size)) *bEndOfFile = (endOfFile != 0);
{ return res != 0;
thiz->hasError_ = true;
break;
}
if (thiz->stop_)
break;
}
thiz->parseVideoData(0, 0, true);
}
template <> void cv::Ptr<InputMediaStream_FFMPEG>::delete_obj()
{
if (obj) release_InputMediaStream_FFMPEG_p(obj);
} }
#endif // HAVE_CUDA #endif // HAVE_CUDA

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,43 +41,31 @@
// //
//M*/ //M*/
#ifndef __FFMPEG_VIDEO_SOURCE_H__ #ifndef __FFMPEG_VIDEO_SOURCE_HPP__
#define __FFMPEG_VIDEO_SOURCE_H__ #define __FFMPEG_VIDEO_SOURCE_HPP__
#include "opencv2/gpucodec.hpp" #include "opencv2/gpucodec.hpp"
#include "thread.h"
struct InputMediaStream_FFMPEG; struct InputMediaStream_FFMPEG;
namespace cv { namespace gpu { namespace detail { namespace cv { namespace gpucodec { namespace detail {
class FFmpegVideoSource : public VideoReader_GPU::VideoSource class FFmpegVideoSource : public RawVideoSource
{ {
public: public:
FFmpegVideoSource(const String& fname); FFmpegVideoSource(const String& fname);
~FFmpegVideoSource();
VideoReader_GPU::FormatInfo format() const; bool getNextPacket(unsigned char** data, int* size, bool* endOfFile);
void start();
void stop(); FormatInfo format() const;
bool isStarted() const;
bool hasError() const;
private: private:
VideoReader_GPU::FormatInfo format_; FormatInfo format_;
cv::Ptr<InputMediaStream_FFMPEG> stream_; InputMediaStream_FFMPEG* stream_;
cv::Ptr<Thread> thread_;
volatile bool stop_;
volatile bool hasError_;
static void readLoop(void* userData);
}; };
}}} }}}
namespace cv { #endif // __FFMPEG_VIDEO_SOURCE_HPP__
template <> void Ptr<InputMediaStream_FFMPEG>::delete_obj();
}
#endif // __FFMPEG_VIDEO_SOURCE_H__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,7 +45,7 @@
#ifdef HAVE_NVCUVID #ifdef HAVE_NVCUVID
cv::gpu::detail::FrameQueue::FrameQueue() : cv::gpucodec::detail::FrameQueue::FrameQueue() :
endOfDecode_(0), endOfDecode_(0),
framesInQueue_(0), framesInQueue_(0),
readPosition_(0) readPosition_(0)
@ -53,7 +54,7 @@ cv::gpu::detail::FrameQueue::FrameQueue() :
std::memset((void*) isFrameInUse_, 0, sizeof(isFrameInUse_)); std::memset((void*) isFrameInUse_, 0, sizeof(isFrameInUse_));
} }
bool cv::gpu::detail::FrameQueue::waitUntilFrameAvailable(int pictureIndex) bool cv::gpucodec::detail::FrameQueue::waitUntilFrameAvailable(int pictureIndex)
{ {
while (isInUse(pictureIndex)) while (isInUse(pictureIndex))
{ {
@ -67,7 +68,7 @@ bool cv::gpu::detail::FrameQueue::waitUntilFrameAvailable(int pictureIndex)
return true; return true;
} }
void cv::gpu::detail::FrameQueue::enqueue(const CUVIDPARSERDISPINFO* picParams) void cv::gpucodec::detail::FrameQueue::enqueue(const CUVIDPARSERDISPINFO* picParams)
{ {
// Mark the frame as 'in-use' so we don't re-use it for decoding until it is no longer needed // Mark the frame as 'in-use' so we don't re-use it for decoding until it is no longer needed
// for display // for display
@ -98,7 +99,7 @@ void cv::gpu::detail::FrameQueue::enqueue(const CUVIDPARSERDISPINFO* picParams)
} while (!isEndOfDecode()); } while (!isEndOfDecode());
} }
bool cv::gpu::detail::FrameQueue::dequeue(CUVIDPARSERDISPINFO& displayInfo) bool cv::gpucodec::detail::FrameQueue::dequeue(CUVIDPARSERDISPINFO& displayInfo)
{ {
AutoLock autoLock(mtx_); AutoLock autoLock(mtx_);

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,15 +41,15 @@
// //
//M*/ //M*/
#ifndef __FRAME_QUEUE_H__ #ifndef __FRAME_QUEUE_HPP__
#define __FRAME_QUEUE_H__ #define __FRAME_QUEUE_HPP__
#include "opencv2/core/utility.hpp" #include "opencv2/core/utility.hpp"
#include "opencv2/core/private.gpu.hpp" #include "opencv2/core/private.gpu.hpp"
#include <nvcuvid.h> #include <nvcuvid.h>
namespace cv { namespace gpu { namespace detail namespace cv { namespace gpucodec { namespace detail
{ {
class FrameQueue class FrameQueue
@ -94,4 +95,4 @@ private:
}}} }}}
#endif // __FRAME_QUEUE_H__ #endif // __FRAME_QUEUE_HPP__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -66,12 +67,13 @@
#include <unistd.h> #include <unistd.h>
#endif #endif
#include "thread.h" #include "thread.hpp"
#include "ffmpeg_video_source.h" #include "video_source.hpp"
#include "cuvid_video_source.h" #include "ffmpeg_video_source.hpp"
#include "frame_queue.h" #include "cuvid_video_source.hpp"
#include "video_decoder.h" #include "frame_queue.hpp"
#include "video_parser.h" #include "video_decoder.hpp"
#include "video_parser.hpp"
#include "../src/cap_ffmpeg_api.hpp" #include "../src/cap_ffmpeg_api.hpp"
#endif #endif

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,7 +45,7 @@
#ifdef HAVE_NVCUVID #ifdef HAVE_NVCUVID
using namespace cv::gpu::detail; using namespace cv::gpucodec::detail;
#ifdef WIN32 #ifdef WIN32
@ -66,7 +67,7 @@ namespace
} }
} }
class cv::gpu::detail::Thread::Impl class cv::gpucodec::detail::Thread::Impl
{ {
public: public:
Impl(Thread::Func func, void* userData) Impl(Thread::Func func, void* userData)
@ -119,7 +120,7 @@ namespace
} }
} }
class cv::gpu::detail::Thread::Impl class cv::gpucodec::detail::Thread::Impl
{ {
public: public:
Impl(Thread::Func func, void* userData) Impl(Thread::Func func, void* userData)
@ -147,17 +148,17 @@ private:
#endif #endif
cv::gpu::detail::Thread::Thread(Func func, void* userData) : cv::gpucodec::detail::Thread::Thread(Func func, void* userData) :
impl_(new Impl(func, userData)) impl_(new Impl(func, userData))
{ {
} }
void cv::gpu::detail::Thread::wait() void cv::gpucodec::detail::Thread::wait()
{ {
impl_->wait(); impl_->wait();
} }
void cv::gpu::detail::Thread::sleep(int ms) void cv::gpucodec::detail::Thread::sleep(int ms)
{ {
#ifdef WIN32 #ifdef WIN32
::Sleep(ms); ::Sleep(ms);
@ -166,7 +167,7 @@ void cv::gpu::detail::Thread::sleep(int ms)
#endif #endif
} }
template <> void cv::Ptr<cv::gpu::detail::Thread::Impl>::delete_obj() template <> void cv::Ptr<cv::gpucodec::detail::Thread::Impl>::delete_obj()
{ {
if (obj) delete obj; if (obj) delete obj;
} }

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,12 +41,12 @@
// //
//M*/ //M*/
#ifndef __THREAD_WRAPPERS_H__ #ifndef __THREAD_WRAPPERS_HPP__
#define __THREAD_WRAPPERS_H__ #define __THREAD_WRAPPERS_HPP__
#include "opencv2/core.hpp" #include "opencv2/core.hpp"
namespace cv { namespace gpu { namespace detail { namespace cv { namespace gpucodec { namespace detail {
class Thread class Thread
{ {
@ -67,7 +68,7 @@ private:
}}} }}}
namespace cv { namespace cv {
template <> void Ptr<cv::gpu::detail::Thread::Impl>::delete_obj(); template <> void Ptr<cv::gpucodec::detail::Thread::Impl>::delete_obj();
} }
#endif // __THREAD_WRAPPERS_H__ #endif // __THREAD_WRAPPERS_HPP__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,7 +45,7 @@
#ifdef HAVE_NVCUVID #ifdef HAVE_NVCUVID
void cv::gpu::detail::VideoDecoder::create(const VideoReader_GPU::FormatInfo& videoFormat) void cv::gpucodec::detail::VideoDecoder::create(const FormatInfo& videoFormat)
{ {
release(); release();
@ -103,7 +104,7 @@ void cv::gpu::detail::VideoDecoder::create(const VideoReader_GPU::FormatInfo& vi
cuSafeCall( cuvidCreateDecoder(&decoder_, &createInfo_) ); cuSafeCall( cuvidCreateDecoder(&decoder_, &createInfo_) );
} }
void cv::gpu::detail::VideoDecoder::release() void cv::gpucodec::detail::VideoDecoder::release()
{ {
if (decoder_) if (decoder_)
{ {

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,21 +41,21 @@
// //
//M*/ //M*/
#ifndef __VIDEO_DECODER_H__ #ifndef __VIDEO_DECODER_HPP__
#define __VIDEO_DECODER_H__ #define __VIDEO_DECODER_HPP__
#include <nvcuvid.h>
#include "opencv2/core/private.gpu.hpp" #include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp" #include "opencv2/gpucodec.hpp"
#include <nvcuvid.h> namespace cv { namespace gpucodec { namespace detail
namespace cv { namespace gpu { namespace detail
{ {
class VideoDecoder class VideoDecoder
{ {
public: public:
VideoDecoder(const VideoReader_GPU::FormatInfo& videoFormat, CUvideoctxlock lock) : lock_(lock), decoder_(0) VideoDecoder(const FormatInfo& videoFormat, CUvideoctxlock lock) : lock_(lock), decoder_(0)
{ {
create(videoFormat); create(videoFormat);
} }
@ -64,7 +65,7 @@ public:
release(); release();
} }
void create(const VideoReader_GPU::FormatInfo& videoFormat); void create(const FormatInfo& videoFormat);
void release(); void release();
// Get the code-type currently used. // Get the code-type currently used.
@ -84,17 +85,17 @@ public:
return cuvidDecodePicture(decoder_, picParams) == CUDA_SUCCESS; return cuvidDecodePicture(decoder_, picParams) == CUDA_SUCCESS;
} }
cv::gpu::GpuMat mapFrame(int picIdx, CUVIDPROCPARAMS& videoProcParams) gpu::GpuMat mapFrame(int picIdx, CUVIDPROCPARAMS& videoProcParams)
{ {
CUdeviceptr ptr; CUdeviceptr ptr;
unsigned int pitch; unsigned int pitch;
cuSafeCall( cuvidMapVideoFrame(decoder_, picIdx, &ptr, &pitch, &videoProcParams) ); cuSafeCall( cuvidMapVideoFrame(decoder_, picIdx, &ptr, &pitch, &videoProcParams) );
return GpuMat(targetHeight() * 3 / 2, targetWidth(), CV_8UC1, (void*) ptr, pitch); return gpu::GpuMat(targetHeight() * 3 / 2, targetWidth(), CV_8UC1, (void*) ptr, pitch);
} }
void unmapFrame(cv::gpu::GpuMat& frame) void unmapFrame(gpu::GpuMat& frame)
{ {
cuSafeCall( cuvidUnmapVideoFrame(decoder_, (CUdeviceptr) frame.data) ); cuSafeCall( cuvidUnmapVideoFrame(decoder_, (CUdeviceptr) frame.data) );
frame.release(); frame.release();
@ -108,4 +109,4 @@ private:
}}} }}}
#endif // __VIDEO_DECODER_H__ #endif // __VIDEO_DECODER_HPP__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -44,11 +45,11 @@
#ifdef HAVE_NVCUVID #ifdef HAVE_NVCUVID
cv::gpu::detail::VideoParser::VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue) : cv::gpucodec::detail::VideoParser::VideoParser(VideoDecoder* videoDecoder, FrameQueue* frameQueue) :
videoDecoder_(videoDecoder), frameQueue_(frameQueue), unparsedPackets_(0), hasError_(false) videoDecoder_(videoDecoder), frameQueue_(frameQueue), unparsedPackets_(0), hasError_(false)
{ {
CUVIDPARSERPARAMS params; CUVIDPARSERPARAMS params;
memset(&params, 0, sizeof(CUVIDPARSERPARAMS)); std::memset(&params, 0, sizeof(CUVIDPARSERPARAMS));
params.CodecType = videoDecoder->codec(); params.CodecType = videoDecoder->codec();
params.ulMaxNumDecodeSurfaces = videoDecoder->maxDecodeSurfaces(); params.ulMaxNumDecodeSurfaces = videoDecoder->maxDecodeSurfaces();
@ -61,7 +62,7 @@ cv::gpu::detail::VideoParser::VideoParser(VideoDecoder* videoDecoder, FrameQueue
cuSafeCall( cuvidCreateVideoParser(&parser_, &params) ); cuSafeCall( cuvidCreateVideoParser(&parser_, &params) );
} }
bool cv::gpu::detail::VideoParser::parseVideoData(const unsigned char* data, size_t size, bool endOfStream) bool cv::gpucodec::detail::VideoParser::parseVideoData(const unsigned char* data, size_t size, bool endOfStream)
{ {
CUVIDSOURCEDATAPACKET packet; CUVIDSOURCEDATAPACKET packet;
std::memset(&packet, 0, sizeof(CUVIDSOURCEDATAPACKET)); std::memset(&packet, 0, sizeof(CUVIDSOURCEDATAPACKET));
@ -95,7 +96,7 @@ bool cv::gpu::detail::VideoParser::parseVideoData(const unsigned char* data, siz
return !frameQueue_->isEndOfDecode(); return !frameQueue_->isEndOfDecode();
} }
int CUDAAPI cv::gpu::detail::VideoParser::HandleVideoSequence(void* userData, CUVIDEOFORMAT* format) int CUDAAPI cv::gpucodec::detail::VideoParser::HandleVideoSequence(void* userData, CUVIDEOFORMAT* format)
{ {
VideoParser* thiz = static_cast<VideoParser*>(userData); VideoParser* thiz = static_cast<VideoParser*>(userData);
@ -106,10 +107,10 @@ int CUDAAPI cv::gpu::detail::VideoParser::HandleVideoSequence(void* userData, CU
format->coded_height != thiz->videoDecoder_->frameHeight() || format->coded_height != thiz->videoDecoder_->frameHeight() ||
format->chroma_format != thiz->videoDecoder_->chromaFormat()) format->chroma_format != thiz->videoDecoder_->chromaFormat())
{ {
VideoReader_GPU::FormatInfo newFormat; FormatInfo newFormat;
newFormat.codec = static_cast<VideoReader_GPU::Codec>(format->codec); newFormat.codec = static_cast<Codec>(format->codec);
newFormat.chromaFormat = static_cast<VideoReader_GPU::ChromaFormat>(format->chroma_format); newFormat.chromaFormat = static_cast<ChromaFormat>(format->chroma_format);
newFormat.width = format->coded_width; newFormat.width = format->coded_width;
newFormat.height = format->coded_height; newFormat.height = format->coded_height;
@ -127,7 +128,7 @@ int CUDAAPI cv::gpu::detail::VideoParser::HandleVideoSequence(void* userData, CU
return true; return true;
} }
int CUDAAPI cv::gpu::detail::VideoParser::HandlePictureDecode(void* userData, CUVIDPICPARAMS* picParams) int CUDAAPI cv::gpucodec::detail::VideoParser::HandlePictureDecode(void* userData, CUVIDPICPARAMS* picParams)
{ {
VideoParser* thiz = static_cast<VideoParser*>(userData); VideoParser* thiz = static_cast<VideoParser*>(userData);
@ -147,7 +148,7 @@ int CUDAAPI cv::gpu::detail::VideoParser::HandlePictureDecode(void* userData, CU
return true; return true;
} }
int CUDAAPI cv::gpu::detail::VideoParser::HandlePictureDisplay(void* userData, CUVIDPARSERDISPINFO* picParams) int CUDAAPI cv::gpucodec::detail::VideoParser::HandlePictureDisplay(void* userData, CUVIDPARSERDISPINFO* picParams)
{ {
VideoParser* thiz = static_cast<VideoParser*>(userData); VideoParser* thiz = static_cast<VideoParser*>(userData);
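A note on the parser path above: parseVideoData() is the single entry point that feeds the NVCUVID parser. It wraps the caller's buffer in a CUVIDSOURCEDATAPACKET and hands it to cuvidParseVideoData(), which invokes HandleVideoSequence / HandlePictureDecode / HandlePictureDisplay on the registered VideoParser from inside that call. A minimal sketch of that hand-off, assuming the standard nvcuvid.h API (parser_, data, size and endOfStream named as in the method above):

    // Sketch only: forwarding one raw bitstream chunk to the cuvid parser.
    CUVIDSOURCEDATAPACKET packet;
    std::memset(&packet, 0, sizeof(CUVIDSOURCEDATAPACKET));
    packet.flags        = endOfStream ? CUVID_PKT_ENDOFSTREAM : 0;  // end-of-stream flushes queued frames
    packet.payload_size = static_cast<unsigned long>(size);
    packet.payload      = data;
    cuSafeCall( cuvidParseVideoData(parser_, &packet) );            // the callbacks fire before this returns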

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,17 +41,17 @@
// //
//M*/ //M*/
#ifndef __VIDEO_PARSER_H__ #ifndef __VIDEO_PARSER_HPP__
#define __VIDEO_PARSER_H__ #define __VIDEO_PARSER_HPP__
#include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp"
#include "frame_queue.h"
#include "video_decoder.h"
#include <nvcuvid.h> #include <nvcuvid.h>
namespace cv { namespace gpu { namespace detail #include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp"
#include "frame_queue.hpp"
#include "video_decoder.hpp"
namespace cv { namespace gpucodec { namespace detail
{ {
class VideoParser class VideoParser
@ -91,4 +92,4 @@ private:
}}} }}}
#endif // __VIDEO_PARSER_H__ #endif // __VIDEO_PARSER_HPP__

@ -42,50 +42,52 @@
#include "precomp.hpp" #include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::gpucodec;
#ifndef HAVE_NVCUVID #ifndef HAVE_NVCUVID
class cv::gpu::VideoReader_GPU::Impl Ptr<VideoReader> cv::gpucodec::createVideoReader(const String&) { throw_no_cuda(); return Ptr<VideoReader>(); }
{ Ptr<VideoReader> cv::gpucodec::createVideoReader(const Ptr<RawVideoSource>&) { throw_no_cuda(); return Ptr<VideoReader>(); }
};
cv::gpu::VideoReader_GPU::VideoReader_GPU() { throw_no_cuda(); }
cv::gpu::VideoReader_GPU::VideoReader_GPU(const String&) { throw_no_cuda(); }
cv::gpu::VideoReader_GPU::VideoReader_GPU(const cv::Ptr<VideoSource>&) { throw_no_cuda(); }
cv::gpu::VideoReader_GPU::~VideoReader_GPU() { }
void cv::gpu::VideoReader_GPU::open(const String&) { throw_no_cuda(); }
void cv::gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>&) { throw_no_cuda(); }
bool cv::gpu::VideoReader_GPU::isOpened() const { return false; }
void cv::gpu::VideoReader_GPU::close() { }
bool cv::gpu::VideoReader_GPU::read(GpuMat&) { throw_no_cuda(); return false; }
cv::gpu::VideoReader_GPU::FormatInfo cv::gpu::VideoReader_GPU::format() const { throw_no_cuda(); FormatInfo format_ = {MPEG1,Monochrome,0,0}; return format_; }
bool cv::gpu::VideoReader_GPU::VideoSource::parseVideoData(const unsigned char*, size_t, bool) { throw_no_cuda(); return false; }
void cv::gpu::VideoReader_GPU::dumpFormat(std::ostream&) { throw_no_cuda(); }
#else // HAVE_NVCUVID #else // HAVE_NVCUVID
class cv::gpu::VideoReader_GPU::Impl namespace cv { namespace gpu { namespace cudev
{
void NV12_to_RGB(const PtrStepb decodedFrame, PtrStepSz<uint> interopFrame, cudaStream_t stream = 0);
}}}
namespace
{
class VideoReaderImpl : public VideoReader
{ {
public: public:
explicit Impl(const cv::Ptr<cv::gpu::VideoReader_GPU::VideoSource>& source); explicit VideoReaderImpl(const Ptr<detail::VideoSource>& source);
~Impl(); ~VideoReaderImpl();
bool grab(cv::gpu::GpuMat& frame); bool nextFrame(OutputArray frame);
cv::gpu::VideoReader_GPU::FormatInfo format() const { return videoSource_->format(); } FormatInfo format() const;
private: private:
cv::Ptr<cv::gpu::VideoReader_GPU::VideoSource> videoSource_; Ptr<detail::VideoSource> videoSource_;
cv::Ptr<cv::gpu::detail::FrameQueue> frameQueue_; Ptr<detail::FrameQueue> frameQueue_;
cv::Ptr<cv::gpu::detail::VideoDecoder> videoDecoder_; Ptr<detail::VideoDecoder> videoDecoder_;
cv::Ptr<cv::gpu::detail::VideoParser> videoParser_; Ptr<detail::VideoParser> videoParser_;
CUvideoctxlock lock_; CUvideoctxlock lock_;
std::deque< std::pair<CUVIDPARSERDISPINFO, CUVIDPROCPARAMS> > frames_; std::deque< std::pair<CUVIDPARSERDISPINFO, CUVIDPROCPARAMS> > frames_;
}; };
cv::gpu::VideoReader_GPU::Impl::Impl(const cv::Ptr<VideoSource>& source) : FormatInfo VideoReaderImpl::format() const
{
return videoSource_->format();
}
VideoReaderImpl::VideoReaderImpl(const Ptr<detail::VideoSource>& source) :
videoSource_(source), videoSource_(source),
lock_(0) lock_(0)
{ {
@ -93,9 +95,6 @@ cv::gpu::VideoReader_GPU::Impl::Impl(const cv::Ptr<VideoSource>& source) :
GpuMat temp(1, 1, CV_8UC1); GpuMat temp(1, 1, CV_8UC1);
temp.release(); temp.release();
DeviceInfo devInfo;
CV_Assert( devInfo.supports(FEATURE_SET_COMPUTE_11) );
CUcontext ctx; CUcontext ctx;
cuSafeCall( cuCtxGetCurrent(&ctx) ); cuSafeCall( cuCtxGetCurrent(&ctx) );
cuSafeCall( cuvidCtxLockCreate(&lock_, ctx) ); cuSafeCall( cuvidCtxLockCreate(&lock_, ctx) );
@ -104,26 +103,16 @@ cv::gpu::VideoReader_GPU::Impl::Impl(const cv::Ptr<VideoSource>& source) :
videoDecoder_ = new detail::VideoDecoder(videoSource_->format(), lock_); videoDecoder_ = new detail::VideoDecoder(videoSource_->format(), lock_);
videoParser_ = new detail::VideoParser(videoDecoder_, frameQueue_); videoParser_ = new detail::VideoParser(videoDecoder_, frameQueue_);
videoSource_->setFrameQueue(frameQueue_);
videoSource_->setVideoParser(videoParser_); videoSource_->setVideoParser(videoParser_);
videoSource_->start(); videoSource_->start();
} }
cv::gpu::VideoReader_GPU::Impl::~Impl() VideoReaderImpl::~VideoReaderImpl()
{ {
frameQueue_->endDecode(); frameQueue_->endDecode();
videoSource_->stop(); videoSource_->stop();
} }
namespace cv { namespace gpu { namespace cudev
{
void loadHueCSC(float hueCSC[9]);
void NV12_to_RGB(const PtrStepb decodedFrame, PtrStepSz<uint> interopFrame, cudaStream_t stream = 0);
}}}
namespace
{
class VideoCtxAutoLock class VideoCtxAutoLock
{ {
public: public:
@ -134,78 +123,22 @@ namespace
CUvideoctxlock m_lock; CUvideoctxlock m_lock;
}; };
enum ColorSpace void cudaPostProcessFrame(const GpuMat& decodedFrame, OutputArray _outFrame, int width, int height)
{
ITU601 = 1,
ITU709 = 2
};
void setColorSpaceMatrix(ColorSpace CSC, float hueCSC[9], float hue)
{
float hueSin = std::sin(hue);
float hueCos = std::cos(hue);
if (CSC == ITU601)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else if (CSC == ITU709)
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
}
void cudaPostProcessFrame(const cv::gpu::GpuMat& decodedFrame, cv::gpu::GpuMat& interopFrame, int width, int height)
{ {
using namespace cv::gpu::cudev; using namespace cv::gpu::cudev;
static bool updateCSC = true;
static float hueColorSpaceMat[9];
// Upload the Color Space Conversion Matrices
if (updateCSC)
{
const ColorSpace colorSpace = ITU601;
const float hue = 0.0f;
// CCIR 601/709
setColorSpaceMatrix(colorSpace, hueColorSpaceMat, hue);
updateCSC = false;
}
// Final Stage: NV12toARGB color space conversion // Final Stage: NV12toARGB color space conversion
interopFrame.create(height, width, CV_8UC4); _outFrame.create(height, width, CV_8UC4);
GpuMat outFrame = _outFrame.getGpuMat();
loadHueCSC(hueColorSpaceMat); NV12_to_RGB(decodedFrame, outFrame);
NV12_to_RGB(decodedFrame, interopFrame);
}
} }
bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame) bool VideoReaderImpl::nextFrame(OutputArray frame)
{ {
if (videoSource_->hasError() || videoParser_->hasError()) if (videoSource_->hasError() || videoParser_->hasError())
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source"); CV_Error(Error::StsUnsupportedFormat, "Unsupported video source");
if (!videoSource_->isStarted() || frameQueue_->isEndOfDecode()) if (!videoSource_->isStarted() || frameQueue_->isEndOfDecode())
return false; return false;
@ -220,7 +153,7 @@ bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame)
break; break;
if (videoSource_->hasError() || videoParser_->hasError()) if (videoSource_->hasError() || videoParser_->hasError())
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported video source"); CV_Error(Error::StsUnsupportedFormat, "Unsupported video source");
if (frameQueue_->isEndOfDecode()) if (frameQueue_->isEndOfDecode())
return false; return false;
@ -256,7 +189,7 @@ bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame)
VideoCtxAutoLock autoLock(lock_); VideoCtxAutoLock autoLock(lock_);
// map decoded video frame to CUDA surface // map decoded video frame to CUDA surface
cv::gpu::GpuMat decodedFrame = videoDecoder_->mapFrame(frameInfo.first.picture_index, frameInfo.second); GpuMat decodedFrame = videoDecoder_->mapFrame(frameInfo.first.picture_index, frameInfo.second);
// perform post processing on the CUDA surface (performs color space conversion and post processing) // perform post processing on the CUDA surface (performs color space conversion and post processing)
// comment this out if we include the line of code seen above // comment this out if we include the line of code seen above
@ -273,120 +206,31 @@ bool cv::gpu::VideoReader_GPU::Impl::grab(GpuMat& frame)
return true; return true;
} }
////////////////////////////////////////////////////////////////////////////
cv::gpu::VideoReader_GPU::VideoReader_GPU()
{
} }
cv::gpu::VideoReader_GPU::VideoReader_GPU(const String& filename) Ptr<VideoReader> cv::gpucodec::createVideoReader(const String& filename)
{
open(filename);
}
cv::gpu::VideoReader_GPU::VideoReader_GPU(const cv::Ptr<VideoSource>& source)
{
open(source);
}
cv::gpu::VideoReader_GPU::~VideoReader_GPU()
{
close();
}
void cv::gpu::VideoReader_GPU::open(const String& filename)
{ {
CV_Assert( !filename.empty() ); CV_Assert( !filename.empty() );
#ifndef __APPLE__ Ptr<detail::VideoSource> videoSource;
try try
{ {
cv::Ptr<VideoSource> source(new detail::CuvidVideoSource(filename)); videoSource = new detail::CuvidVideoSource(filename);
open(source);
} }
catch (const std::runtime_error&) catch (...)
#endif
{ {
cv::Ptr<VideoSource> source(new cv::gpu::detail::FFmpegVideoSource(filename)); Ptr<RawVideoSource> source(new detail::FFmpegVideoSource(filename));
open(source); videoSource = new detail::RawVideoSourceWrapper(source);
}
} }
void cv::gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>& source) return new VideoReaderImpl(videoSource);
{
CV_Assert( !source.empty() );
close();
impl_ = new Impl(source);
} }
bool cv::gpu::VideoReader_GPU::isOpened() const Ptr<VideoReader> cv::gpucodec::createVideoReader(const Ptr<RawVideoSource>& source)
{ {
return !impl_.empty(); Ptr<detail::VideoSource> videoSource(new detail::RawVideoSourceWrapper(source));
} return new VideoReaderImpl(videoSource);
void cv::gpu::VideoReader_GPU::close()
{
impl_.release();
}
bool cv::gpu::VideoReader_GPU::read(GpuMat& image)
{
if (!isOpened())
return false;
if (!impl_->grab(image))
{
close();
return false;
}
return true;
}
cv::gpu::VideoReader_GPU::FormatInfo cv::gpu::VideoReader_GPU::format() const
{
CV_Assert( isOpened() );
return impl_->format();
}
bool cv::gpu::VideoReader_GPU::VideoSource::parseVideoData(const unsigned char* data, size_t size, bool endOfStream)
{
return videoParser_->parseVideoData(data, size, endOfStream);
}
void cv::gpu::VideoReader_GPU::dumpFormat(std::ostream& st)
{
static const char* codecs[] =
{
"MPEG1",
"MPEG2",
"MPEG4",
"VC1",
"H264",
"JPEG",
"H264_SVC",
"H264_MVC"
};
static const char* chromas[] =
{
"Monochrome",
"YUV420",
"YUV422",
"YUV444"
};
FormatInfo _format = this->format();
st << "Frame Size : " << _format.width << "x" << _format.height << std::endl;
st << "Codec : " << (_format.codec <= H264_MVC ? codecs[_format.codec] : "Uncompressed YUV") << std::endl;
st << "Chroma Format : " << chromas[_format.chromaFormat] << std::endl;
} }
#endif // HAVE_NVCUVID #endif // HAVE_NVCUVID
template <> void cv::Ptr<cv::gpu::VideoReader_GPU::Impl>::delete_obj()
{
if (obj) delete obj;
}
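With the refactoring above, the open()/isOpened()/read()/close() state machine of VideoReader_GPU is gone: client code asks the factory for a reader and pulls frames until nextFrame() returns false, which is the same pattern the updated tests and samples further down use. A minimal usage sketch, assuming this branch's headers (the GpuMat header may need to be included separately); the file name is a placeholder:

    #include "opencv2/gpucodec.hpp"   // assumed to pull in cv::gpu::GpuMat on this branch

    int main()
    {
        cv::Ptr<cv::gpucodec::VideoReader> reader =
            cv::gpucodec::createVideoReader(cv::String("input.mp4"));   // hypothetical input file

        cv::gpu::GpuMat frame;
        while (reader->nextFrame(frame))   // returns false once the stream is exhausted
        {
            // frame holds the decoded image on the GPU
            // (CV_8UC4 after NV12_to_RGB, see cudaPostProcessFrame above)
        }
        return 0;
    }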

@ -0,0 +1,121 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef HAVE_NVCUVID
using namespace cv;
using namespace cv::gpucodec;
using namespace cv::gpucodec::detail;
bool cv::gpucodec::detail::VideoSource::parseVideoData(const unsigned char* data, size_t size, bool endOfStream)
{
return videoParser_->parseVideoData(data, size, endOfStream);
}
cv::gpucodec::detail::RawVideoSourceWrapper::RawVideoSourceWrapper(const Ptr<RawVideoSource>& source) :
source_(source)
{
CV_Assert( !source_.empty() );
}
cv::gpucodec::FormatInfo cv::gpucodec::detail::RawVideoSourceWrapper::format() const
{
return source_->format();
}
void cv::gpucodec::detail::RawVideoSourceWrapper::start()
{
stop_ = false;
hasError_ = false;
thread_ = new Thread(readLoop, this);
}
void cv::gpucodec::detail::RawVideoSourceWrapper::stop()
{
stop_ = true;
thread_->wait();
thread_.release();
}
bool cv::gpucodec::detail::RawVideoSourceWrapper::isStarted() const
{
return !stop_;
}
bool cv::gpucodec::detail::RawVideoSourceWrapper::hasError() const
{
return hasError_;
}
void cv::gpucodec::detail::RawVideoSourceWrapper::readLoop(void* userData)
{
RawVideoSourceWrapper* thiz = static_cast<RawVideoSourceWrapper*>(userData);
for (;;)
{
unsigned char* data;
int size;
bool endOfFile;
if (!thiz->source_->getNextPacket(&data, &size, &endOfFile))
{
thiz->hasError_ = !endOfFile;
break;
}
if (!thiz->parseVideoData(data, size))
{
thiz->hasError_ = true;
break;
}
if (thiz->stop_)
break;
}
thiz->parseVideoData(0, 0, true);
}
#endif // HAVE_NVCUVID
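RawVideoSourceWrapper is what makes the second createVideoReader() overload work: readLoop() spins in its own thread, pulls demuxed bitstream chunks from the user object via getNextPacket(), and pushes them into the parser until end of file or error. A user-side RawVideoSource therefore only has to deliver packets plus a FormatInfo. A hedged sketch of such a source; the virtual method names and signatures are inferred from the calls in readLoop() above and are otherwise an assumption:

    #include <vector>
    #include "opencv2/gpucodec.hpp"

    // Sketch: a user-implemented raw source that replays pre-demuxed packets from memory.
    class MemoryPacketSource : public cv::gpucodec::RawVideoSource   // hypothetical class name
    {
    public:
        MemoryPacketSource(const std::vector< std::vector<unsigned char> >& packets, cv::gpucodec::FormatInfo fmt)
            : packets_(packets), fmt_(fmt), pos_(0) {}

        // Called from RawVideoSourceWrapper::readLoop(); *endOfFile = true means "no error, just done".
        bool getNextPacket(unsigned char** data, int* size, bool* endOfFile)
        {
            if (pos_ >= packets_.size()) { *endOfFile = true; return false; }
            *data = &packets_[pos_][0];
            *size = static_cast<int>(packets_[pos_].size());
            *endOfFile = false;
            ++pos_;
            return true;
        }

        cv::gpucodec::FormatInfo format() const { return fmt_; }

    private:
        std::vector< std::vector<unsigned char> > packets_;
        cv::gpucodec::FormatInfo fmt_;
        size_t pos_;
    };

    // Usage:
    //   cv::Ptr<cv::gpucodec::RawVideoSource> src(new MemoryPacketSource(pkts, fmt));
    //   cv::Ptr<cv::gpucodec::VideoReader> reader = cv::gpucodec::createVideoReader(src);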

@ -0,0 +1,99 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __GPUCODEC_VIDEO_SOURCE_H__
#define __GPUCODEC_VIDEO_SOURCE_H__
#include "opencv2/core/private.gpu.hpp"
#include "opencv2/gpucodec.hpp"
#include "thread.hpp"
namespace cv { namespace gpucodec { namespace detail
{
class VideoParser;
class VideoSource
{
public:
virtual ~VideoSource() {}
virtual FormatInfo format() const = 0;
virtual void start() = 0;
virtual void stop() = 0;
virtual bool isStarted() const = 0;
virtual bool hasError() const = 0;
void setVideoParser(detail::VideoParser* videoParser) { videoParser_ = videoParser; }
protected:
bool parseVideoData(const uchar* data, size_t size, bool endOfStream = false);
private:
detail::VideoParser* videoParser_;
};
class RawVideoSourceWrapper : public VideoSource
{
public:
RawVideoSourceWrapper(const Ptr<RawVideoSource>& source);
FormatInfo format() const;
void start();
void stop();
bool isStarted() const;
bool hasError() const;
private:
Ptr<RawVideoSource> source_;
Ptr<Thread> thread_;
volatile bool stop_;
volatile bool hasError_;
static void readLoop(void* userData);
};
}}}
#endif // __GPUCODEC_VIDEO_SOURCE_H__

@ -12,6 +12,7 @@
// //
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -42,36 +43,32 @@
#include "precomp.hpp" #include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::gpucodec;
#if !defined(HAVE_NVCUVID) || !defined(WIN32) #if !defined(HAVE_NVCUVID) || !defined(WIN32)
class cv::gpu::VideoWriter_GPU::Impl cv::gpucodec::EncoderParams::EncoderParams() { throw_no_cuda(); }
{ cv::gpucodec::EncoderParams::EncoderParams(const String&) { throw_no_cuda(); }
}; void cv::gpucodec::EncoderParams::load(const String&) { throw_no_cuda(); }
void cv::gpucodec::EncoderParams::save(const String&) const { throw_no_cuda(); }
cv::gpu::VideoWriter_GPU::VideoWriter_GPU() { throw_no_cuda(); } Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const String&, Size, double, SurfaceFormat) { throw_no_cuda(); return Ptr<VideoWriter>(); }
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const String&, cv::Size, double, SurfaceFormat) { throw_no_cuda(); } Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const String&, Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); return Ptr<VideoWriter>(); }
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const String&, cv::Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); }
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>&, cv::Size, double, SurfaceFormat) { throw_no_cuda(); }
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>&, cv::Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); }
cv::gpu::VideoWriter_GPU::~VideoWriter_GPU() {}
void cv::gpu::VideoWriter_GPU::open(const String&, cv::Size, double, SurfaceFormat) { throw_no_cuda(); }
void cv::gpu::VideoWriter_GPU::open(const String&, cv::Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); }
void cv::gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>&, cv::Size, double, SurfaceFormat) { throw_no_cuda(); }
void cv::gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>&, cv::Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); }
bool cv::gpu::VideoWriter_GPU::isOpened() const { return false; }
void cv::gpu::VideoWriter_GPU::close() {}
void cv::gpu::VideoWriter_GPU::write(const cv::gpu::GpuMat&, bool) { throw_no_cuda(); }
cv::gpu::VideoWriter_GPU::EncoderParams cv::gpu::VideoWriter_GPU::getParams() const { EncoderParams params; throw_no_cuda(); return params; }
cv::gpu::VideoWriter_GPU::EncoderParams::EncoderParams() { throw_no_cuda(); } Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const Ptr<EncoderCallBack>&, Size, double, SurfaceFormat) { throw_no_cuda(); return Ptr<VideoWriter>(); }
cv::gpu::VideoWriter_GPU::EncoderParams::EncoderParams(const String&) { throw_no_cuda(); } Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const Ptr<EncoderCallBack>&, Size, double, const EncoderParams&, SurfaceFormat) { throw_no_cuda(); return Ptr<VideoWriter>(); }
void cv::gpu::VideoWriter_GPU::EncoderParams::load(const String&) { throw_no_cuda(); }
void cv::gpu::VideoWriter_GPU::EncoderParams::save(const String&) const { throw_no_cuda(); }
#else // !defined HAVE_CUDA || !defined WIN32 #else // !defined HAVE_CUDA || !defined WIN32
namespace cv { namespace gpu { namespace cudev
{
void RGB_to_YV12(const PtrStepSzb src, int cn, PtrStepSzb dst, cudaStream_t stream = 0);
}}}
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// VideoWriter_GPU::Impl // VideoWriterImpl
namespace namespace
{ {
@ -84,7 +81,7 @@ namespace
err = NVGetHWEncodeCaps(); err = NVGetHWEncodeCaps();
if (err) if (err)
CV_Error(cv::Error::GpuNotSupported, "No CUDA capability present"); CV_Error(Error::GpuNotSupported, "No CUDA capability present");
// Create the Encoder API Interface // Create the Encoder API Interface
err = NVCreateEncoder(&encoder_); err = NVCreateEncoder(&encoder_);
@ -113,30 +110,26 @@ namespace
MPEG4, // not supported yet MPEG4, // not supported yet
H264 H264
}; };
}
class cv::gpu::VideoWriter_GPU::Impl class VideoWriterImpl : public VideoWriter
{ {
public: public:
Impl(const cv::Ptr<EncoderCallBack>& callback, cv::Size frameSize, double fps, SurfaceFormat format, CodecType codec = H264); VideoWriterImpl(const Ptr<EncoderCallBack>& callback, Size frameSize, double fps, SurfaceFormat format, CodecType codec = H264);
Impl(const cv::Ptr<EncoderCallBack>& callback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format, CodecType codec = H264); VideoWriterImpl(const Ptr<EncoderCallBack>& callback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format, CodecType codec = H264);
void write(const cv::gpu::GpuMat& image, bool lastFrame); void write(InputArray frame, bool lastFrame = false);
EncoderParams getParams() const; EncoderParams getEncoderParams() const;
private: private:
Impl(const Impl&);
Impl& operator=(const Impl&);
void initEncoder(double fps); void initEncoder(double fps);
void setEncodeParams(const EncoderParams& params); void setEncodeParams(const EncoderParams& params);
void initGpuMemory(); void initGpuMemory();
void initCallBacks(); void initCallBacks();
void createHWEncoder(); void createHWEncoder();
cv::Ptr<EncoderCallBack> callback_; Ptr<EncoderCallBack> callback_;
cv::Size frameSize_; Size frameSize_;
CodecType codec_; CodecType codec_;
SurfaceFormat inputFormat_; SurfaceFormat inputFormat_;
@ -144,7 +137,7 @@ private:
NVEncoderWrapper encoder_; NVEncoderWrapper encoder_;
cv::gpu::GpuMat videoFrame_; GpuMat videoFrame_;
CUvideoctxlock cuCtxLock_; CUvideoctxlock cuCtxLock_;
// CallBacks // CallBacks
@ -155,14 +148,14 @@ private:
static void NVENCAPI HandleOnEndFrame(const NVVE_EndFrameInfo* pefi, void* pUserdata); static void NVENCAPI HandleOnEndFrame(const NVVE_EndFrameInfo* pefi, void* pUserdata);
}; };
cv::gpu::VideoWriter_GPU::Impl::Impl(const cv::Ptr<EncoderCallBack>& callback, cv::Size frameSize, double fps, SurfaceFormat format, CodecType codec) : VideoWriterImpl::VideoWriterImpl(const Ptr<EncoderCallBack>& callback, Size frameSize, double fps, SurfaceFormat format, CodecType codec) :
callback_(callback), callback_(callback),
frameSize_(frameSize), frameSize_(frameSize),
codec_(codec), codec_(codec),
inputFormat_(format), inputFormat_(format),
cuCtxLock_(0) cuCtxLock_(0)
{ {
surfaceFormat_ = inputFormat_ == SF_BGR ? YV12 : static_cast<NVVE_SurfaceFormat>(inputFormat_); surfaceFormat_ = (inputFormat_ == SF_BGR ? YV12 : static_cast<NVVE_SurfaceFormat>(inputFormat_));
initEncoder(fps); initEncoder(fps);
@ -173,14 +166,14 @@ cv::gpu::VideoWriter_GPU::Impl::Impl(const cv::Ptr<EncoderCallBack>& callback, c
createHWEncoder(); createHWEncoder();
} }
cv::gpu::VideoWriter_GPU::Impl::Impl(const cv::Ptr<EncoderCallBack>& callback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format, CodecType codec) : VideoWriterImpl::VideoWriterImpl(const Ptr<EncoderCallBack>& callback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format, CodecType codec) :
callback_(callback), callback_(callback),
frameSize_(frameSize), frameSize_(frameSize),
codec_(codec), codec_(codec),
inputFormat_(format), inputFormat_(format),
cuCtxLock_(0) cuCtxLock_(0)
{ {
surfaceFormat_ = inputFormat_ == SF_BGR ? YV12 : static_cast<NVVE_SurfaceFormat>(inputFormat_); surfaceFormat_ = (inputFormat_ == SF_BGR ? YV12 : static_cast<NVVE_SurfaceFormat>(inputFormat_));
initEncoder(fps); initEncoder(fps);
@ -193,7 +186,7 @@ cv::gpu::VideoWriter_GPU::Impl::Impl(const cv::Ptr<EncoderCallBack>& callback, c
createHWEncoder(); createHWEncoder();
} }
void cv::gpu::VideoWriter_GPU::Impl::initEncoder(double fps) void VideoWriterImpl::initEncoder(double fps)
{ {
int err; int err;
@ -205,7 +198,7 @@ void cv::gpu::VideoWriter_GPU::Impl::initEncoder(double fps)
}; };
err = NVSetCodec(encoder_, codecs_id[codec_]); err = NVSetCodec(encoder_, codecs_id[codec_]);
if (err) if (err)
CV_Error(cv::Error::StsNotImplemented, "Codec format is not supported"); CV_Error(Error::StsNotImplemented, "Codec format is not supported");
// Set default params // Set default params
@ -239,12 +232,12 @@ void cv::gpu::VideoWriter_GPU::Impl::initEncoder(double fps)
// Select device for encoding // Select device for encoding
int gpuID = cv::gpu::getDevice(); int gpuID = getDevice();
err = NVSetParamValue(encoder_, NVVE_FORCE_GPU_SELECTION, &gpuID); err = NVSetParamValue(encoder_, NVVE_FORCE_GPU_SELECTION, &gpuID);
CV_Assert( err == 0 ); CV_Assert( err == 0 );
} }
void cv::gpu::VideoWriter_GPU::Impl::setEncodeParams(const EncoderParams& params) void VideoWriterImpl::setEncodeParams(const EncoderParams& params)
{ {
int err; int err;
@ -328,7 +321,7 @@ void cv::gpu::VideoWriter_GPU::Impl::setEncodeParams(const EncoderParams& params
CV_Assert( err == 0 ); CV_Assert( err == 0 );
} }
cv::gpu::VideoWriter_GPU::EncoderParams cv::gpu::VideoWriter_GPU::Impl::getParams() const EncoderParams VideoWriterImpl::getEncoderParams() const
{ {
int err; int err;
@ -429,13 +422,12 @@ cv::gpu::VideoWriter_GPU::EncoderParams cv::gpu::VideoWriter_GPU::Impl::getParam
return params; return params;
} }
void cv::gpu::VideoWriter_GPU::Impl::initGpuMemory() void VideoWriterImpl::initGpuMemory()
{ {
int err; int err;
CUresult cuRes;
// initialize context // initialize context
cv::gpu::GpuMat temp(1, 1, CV_8U); GpuMat temp(1, 1, CV_8U);
temp.release(); temp.release();
static const int bpp[] = static const int bpp[] =
@ -448,8 +440,7 @@ void cv::gpu::VideoWriter_GPU::Impl::initGpuMemory()
}; };
CUcontext cuContext; CUcontext cuContext;
cuRes = cuCtxGetCurrent(&cuContext); cuSafeCall( cuCtxGetCurrent(&cuContext) );
CV_Assert( cuRes == CUDA_SUCCESS );
// Allocate the CUDA memory Pitched Surface // Allocate the CUDA memory Pitched Surface
if (surfaceFormat_ == UYVY || surfaceFormat_ == YUY2) if (surfaceFormat_ == UYVY || surfaceFormat_ == YUY2)
@ -458,8 +449,7 @@ void cv::gpu::VideoWriter_GPU::Impl::initGpuMemory()
videoFrame_.create((frameSize_.height * bpp[surfaceFormat_]) / 8, frameSize_.width, CV_8UC1); videoFrame_.create((frameSize_.height * bpp[surfaceFormat_]) / 8, frameSize_.width, CV_8UC1);
// Create the Video Context Lock (used for synchronization) // Create the Video Context Lock (used for synchronization)
cuRes = cuvidCtxLockCreate(&cuCtxLock_, cuContext); cuSafeCall( cuvidCtxLockCreate(&cuCtxLock_, cuContext) );
CV_Assert( cuRes == CUDA_SUCCESS );
// If we are using GPU Device Memory with NVCUVENC, it is necessary to create a // If we are using GPU Device Memory with NVCUVENC, it is necessary to create a
// CUDA Context with a Context Lock cuvidCtxLock. The Context Lock needs to be passed to NVCUVENC // CUDA Context with a Context Lock cuvidCtxLock. The Context Lock needs to be passed to NVCUVENC
@ -472,7 +462,7 @@ void cv::gpu::VideoWriter_GPU::Impl::initGpuMemory()
CV_Assert( err == 0 ); CV_Assert( err == 0 );
} }
void cv::gpu::VideoWriter_GPU::Impl::initCallBacks() void VideoWriterImpl::initCallBacks()
{ {
NVVE_CallbackParams cb; NVVE_CallbackParams cb;
memset(&cb, 0, sizeof(NVVE_CallbackParams)); memset(&cb, 0, sizeof(NVVE_CallbackParams));
@ -485,7 +475,7 @@ void cv::gpu::VideoWriter_GPU::Impl::initCallBacks()
NVRegisterCB(encoder_, cb, this); NVRegisterCB(encoder_, cb, this);
} }
void cv::gpu::VideoWriter_GPU::Impl::createHWEncoder() void VideoWriterImpl::createHWEncoder()
{ {
int err; int err;
@ -494,19 +484,16 @@ void cv::gpu::VideoWriter_GPU::Impl::createHWEncoder()
CV_Assert( err == 0 ); CV_Assert( err == 0 );
} }
namespace
{
// UYVY/YUY2 are both 4:2:2 formats (16bpc) // UYVY/YUY2 are both 4:2:2 formats (16bpc)
// Luma, U, V are interleaved, chroma is subsampled (w/2,h) // Luma, U, V are interleaved, chroma is subsampled (w/2,h)
void copyUYVYorYUY2Frame(cv::Size frameSize, const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) void copyUYVYorYUY2Frame(Size frameSize, const GpuMat& src, GpuMat& dst)
{ {
CUresult res;
// Source is UYVY/YUY2 4:2:2, the YUV data is packed and interleaved // Source is UYVY/YUY2 4:2:2, the YUV data is packed and interleaved
// YUV Copy setup // YUV Copy setup
CUDA_MEMCPY2D stCopyYUV422; CUDA_MEMCPY2D stCopyYUV422;
memset((void*)&stCopyYUV422, 0, sizeof(stCopyYUV422)); memset(&stCopyYUV422, 0, sizeof(CUDA_MEMCPY2D));
stCopyYUV422.srcXInBytes = 0; stCopyYUV422.srcXInBytes = 0;
stCopyYUV422.srcY = 0; stCopyYUV422.srcY = 0;
stCopyYUV422.srcMemoryType = CU_MEMORYTYPE_DEVICE; stCopyYUV422.srcMemoryType = CU_MEMORYTYPE_DEVICE;
@ -527,21 +514,19 @@ namespace
stCopyYUV422.Height = frameSize.height; stCopyYUV422.Height = frameSize.height;
// DMA Luma/Chroma // DMA Luma/Chroma
res = cuMemcpy2D(&stCopyYUV422); cuSafeCall( cuMemcpy2D(&stCopyYUV422) );
CV_Assert( res == CUDA_SUCCESS );
} }
// YV12/IYUV are both 4:2:0 planar formats (12bpc) // YV12/IYUV are both 4:2:0 planar formats (12bpc)
// Luma, U, V chroma planar (12bpc), chroma is subsampled (w/2,h/2) // Luma, U, V chroma planar (12bpc), chroma is subsampled (w/2,h/2)
void copyYV12orIYUVFrame(cv::Size frameSize, const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) void copyYV12orIYUVFrame(Size frameSize, const GpuMat& src, GpuMat& dst)
{ {
CUresult res;
// Source is YV12/IYUV, this native format is converted to NV12 format by the video encoder // Source is YV12/IYUV, this native format is converted to NV12 format by the video encoder
// (1) luma copy setup // (1) luma copy setup
CUDA_MEMCPY2D stCopyLuma; CUDA_MEMCPY2D stCopyLuma;
memset((void*)&stCopyLuma, 0, sizeof(stCopyLuma)); memset(&stCopyLuma, 0, sizeof(CUDA_MEMCPY2D));
stCopyLuma.srcXInBytes = 0; stCopyLuma.srcXInBytes = 0;
stCopyLuma.srcY = 0; stCopyLuma.srcY = 0;
stCopyLuma.srcMemoryType = CU_MEMORYTYPE_DEVICE; stCopyLuma.srcMemoryType = CU_MEMORYTYPE_DEVICE;
@ -563,7 +548,8 @@ namespace
// (2) chroma copy setup, U/V can be done together // (2) chroma copy setup, U/V can be done together
CUDA_MEMCPY2D stCopyChroma; CUDA_MEMCPY2D stCopyChroma;
memset((void*)&stCopyChroma, 0, sizeof(stCopyChroma)); memset(&stCopyChroma, 0, sizeof(CUDA_MEMCPY2D));
stCopyChroma.srcXInBytes = 0; stCopyChroma.srcXInBytes = 0;
stCopyChroma.srcY = frameSize.height << 1; // U/V chroma offset stCopyChroma.srcY = frameSize.height << 1; // U/V chroma offset
stCopyChroma.srcMemoryType = CU_MEMORYTYPE_DEVICE; stCopyChroma.srcMemoryType = CU_MEMORYTYPE_DEVICE;
@ -584,26 +570,23 @@ namespace
stCopyChroma.Height = frameSize.height; // U/V are sent together stCopyChroma.Height = frameSize.height; // U/V are sent together
// DMA Luma // DMA Luma
res = cuMemcpy2D(&stCopyLuma); cuSafeCall( cuMemcpy2D(&stCopyLuma) );
CV_Assert( res == CUDA_SUCCESS );
// DMA Chroma channels (UV side by side) // DMA Chroma channels (UV side by side)
res = cuMemcpy2D(&stCopyChroma); cuSafeCall( cuMemcpy2D(&stCopyChroma) );
CV_Assert( res == CUDA_SUCCESS );
} }
// NV12 is 4:2:0 format (12bpc) // NV12 is 4:2:0 format (12bpc)
// Luma followed by U/V chroma interleaved (12bpc), chroma is subsampled (w/2,h/2) // Luma followed by U/V chroma interleaved (12bpc), chroma is subsampled (w/2,h/2)
void copyNV12Frame(cv::Size frameSize, const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst) void copyNV12Frame(Size frameSize, const GpuMat& src, GpuMat& dst)
{ {
CUresult res;
// Source is NV12 in pitch linear memory // Source is NV12 in pitch linear memory
// Because we assume input is NV12 (if we take input in the native format), the encoder handles NV12 as a native format in pitch linear memory // Because we assume input is NV12 (if we take input in the native format), the encoder handles NV12 as a native format in pitch linear memory
// Luma/Chroma can be done in a single transfer // Luma/Chroma can be done in a single transfer
CUDA_MEMCPY2D stCopyNV12; CUDA_MEMCPY2D stCopyNV12;
memset((void*)&stCopyNV12, 0, sizeof(stCopyNV12)); memset(&stCopyNV12, 0, sizeof(CUDA_MEMCPY2D));
stCopyNV12.srcXInBytes = 0; stCopyNV12.srcXInBytes = 0;
stCopyNV12.srcY = 0; stCopyNV12.srcY = 0;
stCopyNV12.srcMemoryType = CU_MEMORYTYPE_DEVICE; stCopyNV12.srcMemoryType = CU_MEMORYTYPE_DEVICE;
@ -624,18 +607,13 @@ namespace
stCopyNV12.Height = (frameSize.height * 3) >> 1; stCopyNV12.Height = (frameSize.height * 3) >> 1;
// DMA Luma/Chroma // DMA Luma/Chroma
res = cuMemcpy2D(&stCopyNV12); cuSafeCall( cuMemcpy2D(&stCopyNV12) );
CV_Assert( res == CUDA_SUCCESS );
}
} }
namespace cv { namespace gpu { namespace cudev void VideoWriterImpl::write(InputArray _frame, bool lastFrame)
{ {
void RGB_to_YV12(const PtrStepSzb src, int cn, PtrStepSzb dst, cudaStream_t stream = 0); GpuMat frame = _frame.getGpuMat();
}}}
void cv::gpu::VideoWriter_GPU::Impl::write(const cv::gpu::GpuMat& frame, bool lastFrame)
{
if (inputFormat_ == SF_BGR) if (inputFormat_ == SF_BGR)
{ {
CV_Assert( frame.size() == frameSize_ ); CV_Assert( frame.size() == frameSize_ );
@ -660,11 +638,12 @@ void cv::gpu::VideoWriter_GPU::Impl::write(const cv::gpu::GpuMat& frame, bool la
efparams.picBuf = 0; // Must be set to NULL in order to support device memory input efparams.picBuf = 0; // Must be set to NULL in order to support device memory input
// Don't forget we need to lock/unlock between memcopies // Don't forget we need to lock/unlock between memcopies
CUresult res = cuvidCtxLock(cuCtxLock_, 0); cuSafeCall( cuvidCtxLock(cuCtxLock_, 0) );
CV_Assert( res == CUDA_SUCCESS );
if (inputFormat_ == SF_BGR) if (inputFormat_ == SF_BGR)
cv::gpu::cudev::RGB_to_YV12(frame, frame.channels(), videoFrame_); {
cudev::RGB_to_YV12(frame, frame.channels(), videoFrame_);
}
else else
{ {
switch (surfaceFormat_) switch (surfaceFormat_)
@ -685,37 +664,36 @@ void cv::gpu::VideoWriter_GPU::Impl::write(const cv::gpu::GpuMat& frame, bool la
} }
} }
res = cuvidCtxUnlock(cuCtxLock_, 0); cuSafeCall( cuvidCtxUnlock(cuCtxLock_, 0) );
CV_Assert( res == CUDA_SUCCESS );
int err = NVEncodeFrame(encoder_, &efparams, 0, videoFrame_.data); int err = NVEncodeFrame(encoder_, &efparams, 0, videoFrame_.data);
CV_Assert( err == 0 ); CV_Assert( err == 0 );
} }
unsigned char* NVENCAPI cv::gpu::VideoWriter_GPU::Impl::HandleAcquireBitStream(int* pBufferSize, void* pUserdata) unsigned char* NVENCAPI VideoWriterImpl::HandleAcquireBitStream(int* pBufferSize, void* pUserdata)
{ {
Impl* thiz = static_cast<Impl*>(pUserdata); VideoWriterImpl* thiz = static_cast<VideoWriterImpl*>(pUserdata);
return thiz->callback_->acquireBitStream(pBufferSize); return thiz->callback_->acquireBitStream(pBufferSize);
} }
void NVENCAPI cv::gpu::VideoWriter_GPU::Impl::HandleReleaseBitStream(int nBytesInBuffer, unsigned char* cb, void* pUserdata) void NVENCAPI VideoWriterImpl::HandleReleaseBitStream(int nBytesInBuffer, unsigned char* cb, void* pUserdata)
{ {
Impl* thiz = static_cast<Impl*>(pUserdata); VideoWriterImpl* thiz = static_cast<VideoWriterImpl*>(pUserdata);
thiz->callback_->releaseBitStream(cb, nBytesInBuffer); thiz->callback_->releaseBitStream(cb, nBytesInBuffer);
} }
void NVENCAPI cv::gpu::VideoWriter_GPU::Impl::HandleOnBeginFrame(const NVVE_BeginFrameInfo* pbfi, void* pUserdata) void NVENCAPI VideoWriterImpl::HandleOnBeginFrame(const NVVE_BeginFrameInfo* pbfi, void* pUserdata)
{ {
Impl* thiz = static_cast<Impl*>(pUserdata); VideoWriterImpl* thiz = static_cast<VideoWriterImpl*>(pUserdata);
thiz->callback_->onBeginFrame(pbfi->nFrameNumber, static_cast<EncoderCallBack::PicType>(pbfi->nPicType)); thiz->callback_->onBeginFrame(pbfi->nFrameNumber, static_cast<EncoderCallBack::PicType>(pbfi->nPicType));
} }
void NVENCAPI cv::gpu::VideoWriter_GPU::Impl::HandleOnEndFrame(const NVVE_EndFrameInfo* pefi, void* pUserdata) void NVENCAPI VideoWriterImpl::HandleOnEndFrame(const NVVE_EndFrameInfo* pefi, void* pUserdata)
{ {
Impl* thiz = static_cast<Impl*>(pUserdata); VideoWriterImpl* thiz = static_cast<VideoWriterImpl*>(pUserdata);
thiz->callback_->onEndFrame(pefi->nFrameNumber, static_cast<EncoderCallBack::PicType>(pefi->nPicType)); thiz->callback_->onEndFrame(pefi->nFrameNumber, static_cast<EncoderCallBack::PicType>(pefi->nPicType));
} }
@ -723,10 +701,10 @@ void NVENCAPI cv::gpu::VideoWriter_GPU::Impl::HandleOnEndFrame(const NVVE_EndFra
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// FFMPEG // FFMPEG
class EncoderCallBackFFMPEG : public cv::gpu::VideoWriter_GPU::EncoderCallBack class EncoderCallBackFFMPEG : public EncoderCallBack
{ {
public: public:
EncoderCallBackFFMPEG(const cv::String& fileName, cv::Size frameSize, double fps); EncoderCallBackFFMPEG(const String& fileName, Size frameSize, double fps);
~EncoderCallBackFFMPEG(); ~EncoderCallBackFFMPEG();
unsigned char* acquireBitStream(int* bufferSize); unsigned char* acquireBitStream(int* bufferSize);
@ -735,27 +713,28 @@ public:
void onEndFrame(int frameNumber, PicType picType); void onEndFrame(int frameNumber, PicType picType);
private: private:
EncoderCallBackFFMPEG(const EncoderCallBackFFMPEG&); static bool init_MediaStream_FFMPEG();
EncoderCallBackFFMPEG& operator=(const EncoderCallBackFFMPEG&);
struct OutputMediaStream_FFMPEG* stream_; struct OutputMediaStream_FFMPEG* stream_;
std::vector<uchar> buf_; std::vector<uchar> buf_;
bool isKeyFrame_; bool isKeyFrame_;
static Create_OutputMediaStream_FFMPEG_Plugin create_OutputMediaStream_FFMPEG_p;
static Release_OutputMediaStream_FFMPEG_Plugin release_OutputMediaStream_FFMPEG_p;
static Write_OutputMediaStream_FFMPEG_Plugin write_OutputMediaStream_FFMPEG_p;
}; };
namespace Create_OutputMediaStream_FFMPEG_Plugin EncoderCallBackFFMPEG::create_OutputMediaStream_FFMPEG_p = 0;
{ Release_OutputMediaStream_FFMPEG_Plugin EncoderCallBackFFMPEG::release_OutputMediaStream_FFMPEG_p = 0;
Create_OutputMediaStream_FFMPEG_Plugin create_OutputMediaStream_FFMPEG_p = 0; Write_OutputMediaStream_FFMPEG_Plugin EncoderCallBackFFMPEG::write_OutputMediaStream_FFMPEG_p = 0;
Release_OutputMediaStream_FFMPEG_Plugin release_OutputMediaStream_FFMPEG_p = 0;
Write_OutputMediaStream_FFMPEG_Plugin write_OutputMediaStream_FFMPEG_p = 0;
bool init_MediaStream_FFMPEG() bool EncoderCallBackFFMPEG::init_MediaStream_FFMPEG()
{ {
static bool initialized = 0; static bool initialized = false;
if (!initialized) if (!initialized)
{ {
#if defined WIN32 || defined _WIN32 #if defined(WIN32) || defined(_WIN32)
const char* module_name = "opencv_ffmpeg" const char* module_name = "opencv_ffmpeg"
CVAUX_STR(CV_VERSION_EPOCH) CVAUX_STR(CV_VERSION_MAJOR) CVAUX_STR(CV_VERSION_MINOR) CVAUX_STR(CV_VERSION_EPOCH) CVAUX_STR(CV_VERSION_MAJOR) CVAUX_STR(CV_VERSION_MINOR)
#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__) #if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__)
@ -776,7 +755,7 @@ namespace
initialized = create_OutputMediaStream_FFMPEG_p != 0 && release_OutputMediaStream_FFMPEG_p != 0 && write_OutputMediaStream_FFMPEG_p != 0; initialized = create_OutputMediaStream_FFMPEG_p != 0 && release_OutputMediaStream_FFMPEG_p != 0 && write_OutputMediaStream_FFMPEG_p != 0;
} }
#elif defined HAVE_FFMPEG #elif defined(HAVE_FFMPEG)
create_OutputMediaStream_FFMPEG_p = create_OutputMediaStream_FFMPEG; create_OutputMediaStream_FFMPEG_p = create_OutputMediaStream_FFMPEG;
release_OutputMediaStream_FFMPEG_p = release_OutputMediaStream_FFMPEG; release_OutputMediaStream_FFMPEG_p = release_OutputMediaStream_FFMPEG;
write_OutputMediaStream_FFMPEG_p = write_OutputMediaStream_FFMPEG; write_OutputMediaStream_FFMPEG_p = write_OutputMediaStream_FFMPEG;
@ -787,9 +766,8 @@ namespace
return initialized; return initialized;
} }
}
EncoderCallBackFFMPEG::EncoderCallBackFFMPEG(const cv::String& fileName, cv::Size frameSize, double fps) : EncoderCallBackFFMPEG::EncoderCallBackFFMPEG(const String& fileName, Size frameSize, double fps) :
stream_(0), isKeyFrame_(false) stream_(0), isKeyFrame_(false)
{ {
int buf_size = std::max(frameSize.area() * 4, 1024 * 1024); int buf_size = std::max(frameSize.area() * 4, 1024 * 1024);
@ -820,7 +798,7 @@ void EncoderCallBackFFMPEG::releaseBitStream(unsigned char* data, int size)
void EncoderCallBackFFMPEG::onBeginFrame(int frameNumber, PicType picType) void EncoderCallBackFFMPEG::onBeginFrame(int frameNumber, PicType picType)
{ {
(void) frameNumber; (void) frameNumber;
isKeyFrame_ = picType == IFRAME; isKeyFrame_ = (picType == IFRAME);
} }
void EncoderCallBackFFMPEG::onEndFrame(int frameNumber, PicType picType) void EncoderCallBackFFMPEG::onEndFrame(int frameNumber, PicType picType)
@ -828,93 +806,12 @@ void EncoderCallBackFFMPEG::onEndFrame(int frameNumber, PicType picType)
(void) frameNumber; (void) frameNumber;
(void) picType; (void) picType;
} }
///////////////////////////////////////////////////////////////////////////
// VideoWriter_GPU
cv::gpu::VideoWriter_GPU::VideoWriter_GPU()
{
}
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format)
{
open(fileName, frameSize, fps, format);
}
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
open(fileName, frameSize, fps, params, format);
}
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format)
{
open(encoderCallback, frameSize, fps, format);
}
cv::gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
open(encoderCallback, frameSize, fps, params, format);
}
cv::gpu::VideoWriter_GPU::~VideoWriter_GPU()
{
close();
}
void cv::gpu::VideoWriter_GPU::open(const String& fileName, cv::Size frameSize, double fps, SurfaceFormat format)
{
close();
cv::Ptr<EncoderCallBack> encoderCallback(new EncoderCallBackFFMPEG(fileName, frameSize, fps));
open(encoderCallback, frameSize, fps, format);
}
void cv::gpu::VideoWriter_GPU::open(const String& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
close();
cv::Ptr<EncoderCallBack> encoderCallback(new EncoderCallBackFFMPEG(fileName, frameSize, fps));
open(encoderCallback, frameSize, fps, params, format);
}
void cv::gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format)
{
close();
impl_ = new Impl(encoderCallback, frameSize, fps, format);
}
void cv::gpu::VideoWriter_GPU::open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
close();
impl_ = new Impl(encoderCallback, frameSize, fps, params, format);
}
bool cv::gpu::VideoWriter_GPU::isOpened() const
{
return !impl_.empty();
}
void cv::gpu::VideoWriter_GPU::close()
{
impl_.release();
}
void cv::gpu::VideoWriter_GPU::write(const cv::gpu::GpuMat& image, bool lastFrame)
{
CV_Assert( isOpened() );
impl_->write(image, lastFrame);
}
cv::gpu::VideoWriter_GPU::EncoderParams cv::gpu::VideoWriter_GPU::getParams() const
{
CV_Assert( isOpened() );
return impl_->getParams();
} }
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
// VideoWriter_GPU::EncoderParams // EncoderParams
cv::gpu::VideoWriter_GPU::EncoderParams::EncoderParams() cv::gpucodec::EncoderParams::EncoderParams()
{ {
P_Interval = 3; P_Interval = 3;
IDR_Period = 15; IDR_Period = 15;
@ -937,66 +834,86 @@ cv::gpu::VideoWriter_GPU::EncoderParams::EncoderParams()
DisableSPSPPS = 0; DisableSPSPPS = 0;
} }
cv::gpu::VideoWriter_GPU::EncoderParams::EncoderParams(const String& configFile) cv::gpucodec::EncoderParams::EncoderParams(const String& configFile)
{ {
load(configFile); load(configFile);
} }
void cv::gpu::VideoWriter_GPU::EncoderParams::load(const String& configFile) void cv::gpucodec::EncoderParams::load(const String& configFile)
{ {
cv::FileStorage fs(configFile, cv::FileStorage::READ); FileStorage fs(configFile, FileStorage::READ);
CV_Assert( fs.isOpened() ); CV_Assert( fs.isOpened() );
cv::read(fs["P_Interval" ], P_Interval, 3); read(fs["P_Interval" ], P_Interval, 3);
cv::read(fs["IDR_Period" ], IDR_Period, 15); read(fs["IDR_Period" ], IDR_Period, 15);
cv::read(fs["DynamicGOP" ], DynamicGOP, 0); read(fs["DynamicGOP" ], DynamicGOP, 0);
cv::read(fs["RCType" ], RCType, 1); read(fs["RCType" ], RCType, 1);
cv::read(fs["AvgBitrate" ], AvgBitrate, 4000000); read(fs["AvgBitrate" ], AvgBitrate, 4000000);
cv::read(fs["PeakBitrate" ], PeakBitrate, 10000000); read(fs["PeakBitrate" ], PeakBitrate, 10000000);
cv::read(fs["QP_Level_Intra" ], QP_Level_Intra, 25); read(fs["QP_Level_Intra" ], QP_Level_Intra, 25);
cv::read(fs["QP_Level_InterP"], QP_Level_InterP, 28); read(fs["QP_Level_InterP"], QP_Level_InterP, 28);
cv::read(fs["QP_Level_InterB"], QP_Level_InterB, 31); read(fs["QP_Level_InterB"], QP_Level_InterB, 31);
cv::read(fs["DeblockMode" ], DeblockMode, 1); read(fs["DeblockMode" ], DeblockMode, 1);
cv::read(fs["ProfileLevel" ], ProfileLevel, 65357); read(fs["ProfileLevel" ], ProfileLevel, 65357);
cv::read(fs["ForceIntra" ], ForceIntra, 0); read(fs["ForceIntra" ], ForceIntra, 0);
cv::read(fs["ForceIDR" ], ForceIDR, 0); read(fs["ForceIDR" ], ForceIDR, 0);
cv::read(fs["ClearStat" ], ClearStat, 0); read(fs["ClearStat" ], ClearStat, 0);
cv::read(fs["DIMode" ], DIMode, 1); read(fs["DIMode" ], DIMode, 1);
cv::read(fs["Presets" ], Presets, 2); read(fs["Presets" ], Presets, 2);
cv::read(fs["DisableCabac" ], DisableCabac, 0); read(fs["DisableCabac" ], DisableCabac, 0);
cv::read(fs["NaluFramingType"], NaluFramingType, 0); read(fs["NaluFramingType"], NaluFramingType, 0);
cv::read(fs["DisableSPSPPS" ], DisableSPSPPS, 0); read(fs["DisableSPSPPS" ], DisableSPSPPS, 0);
} }
void cv::gpu::VideoWriter_GPU::EncoderParams::save(const String& configFile) const void cv::gpucodec::EncoderParams::save(const String& configFile) const
{ {
cv::FileStorage fs(configFile, cv::FileStorage::WRITE); FileStorage fs(configFile, FileStorage::WRITE);
CV_Assert( fs.isOpened() ); CV_Assert( fs.isOpened() );
cv::write(fs, "P_Interval" , P_Interval); write(fs, "P_Interval" , P_Interval);
cv::write(fs, "IDR_Period" , IDR_Period); write(fs, "IDR_Period" , IDR_Period);
cv::write(fs, "DynamicGOP" , DynamicGOP); write(fs, "DynamicGOP" , DynamicGOP);
cv::write(fs, "RCType" , RCType); write(fs, "RCType" , RCType);
cv::write(fs, "AvgBitrate" , AvgBitrate); write(fs, "AvgBitrate" , AvgBitrate);
cv::write(fs, "PeakBitrate" , PeakBitrate); write(fs, "PeakBitrate" , PeakBitrate);
cv::write(fs, "QP_Level_Intra" , QP_Level_Intra); write(fs, "QP_Level_Intra" , QP_Level_Intra);
cv::write(fs, "QP_Level_InterP", QP_Level_InterP); write(fs, "QP_Level_InterP", QP_Level_InterP);
cv::write(fs, "QP_Level_InterB", QP_Level_InterB); write(fs, "QP_Level_InterB", QP_Level_InterB);
cv::write(fs, "DeblockMode" , DeblockMode); write(fs, "DeblockMode" , DeblockMode);
cv::write(fs, "ProfileLevel" , ProfileLevel); write(fs, "ProfileLevel" , ProfileLevel);
cv::write(fs, "ForceIntra" , ForceIntra); write(fs, "ForceIntra" , ForceIntra);
cv::write(fs, "ForceIDR" , ForceIDR); write(fs, "ForceIDR" , ForceIDR);
cv::write(fs, "ClearStat" , ClearStat); write(fs, "ClearStat" , ClearStat);
cv::write(fs, "DIMode" , DIMode); write(fs, "DIMode" , DIMode);
cv::write(fs, "Presets" , Presets); write(fs, "Presets" , Presets);
cv::write(fs, "DisableCabac" , DisableCabac); write(fs, "DisableCabac" , DisableCabac);
cv::write(fs, "NaluFramingType", NaluFramingType); write(fs, "NaluFramingType", NaluFramingType);
cv::write(fs, "DisableSPSPPS" , DisableSPSPPS); write(fs, "DisableSPSPPS" , DisableSPSPPS);
}
///////////////////////////////////////////////////////////////////////////
// createVideoWriter
Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const String& fileName, Size frameSize, double fps, SurfaceFormat format)
{
Ptr<EncoderCallBack> encoderCallback(new EncoderCallBackFFMPEG(fileName, frameSize, fps));
return createVideoWriter(encoderCallback, frameSize, fps, format);
}
Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const String& fileName, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
Ptr<EncoderCallBack> encoderCallback(new EncoderCallBackFFMPEG(fileName, frameSize, fps));
return createVideoWriter(encoderCallback, frameSize, fps, params, format);
}
Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, SurfaceFormat format)
{
return new VideoWriterImpl(encoderCallback, frameSize, fps, format);
}
Ptr<VideoWriter> cv::gpucodec::createVideoWriter(const Ptr<EncoderCallBack>& encoderCallback, Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format)
{
return new VideoWriterImpl(encoderCallback, frameSize, fps, params, format);
} }
#endif // !defined HAVE_CUDA || !defined WIN32 #endif // !defined HAVE_CUDA || !defined WIN32
template <> void cv::Ptr<cv::gpu::VideoWriter_GPU::Impl>::delete_obj()
{
if (obj) delete obj;
}
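On the writer side the Impl/open/close plumbing collapses the same way: createVideoWriter() returns a ready-to-use object (backed by EncoderCallBackFFMPEG when a file name is given) and write() takes an InputArray. A minimal usage sketch mirroring the updated test below; the file names, FPS value and header locations are assumptions, and BGR input goes through the SF_BGR / RGB_to_YV12 path shown above:

    #include "opencv2/gpucodec.hpp"
    #include "opencv2/highgui/highgui.hpp"   // assumed header for cv::VideoCapture on this branch

    int main()
    {
        cv::VideoCapture capture("input.avi");           // hypothetical input file
        const double FPS = 25.0;

        cv::Ptr<cv::gpucodec::VideoWriter> d_writer;
        cv::Mat frame;
        cv::gpu::GpuMat d_frame;

        while (capture.read(frame))
        {
            d_frame.upload(frame);                       // BGR frames are converted on the GPU before encoding

            if (d_writer.empty())                        // lazily create once the frame size is known
                d_writer = cv::gpucodec::createVideoWriter("output_gpu.avi", frame.size(), FPS);

            d_writer->write(d_frame);
        }
        return 0;
    }

The EncoderParams overloads follow the same shape; the keys consumed by EncoderParams::load() above (P_Interval, IDR_Period, AvgBitrate, ...) are what a FileStorage config passed to the EncoderParams constructor is expected to contain.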

@ -57,19 +57,15 @@ GPU_TEST_P(Video, Reader)
const std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1); const std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
cv::gpu::VideoReader_GPU reader(inputFile); cv::Ptr<cv::gpucodec::VideoReader> reader = cv::gpucodec::createVideoReader(inputFile);
ASSERT_TRUE(reader.isOpened());
cv::gpu::GpuMat frame; cv::gpu::GpuMat frame;
for (int i = 0; i < 10; ++i) for (int i = 0; i < 10; ++i)
{ {
ASSERT_TRUE(reader.read(frame)); ASSERT_TRUE(reader->nextFrame(frame));
ASSERT_FALSE(frame.empty()); ASSERT_FALSE(frame.empty());
} }
reader.close();
ASSERT_FALSE(reader.isOpened());
} }
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
@ -89,7 +85,7 @@ GPU_TEST_P(Video, Writer)
cv::VideoCapture reader(inputFile); cv::VideoCapture reader(inputFile);
ASSERT_TRUE(reader.isOpened()); ASSERT_TRUE(reader.isOpened());
cv::gpu::VideoWriter_GPU d_writer; cv::Ptr<cv::gpucodec::VideoWriter> d_writer;
cv::Mat frame; cv::Mat frame;
cv::gpu::GpuMat d_frame; cv::gpu::GpuMat d_frame;
@ -101,14 +97,14 @@ GPU_TEST_P(Video, Writer)
d_frame.upload(frame); d_frame.upload(frame);
if (!d_writer.isOpened()) if (d_writer.empty())
d_writer.open(outputFile, frame.size(), FPS); d_writer = cv::gpucodec::createVideoWriter(outputFile, frame.size(), FPS);
d_writer.write(d_frame); d_writer->write(d_frame);
} }
reader.release(); reader.release();
d_writer.close(); d_writer.release();
reader.open(outputFile); reader.open(outputFile);
ASSERT_TRUE(reader.isOpened()); ASSERT_TRUE(reader.isOpened());

@ -210,7 +210,7 @@ namespace
private: private:
String fileName_; String fileName_;
VideoReader_GPU reader_; Ptr<gpucodec::VideoReader> reader_;
GpuMat frame_; GpuMat frame_;
}; };
@ -223,13 +223,13 @@ namespace
{ {
if (_frame.kind() == _InputArray::GPU_MAT) if (_frame.kind() == _InputArray::GPU_MAT)
{ {
bool res = reader_.read(_frame.getGpuMatRef()); bool res = reader_->nextFrame(_frame.getGpuMatRef());
if (!res) if (!res)
_frame.release(); _frame.release();
} }
else else
{ {
bool res = reader_.read(frame_); bool res = reader_->nextFrame(frame_);
if (!res) if (!res)
_frame.release(); _frame.release();
else else
@ -239,9 +239,7 @@ namespace
void VideoFrameSource_GPU::reset() void VideoFrameSource_GPU::reset()
{ {
reader_.close(); reader_ = gpucodec::createVideoReader(fileName_);
reader_.open(fileName_);
CV_Assert( reader_.isOpened() );
} }
} }

@ -30,8 +30,7 @@ int main(int argc, const char* argv[])
cv::VideoCapture reader(fname); cv::VideoCapture reader(fname);
cv::gpu::GpuMat d_frame; cv::gpu::GpuMat d_frame;
cv::gpu::VideoReader_GPU d_reader(fname); cv::Ptr<cv::gpucodec::VideoReader> d_reader = cv::gpucodec::createVideoReader(fname);
d_reader.dumpFormat(std::cout);
cv::TickMeter tm; cv::TickMeter tm;
std::vector<double> cpu_times; std::vector<double> cpu_times;
@ -46,7 +45,7 @@ int main(int argc, const char* argv[])
cpu_times.push_back(tm.getTimeMilli()); cpu_times.push_back(tm.getTimeMilli());
tm.reset(); tm.start(); tm.reset(); tm.start();
if (!d_reader.read(d_frame)) if (!d_reader->nextFrame(d_frame))
break; break;
tm.stop(); tm.stop();
gpu_times.push_back(tm.getTimeMilli()); gpu_times.push_back(tm.getTimeMilli());

@ -33,7 +33,7 @@ int main(int argc, const char* argv[])
cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice()); cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice());
cv::VideoWriter writer; cv::VideoWriter writer;
cv::gpu::VideoWriter_GPU d_writer; cv::Ptr<cv::gpucodec::VideoWriter> d_writer;
cv::Mat frame; cv::Mat frame;
cv::gpu::GpuMat d_frame; cv::gpu::GpuMat d_frame;
@ -64,11 +64,11 @@ int main(int argc, const char* argv[])
return -1; return -1;
} }
if (!d_writer.isOpened()) if (d_writer.empty())
{ {
std::cout << "Open GPU Writer" << std::endl; std::cout << "Open GPU Writer" << std::endl;
d_writer.open("output_gpu.avi", frame.size(), FPS); d_writer = cv::gpucodec::createVideoWriter("output_gpu.avi", frame.size(), FPS);
} }
d_frame.upload(frame); d_frame.upload(frame);
@ -81,7 +81,7 @@ int main(int argc, const char* argv[])
cpu_times.push_back(tm.getTimeMilli()); cpu_times.push_back(tm.getTimeMilli());
tm.reset(); tm.start(); tm.reset(); tm.start();
d_writer.write(d_frame); d_writer->write(d_frame);
tm.stop(); tm.stop();
gpu_times.push_back(tm.getTimeMilli()); gpu_times.push_back(tm.getTimeMilli());
} }