"atomic bomb" commit. Reorganized OpenCV directory structure

This commit is contained in:
Vadim Pisarevsky
2010-05-11 17:44:00 +00:00
commit 127d6649a1
1761 changed files with 1766340 additions and 0 deletions

View File

@@ -0,0 +1,3 @@
# Make the bundled third-party headers (zlib, flann, ...) visible to this module.
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../../3rdparty/include")
# Libraries the core module links against.
set(deps opencv_lapack zlib flann)
define_opencv_module(core ${deps})

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,187 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CORE_EIGEN_HPP__
#define __OPENCV_CORE_EIGEN_HPP__
#ifdef __cplusplus
#include "cxcore.h"
#include <Eigen/Core>
namespace cv
{
// Copies an Eigen matrix into an OpenCV Mat. The Eigen storage is wrapped in
// a temporary Mat header (no allocation); depending on the source storage
// order the data is either copied directly or transposed into dst.
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst )
{
    const bool isRowMajor = (src.Flags & Eigen::RowMajorBit) != 0;
    if( isRowMajor )
    {
        // Row-major source matches Mat's layout: wrap and copy as-is.
        Mat wrapper(src.rows(), src.cols(), DataType<_Tp>::type,
                    (void*)src.data(), src.stride()*sizeof(_Tp));
        wrapper.copyTo(dst);
    }
    else
    {
        // Column-major data viewed through a row-major Mat header is the
        // transpose; transposing it back produces dst in the right orientation.
        Mat wrapper(src.cols(), src.rows(), DataType<_Tp>::type,
                    (void*)src.data(), src.stride()*sizeof(_Tp));
        transpose(wrapper, dst);
    }
}
// Copies an OpenCV Mat into a fixed-size Eigen matrix, converting the element
// type if needed. The destination's own storage is wrapped in a Mat header so
// the copy/transpose writes directly into dst without reallocating.
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
void cv2eigen( const Mat& src,
               Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
{
    CV_DbgAssert(src.rows == _rows && src.cols == _cols);
    if( !(dst.Flags & Eigen::RowMajorBit) )
    {
        // Column-major destination: viewed through a row-major Mat header it
        // appears transposed, so write the transpose of src into it.
        Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
        if( src.type() == _dst.type() )
            transpose(src, _dst);
        else if( src.cols == src.rows )
        {
            // Square matrix: convert in place, then transpose in place.
            src.convertTo(_dst, _dst.type());
            transpose(_dst, _dst);
        }
        else
            // Non-square with type conversion: materialize the transpose first.
            Mat(src.t()).convertTo(_dst, _dst.type());
        CV_DbgAssert(_dst.data == (uchar*)dst.data()); // dst storage was reused, not reallocated
    }
    else
    {
        // Row-major destination: layouts agree, a plain convert/copy suffices.
        Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
        src.convertTo(_dst, _dst.type());
        CV_DbgAssert(_dst.data == (uchar*)dst.data());
    }
}
template<typename _Tp>
void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
{
dst.resize(src.rows, src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else if( src.cols == src.rows )
{
src.convertTo(_dst, _dst.type());
transpose(_dst, _dst);
}
else
Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
}
else
{
Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
}
}
// Converts a single-column Mat into a dynamic Eigen column vector,
// converting the element type when necessary.
template<typename _Tp>
void cv2eigen( const Mat& src,
               Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
{
    CV_Assert(src.cols == 1);
    dst.resize(src.rows);
    if( !(dst.Flags & Eigen::RowMajorBit) )
    {
        // Column-major destination: wrap it as a 1 x rows row-major Mat and
        // write the transpose of src into it.
        Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
        if( src.type() == _dst.type() )
            transpose(src, _dst);
        else
            Mat(src.t()).convertTo(_dst, _dst.type());
        CV_DbgAssert(_dst.data == (uchar*)dst.data()); // dst storage was reused
    }
    else
    {
        // Row-major destination: layouts already agree, convert/copy directly.
        Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
                 dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
        src.convertTo(_dst, _dst.type());
        CV_DbgAssert(_dst.data == (uchar*)dst.data());
    }
}
template<typename _Tp>
void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
{
CV_Assert(src.rows == 1);
dst.resize(src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else
Mat(src.t()).convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
}
else
{
Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
}
}
}
#endif
#endif

View File

@@ -0,0 +1,220 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CORE_FLANN_HPP__
#define __OPENCV_CORE_FLANN_HPP__
#ifdef __cplusplus
namespace flann
{
class Index;
}
namespace cv {
namespace flann {
/* Nearest neighbor index algorithms */
enum flann_algorithm_t {
    LINEAR = 0,        // brute-force linear scan
    KDTREE = 1,        // randomized kd-trees
    KMEANS = 2,        // hierarchical kmeans tree
    COMPOSITE = 3,     // kd-trees combined with a kmeans tree
    SAVED = 254,       // index loaded from a file
    AUTOTUNED = 255    // algorithm and parameters selected automatically
};

/* How the initial cluster centers are picked for the kmeans tree */
enum flann_centers_init_t {
    CENTERS_RANDOM = 0,    // random dataset points
    CENTERS_GONZALES = 1,  // Gonzales' algorithm
    CENTERS_KMEANSPP = 2   // kmeans++ seeding
};

/* Verbosity levels for FLANN logging, from silent to most verbose */
enum flann_log_level_t {
    LOG_NONE = 0,
    LOG_FATAL = 1,
    LOG_ERROR = 2,
    LOG_WARN = 3,
    LOG_INFO = 4
};

/* Distance metrics */
enum flann_distance_t {
    EUCLIDEAN = 1,
    MANHATTAN = 2,
    MINKOWSKI = 3
};
// Abstract factory implemented by every *IndexParams type: given a dataset
// it builds the corresponding FLANN index.
class CV_EXPORTS IndexFactory
{
public:
    virtual ~IndexFactory() {}
    // Builds a new index over dataset; the caller owns the returned pointer.
    virtual ::flann::Index* createIndex(const Mat& dataset) const = 0;
};
// Common base of all index parameter sets; only constructible by subclasses.
struct CV_EXPORTS IndexParams : public IndexFactory {
protected:
    IndexParams() {}  // was "{};" — stray semicolon after the ctor body removed
};
// Parameters of the brute-force (linear scan) index; it has no options.
struct CV_EXPORTS LinearIndexParams : public IndexParams {
    LinearIndexParams() {}  // stray ";" after the ctor body removed
    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Parameters of the randomized kd-tree index.
struct CV_EXPORTS KDTreeIndexParams : public IndexParams {
    KDTreeIndexParams(int trees_ = 4) : trees(trees_) {}  // stray ";" removed

    int trees; // number of randomized trees to use (for kdtree)

    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Parameters of the hierarchical kmeans-tree index.
struct CV_EXPORTS KMeansIndexParams : public IndexParams {
    // Note: default changed from the double literal 0.2 to 0.2f to avoid an
    // implicit double->float narrowing; the value is unchanged. The stray ";"
    // after the ctor body is also removed.
    KMeansIndexParams(int branching_ = 32, int iterations_ = 11,
                      flann_centers_init_t centers_init_ = CENTERS_RANDOM, float cb_index_ = 0.2f ) :
        branching(branching_),
        iterations(iterations_),
        centers_init(centers_init_),
        cb_index(cb_index_) {}

    int branching;                     // branching factor (for kmeans tree)
    int iterations;                    // max iterations to perform in one kmeans clustering (kmeans tree)
    flann_centers_init_t centers_init; // algorithm used for picking the initial cluster centers for kmeans tree
    float cb_index;                    // cluster boundary index. Used when searching the kmeans tree

    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Parameters of the composite index (randomized kd-trees + kmeans tree).
struct CV_EXPORTS CompositeIndexParams : public IndexParams {
    // 0.2f instead of 0.2 avoids double->float narrowing (same value);
    // the stray ";" after the ctor body is removed.
    CompositeIndexParams(int trees_ = 4, int branching_ = 32, int iterations_ = 11,
                         flann_centers_init_t centers_init_ = CENTERS_RANDOM, float cb_index_ = 0.2f ) :
        trees(trees_),
        branching(branching_),
        iterations(iterations_),
        centers_init(centers_init_),
        cb_index(cb_index_) {}

    int trees;                         // number of randomized trees to use (for kdtree)
    int branching;                     // branching factor (for kmeans tree)
    int iterations;                    // max iterations to perform in one kmeans clustering (kmeans tree)
    flann_centers_init_t centers_init; // algorithm used for picking the initial cluster centers for kmeans tree
    float cb_index;                    // cluster boundary index. Used when searching the kmeans tree

    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Parameters of the auto-tuned index: algorithm and parameters are chosen by
// weighting search precision against build time and memory usage.
struct CV_EXPORTS AutotunedIndexParams : public IndexParams {
    // Float literals carry the f suffix to avoid double->float narrowing
    // (values unchanged); the stray ";" after the ctor body is removed.
    AutotunedIndexParams( float target_precision_ = 0.9f, float build_weight_ = 0.01f,
                          float memory_weight_ = 0.f, float sample_fraction_ = 0.1f) :
        target_precision(target_precision_),
        build_weight(build_weight_),
        memory_weight(memory_weight_),
        sample_fraction(sample_fraction_) {}

    float target_precision; // precision desired (used for autotuning, -1 otherwise)
    float build_weight;     // build tree time weighting factor
    float memory_weight;    // index memory weighting factor
    float sample_fraction;  // what fraction of the dataset to use for autotuning

    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Parameters for loading a previously saved index back from a file.
struct CV_EXPORTS SavedIndexParams : public IndexParams {
    SavedIndexParams() {}
    SavedIndexParams(std::string filename_) : filename(filename_) {}

    std::string filename; // filename of the stored index

    ::flann::Index* createIndex(const Mat& dataset) const;
};
// Runtime parameters for knn/radius searches.
struct CV_EXPORTS SearchParams {
    SearchParams(int checks_ = 32) :
        checks(checks_) {}  // stray ";" after the ctor body removed

    // Number of "checks" performed during search; per FLANN, larger values
    // trade speed for accuracy.
    int checks;
};
// Wrapper around a FLANN nearest-neighbor index built over a feature matrix.
class CV_EXPORTS Index {
    ::flann::Index* nnIndex;  // underlying FLANN implementation object

public:
    // Builds the index over `features` using the algorithm selected by `params`.
    Index(const Mat& features, const IndexParams& params);
    ~Index();

    // k-nearest-neighbor search; results are written into indices/dists.
    void knnSearch(const vector<float>& queries, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& params);
    void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& params);

    // Radius search around a single query. NOTE(review): the int return value
    // presumably is the number of neighbors found — confirm in the implementation.
    int radiusSearch(const vector<float>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& params);
    int radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& params);

    // Persists the built index to `filename`.
    void save(std::string filename);

    int veclen() const;  // dimensionality of the indexed vectors
    int size() const;    // number of indexed vectors
};
// Clusters the given features using hierarchical kmeans with the supplied
// parameters; cluster centers are written to `centers`. NOTE(review): the
// return value presumably is the number of clusters computed — confirm in
// the implementation.
CV_EXPORTS int hierarchicalClustering(const Mat& features, Mat& centers,
                                      const KMeansIndexParams& params);
}
}
#endif // __cplusplus
#endif

View File

@@ -0,0 +1,681 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* The header is for internal use and it is likely to change.
It contains some macro definitions that are used in cxcore, cv, cvaux
and, probably, other libraries. If you need some of this functionality,
the safe way is to copy it into your code and rename the macros.
*/
#ifndef __OPENCV_CORE_INTERNAL_HPP__
#define __OPENCV_CORE_INTERNAL_HPP__
#include <vector>
#ifdef HAVE_CONFIG_H
#include <cvconfig.h>
#endif
#if defined WIN32 || defined _WIN32
# ifndef WIN32
# define WIN32
# endif
# ifndef _WIN32
# define _WIN32
# endif
#endif
#if defined WIN32 || defined WINCE
#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
#endif
#include <windows.h>
#undef small
#undef min
#undef max
#else
#include <pthread.h>
#include <sys/mman.h>
#endif
#ifdef __BORLANDC__
#ifndef WIN32
#define WIN32
#endif
#ifndef _WIN32
#define _WIN32
#endif
#define CV_DLL
#undef _CV_ALWAYS_PROFILE_
#define _CV_ALWAYS_NO_PROFILE_
#endif
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#define __BEGIN__ __CV_BEGIN__
#define __END__ __CV_END__
#define EXIT __CV_EXIT__
#ifdef HAVE_IPP
#include "ipp.h"
/* Convenience constructor for the IPP size struct. */
CV_INLINE IppiSize ippiSize(int width, int height)
{
    IppiSize size = { width, height };
    return size;
}
#endif
#if defined __SSE2__ || _MSC_VER >= 1300
#include "emmintrin.h"
#define CV_SSE 1
#define CV_SSE2 1
#if defined __SSE3__ || _MSC_VER >= 1400
#include "pmmintrin.h"
#define CV_SSE3 1
#endif
#else
#define CV_SSE 0
#define CV_SSE2 0
#define CV_SSE3 0
#endif
#ifndef IPPI_CALL
#define IPPI_CALL(func) CV_Assert((func) >= 0)
#endif
#ifdef HAVE_TBB
#include "tbb/tbb_stddef.h"
#if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
#include "tbb/tbb.h"
#undef min
#undef max
#else
#undef HAVE_TBB
#endif
#endif
#ifdef __cplusplus
#ifdef HAVE_TBB
namespace cv
{
    // Thin wrappers over Intel TBB, used when HAVE_TBB is defined.
    // A serial fallback with the same interface exists for non-TBB builds.
    typedef tbb::blocked_range<int> BlockedRange;

    // Runs body over subranges of range, possibly in parallel.
    template<typename Body> static inline
    void parallel_for( const BlockedRange& range, const Body& body )
    {
        tbb::parallel_for(range, body);
    }

    // Applies body to each element in [first, last), possibly in parallel.
    template<typename Iterator, typename Body> static inline
    void parallel_do( Iterator first, Iterator last, const Body& body )
    {
        tbb::parallel_do(first, last, body);
    }

    typedef tbb::split Split;

    // Parallel reduction: body accumulates results over subranges.
    template<typename Body> static inline
    void parallel_reduce( const BlockedRange& range, Body& body )
    {
        tbb::parallel_reduce(range, body);
    }

    // Rect container that tolerates concurrent appends from worker threads.
    typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
}
#else
namespace cv
{
// Serial stand-in for tbb::blocked_range<int>: a half-open index range
// [begin, end) with a grain size hint.
class BlockedRange
{
public:
    // Empty range.
    BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
    // Range [b, e) with grain size hint g (defaults to 1).
    BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}

    int begin() const
    {
        return _begin;
    }
    int end() const
    {
        return _end;
    }
    int grainsize() const
    {
        return _grainsize;
    }

protected:
    int _begin, _end, _grainsize;
};
// Serial fallback used when TBB is unavailable: the whole range is handed to
// body at once, in the calling thread.
template<typename Body> static inline
void parallel_for( const BlockedRange& range, const Body& body )
{
    body(range);
}
// Serial fallback used when TBB is unavailable: applies body to each element
// of [first, last) in order, in the calling thread.
template<typename Iterator, typename Body> static inline
void parallel_do( Iterator first, Iterator last, const Body& body )
{
    while( first != last )
    {
        body(*first);
        ++first;
    }
}
// Tag type mirroring tbb::split in the serial fallback build.
class Split {};

// Serial fallback: the reduction body processes the whole range at once in
// the calling thread.
template<typename Body> static inline
void parallel_reduce( const BlockedRange& range, Body& body )
{
    body(range);
}

// Serial stand-in for tbb::concurrent_vector<Rect>.
typedef std::vector<Rect> ConcurrentRectVector;
}
#endif
#endif
/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */
#define CV_MAX_INLINE_MAT_OP_SIZE 10
/* maximal linear size of matrix to allocate it on stack. */
#define CV_MAX_LOCAL_MAT_SIZE 32
/* maximal size of local memory storage */
#define CV_MAX_LOCAL_SIZE \
(CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double))
/* default image row align (in bytes) */
#define CV_DEFAULT_IMAGE_ROW_ALIGN 4
/* matrices are continuous by default */
#define CV_DEFAULT_MAT_ROW_ALIGN 1
/* maximum size of dynamic memory buffer.
cvAlloc reports an error if a larger block is requested. */
#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2)))
/* the alignment of all the allocated buffers */
#define CV_MALLOC_ALIGN 16
/* default alignment for dynamic data structures, residing in storages. */
#define CV_STRUCT_ALIGN ((int)sizeof(double))
/* default storage block size */
#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128)
/* default memory block for sparse array elements */
#define CV_SPARSE_MAT_BLOCK (1<<12)
/* initial hash table size */
#define CV_SPARSE_HASH_SIZE0 (1<<10)
/* maximal average node_count/hash_size ratio beyond which hash table is resized */
#define CV_SPARSE_HASH_RATIO 3
/* max length of strings */
#define CV_MAX_STRLEN 1024
#if 0 /*def CV_CHECK_FOR_NANS*/
#define CV_CHECK_NANS( arr ) cvCheckArray((arr))
#else
#define CV_CHECK_NANS( arr )
#endif
/****************************************************************************************\
* Common declarations *
\****************************************************************************************/
/* get alloca declaration */
#ifdef __GNUC__
#undef alloca
#define alloca __builtin_alloca
#elif defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64 || \
defined WINCE || defined _MSC_VER || defined __BORLANDC__
#include <malloc.h>
#elif defined HAVE_ALLOCA_H
#include <alloca.h>
#elif defined HAVE_ALLOCA
#include <stdlib.h>
#else
#error "No alloca!"
#endif
#ifdef __GNUC__
#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
#elif defined _MSC_VER
#define CV_DECL_ALIGNED(x) __declspec(align(x))
#else
#define CV_DECL_ALIGNED(x)
#endif
/* ! DO NOT make it an inline function */
#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
#if defined _MSC_VER || defined __BORLANDC__
#define CV_BIG_INT(n) n##I64
#define CV_BIG_UINT(n) n##UI64
#else
#define CV_BIG_INT(n) n##LL
#define CV_BIG_UINT(n) n##ULL
#endif
#define CV_IMPL CV_EXTERN_C
#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; }
/* default step, set in case of continuous data
to work around checks for valid step in some ipp functions */
#define CV_STUB_STEP (1 << 30)
#define CV_SIZEOF_FLOAT ((int)sizeof(float))
#define CV_SIZEOF_SHORT ((int)sizeof(short))
#define CV_ORIGIN_TL 0
#define CV_ORIGIN_BL 1
/* IEEE754 constants and macros */
#define CV_POS_INF 0x7f800000
#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */
#define CV_1F 0x3f800000
#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0))
#define CV_TOGGLE_DBL(x) \
((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0))
#define CV_NOP(a) (a)
#define CV_ADD(a, b) ((a) + (b))
#define CV_SUB(a, b) ((a) - (b))
#define CV_MUL(a, b) ((a) * (b))
#define CV_AND(a, b) ((a) & (b))
#define CV_OR(a, b) ((a) | (b))
#define CV_XOR(a, b) ((a) ^ (b))
#define CV_ANDN(a, b) (~(a) & (b))
#define CV_ORN(a, b) (~(a) | (b))
#define CV_SQR(a) ((a) * (a))
#define CV_LT(a, b) ((a) < (b))
#define CV_LE(a, b) ((a) <= (b))
#define CV_EQ(a, b) ((a) == (b))
#define CV_NE(a, b) ((a) != (b))
#define CV_GT(a, b) ((a) > (b))
#define CV_GE(a, b) ((a) >= (b))
#define CV_NONZERO(a) ((a) != 0)
#define CV_NONZERO_FLT(a) (((a)+(a)) != 0)
/* general-purpose saturation macros */
#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0)
#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128)
#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0)
#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768)
#define CV_CAST_32S(t) (int)(t)
#define CV_CAST_64S(t) (int64)(t)
#define CV_CAST_32F(t) (float)(t)
#define CV_CAST_64F(t) (double)(t)
#define CV_PASTE2(a,b) a##b
#define CV_PASTE(a,b) CV_PASTE2(a,b)
#define CV_EMPTY
#define CV_MAKE_STR(a) #a
#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x)))
#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0])))
#define cvUnsupportedFormat "Unsupported format"
/* Rounds ptr up to the next multiple of align; align must be a power of two
   (default 32). */
CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) )
{
    assert( (align & (align-1)) == 0 );
    size_t addr = (size_t)ptr + (size_t)(align - 1);
    addr &= ~(size_t)(align - 1);
    return (void*)addr;
}
/* Rounds size up to the nearest multiple of align; align must be a power of
   two. (-align == ~(align-1) in two's complement, so the mask form below is
   equivalent to the classic "& -align".) */
CV_INLINE int cvAlign( int size, int align )
{
    assert( (align & (align-1)) == 0 && size < INT_MAX );
    const int mask = align - 1;
    return (size + mask) & ~mask;
}
/* Returns the matrix dimensions as a CvSize (width = cols, height = rows). */
CV_INLINE CvSize cvGetMatSize( const CvMat* mat )
{
    CvSize size;
    size.height = mat->rows;
    size.width = mat->cols;
    return size;
}
#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))
#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n)))
/****************************************************************************************\
Generic implementation of QuickSort algorithm.
----------------------------------------------
Using this macro user can declare customized sort function that can be much faster
than built-in qsort function because of lower overhead on elements
comparison and exchange. The macro takes less_than (or LT) argument - a macro or function
that takes 2 arguments returns non-zero if the first argument should be before the second
one in the sorted sequence and zero otherwise.
Example:
Suppose that the task is to sort points by ascending of y coordinates and if
y's are equal x's should ascend.
The code is:
------------------------------------------------------------------------------
#define cmp_pts( pt1, pt2 ) \
((pt1).y < (pt2).y || ((pt1).y == (pt2).y && (pt1).x < (pt2).x))
[static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts )
------------------------------------------------------------------------------
After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );"
is available to user.
aux is an additional parameter, which can be used when comparing elements.
The current implementation was derived from *BSD system qsort():
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
\****************************************************************************************/
#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \
void func_name( T *array, size_t total, user_data_type aux ) \
{ \
int isort_thresh = 7; \
T t; \
int sp = 0; \
\
struct \
{ \
T *lb; \
T *ub; \
} \
stack[48]; \
\
aux = aux; \
\
if( total <= 1 ) \
return; \
\
stack[0].lb = array; \
stack[0].ub = array + (total - 1); \
\
while( sp >= 0 ) \
{ \
T* left = stack[sp].lb; \
T* right = stack[sp--].ub; \
\
for(;;) \
{ \
int i, n = (int)(right - left) + 1, m; \
T* ptr; \
T* ptr2; \
\
if( n <= isort_thresh ) \
{ \
insert_sort: \
for( ptr = left + 1; ptr <= right; ptr++ ) \
{ \
for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \
CV_SWAP( ptr2[0], ptr2[-1], t ); \
} \
break; \
} \
else \
{ \
T* left0; \
T* left1; \
T* right0; \
T* right1; \
T* pivot; \
T* a; \
T* b; \
T* c; \
int swap_cnt = 0; \
\
left0 = left; \
right0 = right; \
pivot = left + (n/2); \
\
if( n > 40 ) \
{ \
int d = n / 8; \
a = left, b = left + d, c = left + 2*d; \
left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
: (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
\
a = pivot - d, b = pivot, c = pivot + d; \
pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
: (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
\
a = right - 2*d, b = right - d, c = right; \
right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
: (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
} \
\
a = left, b = pivot, c = right; \
pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
: (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
if( pivot != left0 ) \
{ \
CV_SWAP( *pivot, *left0, t ); \
pivot = left0; \
} \
left = left1 = left0 + 1; \
right = right1 = right0; \
\
for(;;) \
{ \
while( left <= right && !LT(*pivot, *left) ) \
{ \
if( !LT(*left, *pivot) ) \
{ \
if( left > left1 ) \
CV_SWAP( *left1, *left, t ); \
swap_cnt = 1; \
left1++; \
} \
left++; \
} \
\
while( left <= right && !LT(*right, *pivot) ) \
{ \
if( !LT(*pivot, *right) ) \
{ \
if( right < right1 ) \
CV_SWAP( *right1, *right, t ); \
swap_cnt = 1; \
right1--; \
} \
right--; \
} \
\
if( left > right ) \
break; \
CV_SWAP( *left, *right, t ); \
swap_cnt = 1; \
left++; \
right--; \
} \
\
if( swap_cnt == 0 ) \
{ \
left = left0, right = right0; \
goto insert_sort; \
} \
\
n = MIN( (int)(left1 - left0), (int)(left - left1) ); \
for( i = 0; i < n; i++ ) \
CV_SWAP( left0[i], left[i-n], t ); \
\
n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \
for( i = 0; i < n; i++ ) \
CV_SWAP( left[i], right0[i-n+1], t ); \
n = (int)(left - left1); \
m = (int)(right1 - right); \
if( n > 1 ) \
{ \
if( m > 1 ) \
{ \
if( n > m ) \
{ \
stack[++sp].lb = left0; \
stack[sp].ub = left0 + n - 1; \
left = right0 - m + 1, right = right0; \
} \
else \
{ \
stack[++sp].lb = right0 - m + 1; \
stack[sp].ub = right0; \
left = left0, right = left0 + n - 1; \
} \
} \
else \
left = left0, right = left0 + n - 1; \
} \
else if( m > 1 ) \
left = right0 - m + 1, right = right0; \
else \
break; \
} \
} \
} \
}
#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \
CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int )
/****************************************************************************************\
* Structures and macros for integration with IPP *
\****************************************************************************************/
/* IPP-compatible return codes */
/* IPP-compatible status codes returned by low-level functions:
   CV_NO_ERR/CV_OK (0) means success, negative values identify the error. */
typedef enum CvStatus
{
    CV_BADMEMBLOCK_ERR          = -113,
    CV_INPLACE_NOT_SUPPORTED_ERR= -112,
    CV_UNMATCHED_ROI_ERR        = -111,
    CV_NOTFOUND_ERR             = -110,
    CV_BADCONVERGENCE_ERR       = -109,
    CV_BADDEPTH_ERR             = -107,
    CV_BADROI_ERR               = -106,
    CV_BADHEADER_ERR            = -105,
    CV_UNMATCHED_FORMATS_ERR    = -104,
    CV_UNSUPPORTED_COI_ERR      = -103,
    CV_UNSUPPORTED_CHANNELS_ERR = -102,
    CV_UNSUPPORTED_DEPTH_ERR    = -101,
    CV_UNSUPPORTED_FORMAT_ERR   = -100,
    CV_BADARG_ERR               = -49,  //ipp comp
    CV_NOTDEFINED_ERR           = -48,  //ipp comp
    CV_BADCHANNELS_ERR          = -47,  //ipp comp
    CV_BADRANGE_ERR             = -44,  //ipp comp
    CV_BADSTEP_ERR              = -29,  //ipp comp
    CV_BADFLAG_ERR              = -12,
    CV_DIV_BY_ZERO_ERR          = -11,  //ipp comp
    CV_BADCOEF_ERR              = -10,
    CV_BADFACTOR_ERR            = -7,
    CV_BADPOINT_ERR             = -6,
    CV_BADSCALE_ERR             = -4,
    CV_OUTOFMEM_ERR             = -3,
    CV_NULLPTR_ERR              = -2,
    CV_BADSIZE_ERR              = -1,
    CV_NO_ERR                   = 0,
    CV_OK                       = CV_NO_ERR
}
CvStatus;
#define CV_NOTHROW throw()
/* Function dispatch table indexed by matrix depth (CV_8U ... CV_64F). */
typedef struct CvFuncTable
{
    void* fn_2d[CV_DEPTH_MAX];
}
CvFuncTable;

/* Dispatch table indexed by both depth and channel count. */
typedef struct CvBigFuncTable
{
    void* fn_2d[CV_DEPTH_MAX*CV_CN_MAX];
}
CvBigFuncTable;
#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \
(tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \
(tab).fn_2d[CV_8S] = 0; \
(tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \
(tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \
(tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \
(tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
(tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,58 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort(including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
definition of the current version of OpenCV
Useful to test in user programs
*/
// Include guard for the version header.
#ifndef __OPENCV_VERSION_HPP__
#define __OPENCV_VERSION_HPP__
// Numeric version components (this release is 2.1.0).
#define CV_MAJOR_VERSION 2
#define CV_MINOR_VERSION 1
#define CV_SUBMINOR_VERSION 0
// Two-level stringification so macro VALUES (not names) are expanded.
#define CVAUX_STR_EXP(__A) #__A
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)
// Full dotted version string, e.g. "2.1.0".
#define CV_VERSION CVAUX_STR(CV_MAJOR_VERSION) "." CVAUX_STR(CV_MINOR_VERSION) "." CVAUX_STR(CV_SUBMINOR_VERSION)
#endif

View File

@@ -0,0 +1,621 @@
///////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to
// this license. If you do not agree to this license, do not download,
// install, copy or use the software.
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Google, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation or contributors may not be used to endorse
// or promote products derived from this software without specific
// prior written permission.
//
// This software is provided by the copyright holders and contributors "as is"
// and any express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular purpose
// are disclaimed. In no event shall the Intel Corporation or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
/////////////////////////////////////////////////////////////////////////////////
//
// Image class which provides a thin layer around an IplImage. The goals
// of the class design are:
// 1. All the data has explicit ownership to avoid memory leaks
// 2. No hidden allocations or copies for performance.
// 3. Easy access to OpenCV methods (which will access IPP if available)
// 4. Can easily treat external data as an image
// 5. Easy to create images which are subsets of other images
// 6. Fast pixel access which can take advantage of number of channels
// if known at compile time.
//
// The WImage class is the image class which provides the data accessors.
// The 'W' comes from the fact that it is also a wrapper around the popular
// but inconvenient IplImage class. A WImage can be constructed either using a
// WImageBuffer class which allocates and frees the data,
// or using a WImageView class which constructs a subimage or a view into
// external data. The view class does no memory management. Each class
// actually has two versions, one when the number of channels is known at
// compile time and one when it isn't. Using the one with the number of
// channels specified can provide some compile time optimizations by using the
// fact that the number of channels is a constant.
//
// We use the convention (c,r) to refer to column c and row r with (0,0) being
// the upper left corner. This is similar to standard Euclidean coordinates
// with the first coordinate varying in the horizontal direction and the second
// coordinate varying in the vertical direction.
// Thus (c,r) is usually in the domain [0, width) X [0, height)
//
// Example usage:
// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar
// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix
// vector<float> vec(10, 3.0f);
// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data
//
// im.SetZero(); // same as cvSetZero(im.Ipl())
// *im(2, 3) = 15; // Modify the element at column 2, row 3
// MySetRand(&sub_im);
//
// // Copy the second row into the first. This can be done with no memory
// // allocation and will use SSE if IPP is available.
// int w = im.Width();
// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1));
//
// // Doesn't care about source of data since using WImage
// void MySetRand(WImage_b* im) { // Works with any number of channels
// for (int r = 0; r < im->Height(); ++r) {
// float* row = im->Row(r);
// for (int c = 0; c < im->Width(); ++c) {
// for (int ch = 0; ch < im->Channels(); ++ch, ++row) {
// *row = uchar(rand() & 255);
// }
// }
// }
// }
//
// Functions that are not part of the basic image allocation, viewing, and
// access should come from OpenCV, except some useful functions that are not
// part of OpenCV can be found in wimage_util.h
#ifndef __OPENCV_CORE_WIMAGE_HPP__
#define __OPENCV_CORE_WIMAGE_HPP__
#include "opencv2/core/core_c.h"
#ifdef __cplusplus
namespace cv {
template <typename T> class WImage;
template <typename T> class WImageBuffer;
template <typename T> class WImageView;
template<typename T, int C> class WImageC;
template<typename T, int C> class WImageBufferC;
template<typename T, int C> class WImageViewC;
// Commonly used typedefs.
typedef WImage<uchar> WImage_b;
typedef WImageView<uchar> WImageView_b;
typedef WImageBuffer<uchar> WImageBuffer_b;
typedef WImageC<uchar, 1> WImage1_b;
typedef WImageViewC<uchar, 1> WImageView1_b;
typedef WImageBufferC<uchar, 1> WImageBuffer1_b;
typedef WImageC<uchar, 3> WImage3_b;
typedef WImageViewC<uchar, 3> WImageView3_b;
typedef WImageBufferC<uchar, 3> WImageBuffer3_b;
typedef WImage<float> WImage_f;
typedef WImageView<float> WImageView_f;
typedef WImageBuffer<float> WImageBuffer_f;
typedef WImageC<float, 1> WImage1_f;
typedef WImageViewC<float, 1> WImageView1_f;
typedef WImageBufferC<float, 1> WImageBuffer1_f;
typedef WImageC<float, 3> WImage3_f;
typedef WImageViewC<float, 3> WImageView3_f;
typedef WImageBufferC<float, 3> WImageBuffer3_f;
// There isn't a standard for signed and unsigned short so be more
// explicit in the typename for these cases.
typedef WImage<short> WImage_16s;
typedef WImageView<short> WImageView_16s;
typedef WImageBuffer<short> WImageBuffer_16s;
typedef WImageC<short, 1> WImage1_16s;
typedef WImageViewC<short, 1> WImageView1_16s;
typedef WImageBufferC<short, 1> WImageBuffer1_16s;
typedef WImageC<short, 3> WImage3_16s;
typedef WImageViewC<short, 3> WImageView3_16s;
typedef WImageBufferC<short, 3> WImageBuffer3_16s;
typedef WImage<ushort> WImage_16u;
typedef WImageView<ushort> WImageView_16u;
typedef WImageBuffer<ushort> WImageBuffer_16u;
typedef WImageC<ushort, 1> WImage1_16u;
typedef WImageViewC<ushort, 1> WImageView1_16u;
typedef WImageBufferC<ushort, 1> WImageBuffer1_16u;
typedef WImageC<ushort, 3> WImage3_16u;
typedef WImageViewC<ushort, 3> WImageView3_16u;
typedef WImageBufferC<ushort, 3> WImageBuffer3_16u;
//
// WImage definitions
//
// This WImage class gives access to the data it refers to. It can be
// constructed either by allocating the data with a WImageBuffer class or
// using the WImageView class to refer to a subimage or outside data.
// Thin, non-owning typed wrapper around an IplImage. Abstract base: concrete
// storage comes from WImageBuffer (owning) or WImageView (borrowing).
// T is the per-channel element type; depth consistency is asserted in
// debug builds via the Depth() specializations below.
template<typename T>
class WImage
{
public:
typedef T BaseType;
// WImage is an abstract class with no other virtual methods so make the
// destructor virtual.
virtual ~WImage() = 0;
// Accessors
IplImage* Ipl() {return image_; }
const IplImage* Ipl() const {return image_; }
T* ImageData() { return reinterpret_cast<T*>(image_->imageData); }
const T* ImageData() const {
return reinterpret_cast<const T*>(image_->imageData);
}
int Width() const {return image_->width; }
int Height() const {return image_->height; }
// WidthStep is the number of bytes to go to the pixel with the next y coord
int WidthStep() const {return image_->widthStep; }
int Channels() const {return image_->nChannels; }
int ChannelSize() const {return sizeof(T); } // number of bytes per channel
// Number of bytes per pixel
int PixelSize() const {return Channels() * ChannelSize(); }
// Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number
// of bits per channel and with the signed bit set.
// This is known at compile time using specializations.
int Depth() const;
// Pointer to the first channel of row r. The const overload now casts to
// const T* (previously T*), restoring const-correctness of the result.
inline const T* Row(int r) const {
return reinterpret_cast<const T*>(image_->imageData + r*image_->widthStep);
}
inline T* Row(int r) {
return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep);
}
// Pixel accessors which return a pointer to the start of the channel
// data for pixel (column c, row r).
inline T* operator() (int c, int r) {
return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) +
c*Channels();
}
inline const T* operator() (int c, int r) const {
return reinterpret_cast<const T*>(image_->imageData + r*image_->widthStep) +
c*Channels();
}
// Copy the contents from another image which is just a convenience to cvCopy
void CopyFrom(const WImage<T>& src) { cvCopy(src.Ipl(), image_); }
// Set contents to zero which is just a convenience to cvSetZero
void SetZero() { cvSetZero(image_); }
// Construct a view into a region of this image
WImageView<T> View(int c, int r, int width, int height);
protected:
// Disallow copy and assignment
WImage(const WImage&);
void operator=(const WImage&);
// Wrap an existing header; depth must match T (debug-checked).
explicit WImage(IplImage* img) : image_(img) {
assert(!img || img->depth == Depth());
}
void SetIpl(IplImage* image) {
assert(!image || image->depth == Depth());
image_ = image;
}
// Non-owning pointer to the wrapped image header (may be 0).
IplImage* image_;
};
// Image class when both the pixel type and number of channels
// are known at compile time. This wrapper will speed up some of the operations
// like accessing individual pixels using the () operator.
// Variant of WImage where the channel count C is a compile-time constant,
// enabling constant-folded pixel arithmetic in the accessors.
template<typename T, int C>
class WImageC : public WImage<T>
{
public:
typedef typename WImage<T>::BaseType BaseType;
enum { kChannels = C };
// Wrap an existing header; channel count must match C (debug-checked).
explicit WImageC(IplImage* img) : WImage<T>(img) {
assert(!img || img->nChannels == Channels());
}
// Construct a view into a region of this image
WImageViewC<T, C> View(int c, int r, int width, int height);
// Copy the contents from another image which is just a convenience to cvCopy
void CopyFrom(const WImageC<T, C>& src) {
cvCopy(src.Ipl(), WImage<T>::image_);
}
// WImageC is an abstract class with no other virtual methods so make the
// destructor virtual.
virtual ~WImageC() = 0;
// Compile-time constant channel count (shadows the dynamic lookup).
int Channels() const {return C; }
protected:
// Disallow copy and assignment
WImageC(const WImageC&);
void operator=(const WImageC&);
void SetIpl(IplImage* image) {
assert(!image || image->depth == WImage<T>::Depth());
WImage<T>::SetIpl(image);
}
};
//
// WImageBuffer definitions
//
// Image class which owns the data, so it can be allocated and is always
// freed. It cannot be copied but can be explicitly cloned.
//
// Owning image: allocates via cvCreateImage and releases in the destructor.
// Non-copyable; use CloneFrom() for an explicit deep copy.
template<typename T>
class WImageBuffer : public WImage<T>
{
public:
typedef typename WImage<T>::BaseType BaseType;
// Default constructor which creates a null buffer that can be
// allocated later with Allocate() or SetIpl().
WImageBuffer() : WImage<T>(0) {}
WImageBuffer(int width, int height, int nchannels) : WImage<T>(0) {
Allocate(width, height, nchannels);
}
// Constructor which takes ownership of a given IplImage so releases
// the image on destruction.
explicit WImageBuffer(IplImage* img) : WImage<T>(img) {}
// Allocate an image. Does nothing if current size is the same as
// the new size.
void Allocate(int width, int height, int nchannels);
// Set the data to point to an image, releasing the old data
void SetIpl(IplImage* img) {
ReleaseImage();
WImage<T>::SetIpl(img);
}
// Clone an image which reallocates the image if of a different dimension.
void CloneFrom(const WImage<T>& src) {
Allocate(src.Width(), src.Height(), src.Channels());
CopyFrom(src);
}
~WImageBuffer() {
ReleaseImage();
}
// Release the image if it isn't null.
void ReleaseImage() {
if (WImage<T>::image_) {
// cvReleaseImage wants a pointer-to-pointer and nulls it; use a
// temporary so image_ is reset through SetIpl (keeps the assert).
IplImage* image = WImage<T>::image_;
cvReleaseImage(&image);
WImage<T>::SetIpl(0);
}
}
bool IsNull() const {return WImage<T>::image_ == NULL; }
private:
// Disallow copy and assignment
WImageBuffer(const WImageBuffer&);
void operator=(const WImageBuffer&);
};
// Like a WImageBuffer class but when the number of channels is known
// at compile time.
// Like a WImageBuffer class but when the number of channels is known
// at compile time.
template<typename T, int C>
class WImageBufferC : public WImageC<T, C>
{
public:
typedef typename WImage<T>::BaseType BaseType;
enum { kChannels = C };
// Default constructor which creates a null buffer that can be
// allocated later with Allocate() or SetIpl().
WImageBufferC() : WImageC<T, C>(0) {}
WImageBufferC(int width, int height) : WImageC<T, C>(0) {
Allocate(width, height);
}
// Constructor which takes ownership of a given IplImage so releases
// the image on destruction.
explicit WImageBufferC(IplImage* img) : WImageC<T, C>(img) {}
// Allocate an image. Does nothing if current size is the same as
// the new size.
void Allocate(int width, int height);
// Set the data to point to an image, releasing the old data
void SetIpl(IplImage* img) {
ReleaseImage();
WImageC<T, C>::SetIpl(img);
}
// Clone an image which reallocates the image if of a different dimension.
void CloneFrom(const WImageC<T, C>& src) {
Allocate(src.Width(), src.Height());
CopyFrom(src);
}
~WImageBufferC() {
ReleaseImage();
}
// Release the image if it isn't null.
void ReleaseImage() {
if (WImage<T>::image_) {
// Temporary pointer: cvReleaseImage nulls its argument.
IplImage* image = WImage<T>::image_;
cvReleaseImage(&image);
WImageC<T, C>::SetIpl(0);
}
}
bool IsNull() const {return WImage<T>::image_ == NULL; }
private:
// Disallow copy and assignment
WImageBufferC(const WImageBufferC&);
void operator=(const WImageBufferC&);
};
//
// WImageView definitions
//
// View into an image class which allows treating a subimage as an image
// or treating external data as an image
//
// Non-owning view: keeps its own copy of the IplImage header (header_) so
// it can describe a sub-rectangle or external data while sharing the pixel
// memory of the underlying source. No memory management is performed.
template<typename T>
class WImageView : public WImage<T>
{
public:
typedef typename WImage<T>::BaseType BaseType;
// Construct a subimage. No checks are done that the subimage lies
// completely inside the original image.
WImageView(WImage<T>* img, int c, int r, int width, int height);
// Refer to external data.
// If not given width_step assumed to be same as width.
WImageView(T* data, int width, int height, int channels, int width_step = -1);
// Refer to external data. This does NOT take ownership
// of the supplied IplImage.
WImageView(IplImage* img) : WImage<T>(img) {}
// Copy constructor: shallow — copies the header, shares the pixel data.
WImageView(const WImage<T>& img) : WImage<T>(0) {
header_ = *(img.Ipl());
WImage<T>::SetIpl(&header_);
}
WImageView& operator=(const WImage<T>& img) {
header_ = *(img.Ipl());
WImage<T>::SetIpl(&header_);
return *this;
}
protected:
// Local header describing the viewed region; image_ points at this.
IplImage header_;
};
// View variant with compile-time channel count; see WImageView for the
// shallow header-copy semantics.
template<typename T, int C>
class WImageViewC : public WImageC<T, C>
{
public:
typedef typename WImage<T>::BaseType BaseType;
enum { kChannels = C };
// Default constructor needed for vectors of views.
WImageViewC();
virtual ~WImageViewC() {}
// Construct a subimage. No checks are done that the subimage lies
// completely inside the original image.
WImageViewC(WImageC<T, C>* img,
int c, int r, int width, int height);
// Refer to external data
WImageViewC(T* data, int width, int height, int width_step = -1);
// Refer to external data. This does NOT take ownership
// of the supplied IplImage.
WImageViewC(IplImage* img) : WImageC<T, C>(img) {}
// Copy constructor which does a shallow copy to allow multiple views
// of same data. gcc-4.1.1 gets confused if both versions of
// the constructor and assignment operator are not provided.
WImageViewC(const WImageC<T, C>& img) : WImageC<T, C>(0) {
header_ = *(img.Ipl());
WImageC<T, C>::SetIpl(&header_);
}
WImageViewC(const WImageViewC<T, C>& img) : WImageC<T, C>(0) {
header_ = *(img.Ipl());
WImageC<T, C>::SetIpl(&header_);
}
WImageViewC& operator=(const WImageC<T, C>& img) {
header_ = *(img.Ipl());
WImageC<T, C>::SetIpl(&header_);
return *this;
}
WImageViewC& operator=(const WImageViewC<T, C>& img) {
header_ = *(img.Ipl());
WImageC<T, C>::SetIpl(&header_);
return *this;
}
protected:
// Local header describing the viewed region; image_ points at this.
IplImage header_;
};
// Specializations for depth: map each element type T to its IPL depth
// constant at compile time; WImage<T> for any other T fails to link.
template<>
inline int WImage<uchar>::Depth() const {return IPL_DEPTH_8U; }
template<>
inline int WImage<signed char>::Depth() const {return IPL_DEPTH_8S; }
template<>
inline int WImage<short>::Depth() const {return IPL_DEPTH_16S; }
template<>
inline int WImage<ushort>::Depth() const {return IPL_DEPTH_16U; }
template<>
inline int WImage<int>::Depth() const {return IPL_DEPTH_32S; }
template<>
inline int WImage<float>::Depth() const {return IPL_DEPTH_32F; }
template<>
inline int WImage<double>::Depth() const {return IPL_DEPTH_64F; }
//
// Pure virtual destructors still need to be defined.
//
template<typename T> inline WImage<T>::~WImage() {}
template<typename T, int C> inline WImageC<T, C>::~WImageC() {}
//
// Allocate ImageData
//
// (Re)allocate backing storage. A no-op when the existing image already
// has the requested geometry; otherwise the old image is released and a
// fresh one is created with the depth implied by T.
template<typename T>
inline void WImageBuffer<T>::Allocate(int width, int height, int nchannels)
{
const bool same_shape = !IsNull() &&
WImage<T>::Width() == width &&
WImage<T>::Height() == height &&
WImage<T>::Channels() == nchannels;
if (same_shape)
return;
ReleaseImage();
WImage<T>::image_ = cvCreateImage(cvSize(width, height),
WImage<T>::Depth(), nchannels);
}
// (Re)allocate backing storage for the fixed-channel buffer. A no-op when
// the current image already matches width x height; channel count is the
// compile-time constant C.
template<typename T, int C>
inline void WImageBufferC<T, C>::Allocate(int width, int height)
{
const bool same_shape = !IsNull() &&
WImage<T>::Width() == width &&
WImage<T>::Height() == height;
if (same_shape)
return;
ReleaseImage();
WImageC<T, C>::SetIpl(cvCreateImage(cvSize(width, height),WImage<T>::Depth(), C));
}
//
// ImageView methods
//
// Subimage view: copy the source header, then repoint imageData at pixel
// (c, r) and shrink width/height. widthStep is kept from the source so row
// strides still walk the original image's memory correctly.
template<typename T>
WImageView<T>::WImageView(WImage<T>* img, int c, int r, int width, int height)
: WImage<T>(0)
{
header_ = *(img->Ipl());
header_.imageData = reinterpret_cast<char*>((*img)(c, r));
header_.width = width;
header_.height = height;
WImage<T>::SetIpl(&header_);
}
// Wrap external pixel data without taking ownership. When width_step <= 0
// the step chosen by cvInitImageHeader (tightly packed rows) is kept.
// (Declared with parameter name `channels`; defined here as `nchannels`.)
template<typename T>
WImageView<T>::WImageView(T* data, int width, int height, int nchannels, int width_step)
: WImage<T>(0)
{
cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), nchannels);
header_.imageData = reinterpret_cast<char*>(data);
if (width_step > 0) {
header_.widthStep = width_step;
}
WImage<T>::SetIpl(&header_);
}
// Fixed-channel subimage view; same header-aliasing technique as
// WImageView: share pixel memory, override origin and extent.
template<typename T, int C>
WImageViewC<T, C>::WImageViewC(WImageC<T, C>* img, int c, int r, int width, int height)
: WImageC<T, C>(0)
{
header_ = *(img->Ipl());
header_.imageData = reinterpret_cast<char*>((*img)(c, r));
header_.width = width;
header_.height = height;
WImageC<T, C>::SetIpl(&header_);
}
// Default view: a valid 0x0 header with null pixel data, so views can be
// stored in std::vector and assigned later.
template<typename T, int C>
WImageViewC<T, C>::WImageViewC() : WImageC<T, C>(0) {
cvInitImageHeader(&header_, cvSize(0, 0), WImage<T>::Depth(), C);
header_.imageData = reinterpret_cast<char*>(0);
WImageC<T, C>::SetIpl(&header_);
}
// Wrap external pixel data (C channels) without taking ownership. When
// width_step <= 0 the tightly packed step from cvInitImageHeader is kept.
template<typename T, int C>
WImageViewC<T, C>::WImageViewC(T* data, int width, int height, int width_step)
: WImageC<T, C>(0)
{
cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), C);
header_.imageData = reinterpret_cast<char*>(data);
if (width_step > 0) {
header_.widthStep = width_step;
}
WImageC<T, C>::SetIpl(&header_);
}
// Construct a view into a region of an image; the returned view shares
// this image's pixel memory (see WImageView subimage constructor).
template<typename T>
WImageView<T> WImage<T>::View(int c, int r, int width, int height) {
return WImageView<T>(this, c, r, width, height);
}
// Fixed-channel counterpart of WImage::View; shares pixel memory.
template<typename T, int C>
WImageViewC<T, C> WImageC<T, C>::View(int c, int r, int width, int height) {
return WImageViewC<T, C>(this, c, r, width, height);
}
} // end of namespace
#endif // __cplusplus
#endif

697
modules/core/src/alloc.cpp Normal file
View File

@@ -0,0 +1,697 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#define CV_USE_SYSTEM_MALLOC 1
namespace cv
{
// Raise CV_StsNoMem via the OpenCV error machinery. Returns 0 only to give
// callers a value to propagate when errors are configured not to throw.
static void* OutOfMemoryError(size_t size)
{
CV_Error_(CV_StsNoMem, ("Failed to allocate %lu bytes", (unsigned long)size));
return 0;
}
#if CV_USE_SYSTEM_MALLOC
// With the system allocator there is no per-thread state to tear down.
void deleteThreadAllocData() {}
// Aligned malloc: over-allocates by one pointer plus the alignment slack,
// aligns the user pointer to CV_MALLOC_ALIGN, and stashes the raw malloc
// pointer immediately before it (read back by fastFree).
void* fastMalloc( size_t size )
{
// Guard against size_t wrap-around in the padded request below; without
// this, a huge `size` could make malloc succeed with a too-small buffer.
if( size > (size_t)-1 - sizeof(void*) - CV_MALLOC_ALIGN )
return OutOfMemoryError(size);
uchar* udata = (uchar*)malloc(size + sizeof(void*) + CV_MALLOC_ALIGN);
if(!udata)
return OutOfMemoryError(size);
// Skip one pointer slot, align, and record the raw pointer at adata[-1].
uchar** adata = alignPtr((uchar**)udata + 1, CV_MALLOC_ALIGN);
adata[-1] = udata;
return adata;
}
// Release memory obtained from fastMalloc: the raw malloc pointer was
// stored one slot before the aligned user pointer. Null is a no-op.
void fastFree(void* ptr)
{
if(ptr)
{
uchar* udata = ((uchar**)ptr)[-1];
// Sanity: the stashed pointer must lie within the padding we added.
CV_DbgAssert(udata < (uchar*)ptr &&
((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
free(udata);
}
}
#else
#if 0
#define SANITY_CHECK(block) \
CV_Assert(((size_t)(block) & (MEM_BLOCK_SIZE-1)) == 0 && \
(unsigned)(block)->binIdx <= (unsigned)MAX_BIN && \
(block)->signature == MEM_BLOCK_SIGNATURE)
#else
#define SANITY_CHECK(block)
#endif
#define STAT(stmt)
#ifdef WIN32
// RAII wrapper over a Win32 CRITICAL_SECTION with a mutex-like interface.
struct CriticalSection
{
CriticalSection() { InitializeCriticalSection(&cs); }
~CriticalSection() { DeleteCriticalSection(&cs); }
void lock() { EnterCriticalSection(&cs); }
void unlock() { LeaveCriticalSection(&cs); }
bool trylock() { return TryEnterCriticalSection(&cs) != 0; }
CRITICAL_SECTION cs;
};
// Win32 backing allocation for BigBlocks; raises CV_StsNoMem on failure.
void* SystemAlloc(size_t size)
{
void* ptr = malloc(size);
return ptr ? ptr : OutOfMemoryError(size);
}
// Win32 counterpart of SystemAlloc; the size parameter is unused here
// (it exists for API symmetry with the mmap-based POSIX version).
void SystemFree(void* ptr, size_t)
{
free(ptr);
}
#else
// POSIX flavor: RAII wrapper over a pthread mutex, same interface as the
// Win32 CriticalSection above.
struct CriticalSection
{
CriticalSection() { pthread_mutex_init(&mutex, 0); }
~CriticalSection() { pthread_mutex_destroy(&mutex); }
void lock() { pthread_mutex_lock(&mutex); }
void unlock() { pthread_mutex_unlock(&mutex); }
bool trylock() { return pthread_mutex_trylock(&mutex) == 0; }
pthread_mutex_t mutex;
};
// POSIX backing allocation via anonymous mmap; raises CV_StsNoMem when the
// mapping fails. Some platforms spell MAP_ANONYMOUS as MAP_ANON.
void* SystemAlloc(size_t size)
{
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
void* ptr = 0;
ptr = mmap(ptr, size, (PROT_READ | PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
return ptr != MAP_FAILED ? ptr : OutOfMemoryError(size);
}
// munmap needs the original mapping length, hence the size parameter.
void SystemFree(void* ptr, size_t size)
{
munmap(ptr, size);
}
#endif
// Scoped lock guard: acquires the CriticalSection on construction and
// releases it when the enclosing scope exits.
struct AutoLock
{
AutoLock(CriticalSection& _cs) : cs(&_cs) { cs->lock(); }
~AutoLock() { cs->unlock(); }
CriticalSection* cs;
};
// Magic value written into every Block header to detect corruption.
const size_t MEM_BLOCK_SIGNATURE = 0x01234567;
// Blocks are 16 KB (1 << 14) and aligned to their size, so a pointer's
// block can be recovered by masking its low bits (see fastFree).
const int MEM_BLOCK_SHIFT = 14;
const size_t MEM_BLOCK_SIZE = 1 << MEM_BLOCK_SHIFT;
// Reserved at the front of each block for the Block header itself.
const size_t HDR_SIZE = 128;
// Largest request served from the pool; bigger ones go to SystemAlloc.
const size_t MAX_BLOCK_SIZE = MEM_BLOCK_SIZE - HDR_SIZE;
const int MAX_BIN = 28;
// Object size for each of the MAX_BIN+1 size classes (bins).
static const int binSizeTab[MAX_BIN+1] =
{ 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 128, 160, 192, 256, 320, 384, 480, 544, 672, 768,
896, 1056, 1328, 1600, 2688, 4048, 5408, 8128, 16256 };
// Precomputed lookup from request size to bin index: binIdx[(size+7)>>3]
// gives the smallest bin whose object size fits the request.
struct MallocTables
{
void initBinTab()
{
int i, j = 0, n;
for( i = 0; i <= MAX_BIN; i++ )
{
n = binSizeTab[i]>>3;
for( ; j <= n; j++ )
binIdx[j] = (uchar)i;
}
}
// Map a byte size (<= MAX_BLOCK_SIZE) to its size-class index.
int bin(size_t size)
{
assert( size <= MAX_BLOCK_SIZE );
return binIdx[(size + 7)>>3];
}
MallocTables()
{
initBinTab();
}
uchar binIdx[MAX_BLOCK_SIZE/8+1];
};
MallocTables mallocTables;
// Free-list link stored in-place inside a freed object's own memory.
struct Node
{
Node* next;
};
struct ThreadData;
// One 16 KB pool block serving objects of a single size class. Lives in a
// circular doubly-linked list per (thread, bin). Allocation first bumps
// bumpPtr through virgin space, then pops privateFreeList; frees from other
// threads go to publicFreeList under cs and are drained lazily.
struct Block
{
Block(Block* _next)
{
signature = MEM_BLOCK_SIGNATURE;
prev = 0;
next = _next;
privateFreeList = publicFreeList = 0;
bumpPtr = endPtr = 0;
objSize = 0;
threadData = 0;
// Payload starts right after the fixed-size header area.
data = (uchar*)this + HDR_SIZE;
}
~Block() {}
// (Re)bind the block to a size class and owner thread, linking it
// between _prev and _next in the circular list.
void init(Block* _prev, Block* _next, int _objSize, ThreadData* _threadData)
{
prev = _prev;
if(prev)
prev->next = this;
next = _next;
if(next)
next->prev = this;
objSize = _objSize;
binIdx = mallocTables.bin(objSize);
threadData = _threadData;
privateFreeList = publicFreeList = 0;
bumpPtr = data;
int nobjects = MAX_BLOCK_SIZE/objSize;
endPtr = bumpPtr + nobjects*objSize;
// A block is "filled" once more than half its objects are live.
almostEmptyThreshold = (nobjects + 1)/2;
allocated = 0;
}
bool isFilled() const { return allocated > almostEmptyThreshold; }
size_t signature;
Block* prev;
Block* next;
Node* privateFreeList;
Node* publicFreeList;
uchar* bumpPtr;
uchar* endPtr;
uchar* data;
ThreadData* threadData;
int objSize;
int binIdx;
int allocated;
int almostEmptyThreshold;
// Protects publicFreeList (cross-thread frees) and ownership handoff.
CriticalSection cs;
};
// A large system allocation carved into MEM_BLOCK_SIZE-aligned Blocks.
// Blocks are placement-constructed back-to-front so each links to the next,
// forming the initial free list handed to BlockPool.
struct BigBlock
{
BigBlock(int bigBlockSize, BigBlock* _next)
{
first = alignPtr((Block*)(this+1), MEM_BLOCK_SIZE);
next = _next;
nblocks = (int)(((char*)this + bigBlockSize - (char*)first)/MEM_BLOCK_SIZE);
Block* p = 0;
for( int i = nblocks-1; i >= 0; i-- )
p = ::new((uchar*)first + i*MEM_BLOCK_SIZE) Block(p);
}
~BigBlock()
{
for( int i = nblocks-1; i >= 0; i-- )
((Block*)((uchar*)first+i*MEM_BLOCK_SIZE))->~Block();
}
BigBlock* next;
Block* first;
int nblocks;
};
struct BlockPool
{
BlockPool(int _bigBlockSize=1<<20) : pool(0), bigBlockSize(_bigBlockSize)
{
}
~BlockPool()
{
AutoLock lock(cs);
while( pool )
{
BigBlock* nextBlock = pool->next;
pool->~BigBlock();
SystemFree(pool, bigBlockSize);
pool = nextBlock;
}
}
Block* alloc()
{
AutoLock lock(cs);
Block* block;
if( !freeBlocks )
{
BigBlock* bblock = ::new(SystemAlloc(bigBlockSize)) BigBlock(bigBlockSize, pool);
assert( bblock != 0 );
freeBlocks = bblock->first;
pool = bblock;
}
block = freeBlocks;
freeBlocks = freeBlocks->next;
if( freeBlocks )
freeBlocks->prev = 0;
STAT(stat.bruttoBytes += MEM_BLOCK_SIZE);
return block;
}
void free(Block* block)
{
AutoLock lock(cs);
block->prev = 0;
block->next = freeBlocks;
freeBlocks = block;
STAT(stat.bruttoBytes -= MEM_BLOCK_SIZE);
}
CriticalSection cs;
Block* freeBlocks;
BigBlock* pool;
int bigBlockSize;
int blocksPerBigBlock;
};
BlockPool mallocPool;
// Roles of the three cursors kept per bin: START anchors the circular
// list, FREE points at a block likely to have room, GC is the scan cursor
// for draining public free lists.
enum { START=0, FREE=1, GC=2 };
// Per-thread allocator state: one circular block list (with the three
// cursors above) per size class, stored in TLS.
struct ThreadData
{
ThreadData() { for(int i = 0; i <= MAX_BIN; i++) bins[i][START] = bins[i][FREE] = bins[i][GC] = 0; }
~ThreadData()
{
// mark all the thread blocks as abandoned or even release them
for( int i = 0; i <= MAX_BIN; i++ )
{
Block *bin = bins[i][START], *block = bin;
bins[i][START] = bins[i][FREE] = bins[i][GC] = 0;
if( block )
{
do
{
Block* next = block->next;
int allocated = block->allocated;
{
// Under the block lock: detach from this thread and count
// pending cross-thread frees against `allocated`.
AutoLock lock(block->cs);
block->next = block->prev = 0;
block->threadData = 0;
Node *node = block->publicFreeList;
for( ; node != 0; node = node->next )
allocated--;
}
// Fully free blocks go back to the global pool; others are
// left orphaned until their remaining objects are freed.
if( allocated == 0 )
mallocPool.free(block);
block = next;
}
while( block != bin );
}
}
}
// Rotate a (now non-full) block to become the bin's FREE cursor,
// keeping the circular list and the GC cursor consistent.
void moveBlockToFreeList( Block* block )
{
int i = block->binIdx;
Block*& freePtr = bins[i][FREE];
CV_DbgAssert( block->next->prev == block && block->prev->next == block );
if( block != freePtr )
{
Block*& gcPtr = bins[i][GC];
if( gcPtr == block )
gcPtr = block->next;
if( block->next != block )
{
block->prev->next = block->next;
block->next->prev = block->prev;
}
block->next = freePtr->next;
block->prev = freePtr;
freePtr = block->next->prev = block->prev->next = block;
}
}
Block* bins[MAX_BIN+1][3];
#ifdef WIN32
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif
static DWORD tlsKey;
// Lazily create the TLS slot and this thread's ThreadData (Win32).
static ThreadData* get()
{
ThreadData* data;
if( tlsKey == TLS_OUT_OF_INDEXES )
tlsKey = TlsAlloc();
data = (ThreadData*)TlsGetValue(tlsKey);
if( !data )
{
data = new ThreadData;
TlsSetValue(tlsKey, data);
}
return data;
}
#else
// pthread TLS destructor: runs ~ThreadData at thread exit.
static void deleteData(void* data)
{
delete (ThreadData*)data;
}
static pthread_key_t tlsKey;
// Lazily create the TLS key and this thread's ThreadData (POSIX).
static ThreadData* get()
{
ThreadData* data;
if( !tlsKey )
pthread_key_create(&tlsKey, deleteData);
data = (ThreadData*)pthread_getspecific(tlsKey);
if( !data )
{
data = new ThreadData;
pthread_setspecific(tlsKey, data);
}
return data;
}
#endif
};
// Static TLS key definitions. On Win32 the key starts invalid and thread
// cleanup must be driven externally (DllMain) via deleteThreadAllocData;
// on POSIX the pthread key destructor handles cleanup automatically.
#ifdef WIN32
DWORD ThreadData::tlsKey = TLS_OUT_OF_INDEXES;
void deleteThreadAllocData()
{
if( ThreadData::tlsKey != TLS_OUT_OF_INDEXES )
delete (ThreadData*)TlsGetValue( ThreadData::tlsKey );
}
#else
pthread_key_t ThreadData::tlsKey = 0;
#endif
#if 0
// Debug-only consistency check (compiled out by default): walks the
// circular list for bin `idx` and verifies both the FREE and GC cursors
// point at blocks inside it.
static void checkList(ThreadData* tls, int idx)
{
Block* block = tls->bins[idx][START];
if( !block )
{
CV_DbgAssert( tls->bins[idx][FREE] == 0 && tls->bins[idx][GC] == 0 );
}
else
{
bool gcInside = false;
bool freeInside = false;
do
{
if( tls->bins[idx][FREE] == block )
freeInside = true;
if( tls->bins[idx][GC] == block )
gcInside = true;
block = block->next;
}
while( block != tls->bins[idx][START] );
CV_DbgAssert( gcInside && freeInside );
}
}
#else
#define checkList(tls, idx)
#endif
// Pooled allocator entry point. Oversized requests bypass the pool and go
// straight to SystemAlloc with enough padding to align to MEM_BLOCK_SIZE
// (so fastFree can distinguish them by pointer alignment). Pool requests
// are rounded up to their bin size and served from this thread's blocks:
// first by bump-pointer, then private free list, then by draining public
// free lists (GC scan), and finally by taking a fresh block from the pool.
void* fastMalloc( size_t size )
{
if( size > MAX_BLOCK_SIZE )
{
// Large path: stash the raw pointer and total size just below the
// MEM_BLOCK_SIZE-aligned user pointer.
size_t size1 = size + sizeof(uchar*)*2 + MEM_BLOCK_SIZE;
uchar* udata = (uchar*)SystemAlloc(size1);
uchar** adata = alignPtr((uchar**)udata + 2, MEM_BLOCK_SIZE);
adata[-1] = udata;
adata[-2] = (uchar*)size1;
return adata;
}
{
ThreadData* tls = ThreadData::get();
int idx = mallocTables.bin(size);
Block*& startPtr = tls->bins[idx][START];
Block*& gcPtr = tls->bins[idx][GC];
Block*& freePtr = tls->bins[idx][FREE], *block = freePtr;
checkList(tls, idx);
// Round the request up to the bin's object size.
size = binSizeTab[idx];
STAT(
stat.nettoBytes += size;
stat.mallocCalls++;
);
uchar* data = 0;
for(;;)
{
if( block )
{
// try to find non-full block
for(;;)
{
CV_DbgAssert( block->next->prev == block && block->prev->next == block );
if( block->bumpPtr )
{
// Bump allocation from never-used space.
data = block->bumpPtr;
if( (block->bumpPtr += size) >= block->endPtr )
block->bumpPtr = 0;
break;
}
if( block->privateFreeList )
{
// Reuse an object freed by this same thread.
data = (uchar*)block->privateFreeList;
block->privateFreeList = block->privateFreeList->next;
break;
}
if( block == startPtr )
break;
block = block->next;
}
#if 0
avg_k += _k;
avg_nk++;
if( avg_nk == 1000 )
{
printf("avg search iters per 1e3 allocs = %g\n", (double)avg_k/avg_nk );
avg_k = avg_nk = 0;
}
#endif
freePtr = block;
if( !data )
{
// No private space left: scan up to two blocks from the GC
// cursor, draining cross-thread frees (publicFreeList).
block = gcPtr;
for( int k = 0; k < 2; k++ )
{
SANITY_CHECK(block);
CV_DbgAssert( block->next->prev == block && block->prev->next == block );
if( block->publicFreeList )
{
{
AutoLock lock(block->cs);
block->privateFreeList = block->publicFreeList;
block->publicFreeList = 0;
}
Node* node = block->privateFreeList;
for(;node != 0; node = node->next)
--block->allocated;
data = (uchar*)block->privateFreeList;
block->privateFreeList = block->privateFreeList->next;
gcPtr = block->next;
// Block emptied below threshold: make it the FREE cursor.
if( block->allocated+1 <= block->almostEmptyThreshold )
tls->moveBlockToFreeList(block);
break;
}
block = block->next;
}
if( !data )
gcPtr = block;
}
}
if( data )
break;
// Everything full: take a fresh block from the global pool and
// splice it into this bin's circular list.
block = mallocPool.alloc();
block->init(startPtr ? startPtr->prev : block, startPtr ? startPtr : block, (int)size, tls);
if( !startPtr )
startPtr = gcPtr = freePtr = block;
checkList(tls, block->binIdx);
SANITY_CHECK(block);
}
++block->allocated;
return data;
}
}
// Releases memory obtained from fastMalloc. Distinguishes big OS-backed
// allocations from small in-block objects by pointer alignment.
void fastFree( void* ptr )
{
    // Big allocations are returned aligned on MEM_BLOCK_SIZE; small objects
    // live *inside* such a block and therefore are never so aligned.
    if( ((size_t)ptr & (MEM_BLOCK_SIZE-1)) == 0 )
    {
        if( ptr != 0 )
        {
            // the original pointer and size were stashed just below the
            // aligned user pointer by fastMalloc's big-allocation path
            void* origPtr = ((void**)ptr)[-1];
            size_t sz = (size_t)((void**)ptr)[-2];
            SystemFree( origPtr, sz );
        }
        return;
    }

    {
        ThreadData* tls = ThreadData::get();
        Node* node = (Node*)ptr;
        // the owning Block header sits at the enclosing
        // MEM_BLOCK_SIZE-aligned address
        Block* block = (Block*)((size_t)ptr & -(int)MEM_BLOCK_SIZE);
        assert( block->signature == MEM_BLOCK_SIGNATURE );

        if( block->threadData == tls )
        {
            // Same-thread free: no locking needed.
            STAT(
            stat.nettoBytes -= block->objSize;
            stat.freeCalls++;
            float ratio = (float)stat.nettoBytes/stat.bruttoBytes;
            if( stat.minUsageRatio > ratio )
                stat.minUsageRatio = ratio;
            );

            SANITY_CHECK(block);

            bool prevFilled = block->isFilled();
            --block->allocated;
            if( !block->isFilled() && (block->allocated == 0 || prevFilled) )
            {
                if( block->allocated == 0 )
                {
                    // Block became completely empty: unlink it from the
                    // bin's circular list (fixing up all three cursors)
                    // and return it to the shared pool.
                    int idx = block->binIdx;
                    Block*& startPtr = tls->bins[idx][START];
                    Block*& freePtr = tls->bins[idx][FREE];
                    Block*& gcPtr = tls->bins[idx][GC];

                    if( block == block->next )
                    {
                        // it was the only block in the bin
                        CV_DbgAssert( startPtr == block && freePtr == block && gcPtr == block );
                        startPtr = freePtr = gcPtr = 0;
                    }
                    else
                    {
                        if( freePtr == block )
                            freePtr = block->next;
                        if( gcPtr == block )
                            gcPtr = block->next;
                        if( startPtr == block )
                            startPtr = block->next;
                        block->prev->next = block->next;
                        block->next->prev = block->prev;
                    }
                    mallocPool.free(block);
                    checkList(tls, idx);
                    return;
                }

                // block just transitioned from full to non-full: make it
                // visible to the allocation fast path again
                tls->moveBlockToFreeList(block);
            }
            // push the object onto this thread's private free list
            node->next = block->privateFreeList;
            block->privateFreeList = node;
        }
        else
        {
            // Cross-thread free: push onto the block's public free list
            // under the block lock; the owning thread reclaims it later.
            AutoLock lock(block->cs);
            SANITY_CHECK(block);

            node->next = block->publicFreeList;
            block->publicFreeList = node;
            if( block->threadData == 0 )
            {
                // take ownership of the abandoned block.
                // note that it can happen at the same time as
                // ThreadData::deleteData() marks the blocks as abandoned,
                // so this part of the algorithm needs to be checked for data races
                int idx = block->binIdx;
                block->threadData = tls;
                Block*& startPtr = tls->bins[idx][START];

                if( startPtr )
                {
                    // splice the adopted block into this thread's list
                    block->next = startPtr;
                    block->prev = startPtr->prev;
                    block->next->prev = block->prev->next = block;
                }
                else
                    startPtr = tls->bins[idx][FREE] = tls->bins[idx][GC] = block;
            }
        }
    }
}
#endif
}
// Legacy C-API stub: custom allocators were supported in earlier OpenCV
// versions; this build always uses the built-in fastMalloc/fastFree, so any
// attempt to install a custom manager is reported as an error.
CV_IMPL void cvSetMemoryManager( CvAllocFunc, CvFreeFunc, void * )
{
    CV_Error( -1, "Custom memory allocator is not supported" );
}
// Legacy C-API allocation entry point; forwards to the C++ allocator.
CV_IMPL void* cvAlloc( size_t size )
{
    return cv::fastMalloc( size );
}
// Legacy C-API deallocation entry point; forwards to the C++ allocator.
CV_IMPL void cvFree_( void* ptr )
{
    cv::fastFree( ptr );
}
/* End of file. */

1722
modules/core/src/arithm.cpp Normal file

File diff suppressed because it is too large Load Diff

3200
modules/core/src/array.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,980 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
namespace cv
{
/****************************************************************************************\
* split *
\****************************************************************************************/
template<typename T> static void
splitC2_( const Mat& srcmat, Mat* dstmat )
{
Size size = getContinuousSize( srcmat, dstmat[0], dstmat[1] );
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
T* dst0 = (T*)(dstmat[0].data + dstmat[0].step*y);
T* dst1 = (T*)(dstmat[1].data + dstmat[1].step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src[x*2], t1 = src[x*2+1];
dst0[x] = t0; dst1[x] = t1;
}
}
}
template<typename T> static void
splitC3_( const Mat& srcmat, Mat* dstmat )
{
Size size = getContinuousSize( srcmat, dstmat[0], dstmat[1], dstmat[2] );
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
T* dst0 = (T*)(dstmat[0].data + dstmat[0].step*y);
T* dst1 = (T*)(dstmat[1].data + dstmat[1].step*y);
T* dst2 = (T*)(dstmat[2].data + dstmat[2].step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src[x*3], t1 = src[x*3+1], t2 = src[x*3+2];
dst0[x] = t0; dst1[x] = t1; dst2[x] = t2;
}
}
}
template<typename T> static void
splitC4_( const Mat& srcmat, Mat* dstmat )
{
Size size = getContinuousSize( srcmat, dstmat[0], dstmat[1], dstmat[2], dstmat[3] );
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
T* dst0 = (T*)(dstmat[0].data + dstmat[0].step*y);
T* dst1 = (T*)(dstmat[1].data + dstmat[1].step*y);
T* dst2 = (T*)(dstmat[2].data + dstmat[2].step*y);
T* dst3 = (T*)(dstmat[3].data + dstmat[3].step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src[x*4], t1 = src[x*4+1];
dst0[x] = t0; dst1[x] = t1;
t0 = src[x*4+2]; t1 = src[x*4+3];
dst2[x] = t0; dst3[x] = t1;
}
}
}
typedef void (*SplitFunc)(const Mat& src, Mat* dst);
// Splits a multi-channel matrix into separate single-channel planes.
// mv must point to at least src.channels() Mat headers; each plane is
// (re)allocated to src.size() with the source depth.
void split(const Mat& src, Mat* mv)
{
    static SplitFunc tab[] =
    {
        splitC2_<uchar>, splitC2_<ushort>, splitC2_<int>, 0, splitC2_<int64>,
        splitC3_<uchar>, splitC3_<ushort>, splitC3_<int>, 0, splitC3_<int64>,
        splitC4_<uchar>, splitC4_<ushort>, splitC4_<int>, 0, splitC4_<int64>
    };

    int i, depth = src.depth(), cn = src.channels();
    if( cn == 1 )
    {
        src.copyTo(mv[0]);
        return;
    }

    for( i = 0; i < cn; i++ )
        mv[i].create(src.size(), depth);

    if( cn <= 4 )
    {
        // specialized 2/3/4-channel kernels, indexed by channel count and
        // element size (1,2,4,8 bytes -> columns 0,1,2,4; column 3 unused)
        SplitFunc func = tab[(cn-2)*5 + (src.elemSize1()>>1)];
        CV_Assert( func != 0 );
        func( src, mv );
    }
    else
    {
        // Generic path: route source channel i to output plane i.
        // BUGFIX: this used to set pairs[i*2+1] = 0, which made mixChannels
        // write every source channel into mv[0] (the destination index is a
        // global channel index across all output mats — cf. cvSplit below).
        vector<int> pairs(cn*2);
        for( i = 0; i < cn; i++ )
        {
            pairs[i*2] = i;
            pairs[i*2+1] = i;
        }
        mixChannels( &src, 1, mv, cn, &pairs[0], cn );
    }
}
/****************************************************************************************\
* merge *
\****************************************************************************************/
// input vector is made non-const to make sure that we do not copy Mat on each access
template<typename T> static void
mergeC2_( const Mat* srcmat, Mat& dstmat )
{
Size size = getContinuousSize( srcmat[0], srcmat[1], dstmat );
for( int y = 0; y < size.height; y++ )
{
const T* src0 = (const T*)(srcmat[0].data + srcmat[0].step*y);
const T* src1 = (const T*)(srcmat[1].data + srcmat[1].step*y);
T* dst = (T*)(dstmat.data + dstmat.step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src0[x], t1 = src1[x];
dst[x*2] = t0; dst[x*2+1] = t1;
}
}
}
template<typename T> static void
mergeC3_( const Mat* srcmat, Mat& dstmat )
{
Size size = getContinuousSize( srcmat[0], srcmat[1], srcmat[2], dstmat );
for( int y = 0; y < size.height; y++ )
{
const T* src0 = (const T*)(srcmat[0].data + srcmat[0].step*y);
const T* src1 = (const T*)(srcmat[1].data + srcmat[1].step*y);
const T* src2 = (const T*)(srcmat[2].data + srcmat[2].step*y);
T* dst = (T*)(dstmat.data + dstmat.step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src0[x], t1 = src1[x], t2 = src2[x];
dst[x*3] = t0; dst[x*3+1] = t1; dst[x*3+2] = t2;
}
}
}
template<typename T> static void
mergeC4_( const Mat* srcmat, Mat& dstmat )
{
Size size = getContinuousSize( srcmat[0], srcmat[1], srcmat[2], srcmat[3], dstmat );
for( int y = 0; y < size.height; y++ )
{
const T* src0 = (const T*)(srcmat[0].data + srcmat[0].step*y);
const T* src1 = (const T*)(srcmat[1].data + srcmat[1].step*y);
const T* src2 = (const T*)(srcmat[2].data + srcmat[2].step*y);
const T* src3 = (const T*)(srcmat[3].data + srcmat[3].step*y);
T* dst = (T*)(dstmat.data + dstmat.step*y);
for( int x = 0; x < size.width; x++ )
{
T t0 = src0[x], t1 = src1[x];
dst[x*4] = t0; dst[x*4+1] = t1;
t0 = src2[x]; t1 = src3[x];
dst[x*4+2] = t0; dst[x*4+3] = t1;
}
}
}
typedef void (*MergeFunc)(const Mat* src, Mat& dst);
// Merges n matrices (each with the same size and depth) into one
// interleaved matrix whose channel count is the sum of input channel
// counts. Inputs need not be single-channel.
void merge(const Mat* mv, size_t n, Mat& dst)
{
    static MergeFunc tab[] =
    {
        mergeC2_<uchar>, mergeC2_<ushort>, mergeC2_<int>, 0, mergeC2_<int64>,
        mergeC3_<uchar>, mergeC3_<ushort>, mergeC3_<int>, 0, mergeC3_<int64>,
        mergeC4_<uchar>, mergeC4_<ushort>, mergeC4_<int>, 0, mergeC4_<int64>
    };

    size_t i;
    CV_Assert( mv && n > 0 );

    int depth = mv[0].depth();
    bool allch1 = true;      // true iff every input is single-channel
    int total = 0;           // total number of output channels
    Size size = mv[0].size();

    // validate inputs and count channels
    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size() == size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        total += mv[i].channels();
    }

    CV_Assert( 0 < total && total <= CV_CN_MAX );

    if( total == 1 )
    {
        // single single-channel input: plain copy
        mv[0].copyTo(dst);
        return;
    }

    dst.create(size, CV_MAKETYPE(depth, total));

    if( allch1 && total <= 4 )
    {
        // specialized 2/3/4-channel kernels, indexed by channel count and
        // element size (1,2,4,8 bytes -> columns 0,1,2,4; column 3 unused)
        MergeFunc func = tab[(total-2)*5 + (CV_ELEM_SIZE(depth)>>1)];
        CV_Assert( func != 0 );
        func( mv, dst );
    }
    else
    {
        // generic path: identity channel mapping — source global channel
        // j+k (k-th channel of mv[i]) goes to destination channel j+k
        vector<int> pairs(total*2);
        int j, k, ni=0;

        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }

        mixChannels( mv, n, &dst, 1, &pairs[0], total );
    }
}
/****************************************************************************************\
* Generalized split/merge: mixing channels *
\****************************************************************************************/
// Low-level worker for mixChannels: copies n channel "streams" at once.
// For stream k, src[k]/dst[k] point at the current element, sdelta1/ddelta1
// are the intra-row strides (in elements, i.e. the channel counts), and
// sdelta0/ddelta0 are the end-of-row adjustments that jump to the next row.
// A null src[k] means "fill destination channel k with zeros".
template<typename T> static void
mixChannels_( const void** _src, const int* sdelta0,
              const int* sdelta1, void** _dst,
              const int* ddelta0, const int* ddelta1,
              int n, Size size )
{
    const T** src = (const T**)_src;
    T** dst = (T**)_dst;
    int i, k;
    // process rows in blocks so the k-loop stays cache-friendly; with a
    // single pair the whole row is one block
    int block_size0 = n == 1 ? size.width : 1024;

    for( ; size.height--; )
    {
        int remaining = size.width;
        for( ; remaining > 0; )
        {
            int block_size = MIN( remaining, block_size0 );
            for( k = 0; k < n; k++ )
            {
                const T* s = src[k];
                T* d = dst[k];
                int ds = sdelta1[k], dd = ddelta1[k];
                if( s )
                {
                    // copy block, 2 elements per iteration
                    for( i = 0; i <= block_size - 2; i += 2, s += ds*2, d += dd*2 )
                    {
                        T t0 = s[0], t1 = s[ds];
                        d[0] = t0; d[dd] = t1;
                    }
                    if( i < block_size )
                        d[0] = s[0], s += ds, d += dd;
                    src[k] = s;  // persist position across blocks
                }
                else
                {
                    // null source stream: zero-fill the destination channel
                    for( i=0; i <= block_size-2; i+=2, d+=dd*2 )
                        d[0] = d[dd] = 0;
                    if( i < block_size )
                        d[0] = 0, d += dd;
                }
                dst[k] = d;  // persist position across blocks
            }
            remaining -= block_size;
        }
        // advance every stream to the start of its next row
        for( k = 0; k < n; k++ )
            src[k] += sdelta0[k], dst[k] += ddelta0[k];
    }
}
typedef void (*MixChannelsFunc)( const void** src, const int* sdelta0,
const int* sdelta1, void** dst, const int* ddelta0, const int* ddelta1, int n, Size size );
// Copies arbitrary channels of the input matrices into arbitrary channels
// of the output matrices. fromTo holds npairs (srcIdx, dstIdx) pairs, where
// each index is a *global* channel number counted across all mats of the
// respective array; a negative srcIdx zero-fills the destination channel.
// All matrices must share the same size and depth.
void mixChannels( const Mat* src, int nsrcs, Mat* dst, int ndsts, const int* fromTo, size_t npairs )
{
    size_t i;

    if( npairs == 0 )
        return;

    CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );

    int depth = dst[0].depth(), esz1 = (int)dst[0].elemSize1();
    Size size = dst[0].size();

    // one scratch buffer holding, per pair: src ptr, dst ptr, and the four
    // stride values consumed by mixChannels_
    AutoBuffer<uchar> buf(npairs*(sizeof(void*)*2 + sizeof(int)*4));
    void** srcs = (void**)(uchar*)buf;
    void** dsts = srcs + npairs;
    int *s0 = (int*)(dsts + npairs), *s1 = s0 + npairs, *d0 = s1 + npairs, *d1 = d0 + npairs;
    bool isContinuous = true;

    for( i = 0; i < npairs; i++ )
    {
        int i0 = fromTo[i*2], i1 = fromTo[i*2+1], j;
        if( i0 >= 0 )
        {
            // locate the source mat containing global channel i0;
            // afterwards i0 is the channel index within src[j]
            for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
                if( i0 < src[j].channels() )
                    break;
            CV_Assert(j < nsrcs && src[j].size() == size && src[j].depth() == depth);
            isContinuous = isContinuous && src[j].isContinuous();
            srcs[i] = src[j].data + i0*esz1;
            // s1 = intra-row stride (channels); s0 = end-of-row jump
            s1[i] = src[j].channels(); s0[i] = (int)src[j].step/esz1 - size.width*src[j].channels();
        }
        else
        {
            // negative source index: zero-fill this destination channel
            srcs[i] = 0; s1[i] = s0[i] = 0;
        }

        // locate the destination mat containing global channel i1
        for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
            if( i1 < dst[j].channels() )
                break;
        CV_Assert(i1 >= 0 && j < ndsts && dst[j].size() == size && dst[j].depth() == depth);
        isContinuous = isContinuous && dst[j].isContinuous();
        dsts[i] = dst[j].data + i1*esz1;
        d1[i] = dst[j].channels(); d0[i] = (int)dst[j].step/esz1 - size.width*dst[j].channels();
    }

    // dispatch on element size, not type: only bit patterns are moved
    MixChannelsFunc func = 0;
    if( esz1 == 1 )
        func = mixChannels_<uchar>;
    else if( esz1 == 2 )
        func = mixChannels_<ushort>;
    else if( esz1 == 4 )
        func = mixChannels_<int>;
    else if( esz1 == 8 )
        func = mixChannels_<int64>;
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    if( isContinuous )
    {
        // flatten to a single logical row when all mats are continuous
        size.width *= size.height;
        size.height = 1;
    }
    func( (const void**)srcs, s0, s1, dsts, d0, d1, (int)npairs, size );
}
/****************************************************************************************\
* convertScale[Abs] *
\****************************************************************************************/
// Conversion functors used by the cvtScale*/convertScaleAbs kernels below.
// type1 is the intermediate arithmetic type, rtype the destination type.

// plain saturating conversion
template<typename sT, typename dT> struct OpCvt
{
    typedef sT type1;
    typedef dT rtype;
    rtype operator()(type1 x) const { return saturate_cast<rtype>(x); }
};

// fixed-point variant: input carries _fbits fractional bits; rounds to
// nearest before saturating
template<typename sT, typename dT, int _fbits> struct OpCvtFixPt
{
    typedef sT type1;
    typedef dT rtype;
    enum { fbits = _fbits };
    rtype operator()(type1 x) const
    {
        return saturate_cast<rtype>((x + (1<<(fbits-1)))>>fbits);
    }
};

// absolute value followed by saturating conversion (convertScaleAbs)
template<typename sT, typename dT> struct OpCvtAbs
{
    typedef sT type1;
    typedef dT rtype;
    rtype operator()(type1 x) const { return saturate_cast<rtype>(std::abs(x)); }
};

// fixed-point absolute-value variant
template<typename sT, typename dT, int _fbits> struct OpCvtAbsFixPt
{
    typedef sT type1;
    typedef dT rtype;
    enum { fbits = _fbits };
    rtype operator()(type1 x) const
    {
        return saturate_cast<rtype>((std::abs(x) + (1<<(fbits-1)))>>fbits);
    }
};
// Scale/shift conversion for 8-bit sources via a 256-entry lookup table:
// dst = op(src*scale + shift) is precomputed for every possible input value.
template<class Op> static void
cvtScaleLUT_( const Mat& srcmat, Mat& dstmat, double scale, double shift )
{
    Op op;
    typedef typename Op::rtype DT;
    DT lut[256];
    int i, sdepth = srcmat.depth(), ddepth = dstmat.depth();
    double val = shift;

    // entries 0..127 correspond to input values 0..127 for both 8U and 8S
    for( i = 0; i < 128; i++, val += scale )
        lut[i] = op(val);

    // for signed 8-bit input, table indices 128..255 are the byte patterns
    // of -128..-1, so restart the ramp at shift + (-128)*scale
    if( sdepth == CV_8S )
        val = shift*2 - val;

    for( ; i < 256; i++, val += scale )
        lut[i] = op(val);

    Mat _srcmat = srcmat;
    if( sdepth == CV_8S )
        // reinterpret the data as 8U so LUT() indexes the table 0..255
        _srcmat = Mat(srcmat.size(), CV_8UC(srcmat.channels()), srcmat.data, srcmat.step);
    LUT(_srcmat, Mat(1, 256, ddepth, lut), dstmat);
}
template<typename T, class Op> static void
cvtScale_( const Mat& srcmat, Mat& dstmat, double _scale, double _shift )
{
Op op;
typedef typename Op::type1 WT;
typedef typename Op::rtype DT;
Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
WT scale = saturate_cast<WT>(_scale), shift = saturate_cast<WT>(_shift);
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
DT* dst = (DT*)(dstmat.data + dstmat.step*y);
int x = 0;
for( ; x <= size.width - 4; x += 4 )
{
DT t0, t1;
t0 = op(src[x]*scale + shift);
t1 = op(src[x+1]*scale + shift);
dst[x] = t0; dst[x+1] = t1;
t0 = op(src[x+2]*scale + shift);
t1 = op(src[x+3]*scale + shift);
dst[x+2] = t0; dst[x+3] = t1;
}
for( ; x < size.width; x++ )
dst[x] = op(src[x]*scale + shift);
}
}
// Scale/shift conversion for 16-bit integer sources. Uses fixed-point
// integer arithmetic (OpFixPt, fbits fractional bits) when the parameters
// are small enough that src*scale+shift cannot overflow the int
// intermediate; otherwise falls back to the floating-point kernel (Op).
template<typename T, class OpFixPt, class Op, int MAX_SHIFT> static void
cvtScaleInt_( const Mat& srcmat, Mat& dstmat, double _scale, double _shift )
{
    // |scale| > 1 or |shift| > MAX_SHIFT could overflow the fixed-point
    // intermediate — use the floating-point path instead
    if( std::abs(_scale) > 1 || std::abs(_shift) > MAX_SHIFT )
    {
        cvtScale_<T, Op>(srcmat, dstmat, _scale, _shift);
        return;
    }

    OpFixPt op;
    typedef typename OpFixPt::rtype DT;
    Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
    // convert scale/shift to fixed point with OpFixPt::fbits fractional bits
    int scale = saturate_cast<int>(_scale*(1<<OpFixPt::fbits)),
        shift = saturate_cast<int>(_shift*(1<<OpFixPt::fbits));

    for( int y = 0; y < size.height; y++ )
    {
        const T* src = (const T*)(srcmat.data + srcmat.step*y);
        DT* dst = (DT*)(dstmat.data + dstmat.step*y);
        int x = 0;

        // 4x unrolled main loop
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = op(src[x]*scale + shift);
            t1 = op(src[x+1]*scale + shift);
            dst[x] = t0; dst[x+1] = t1;
            t0 = op(src[x+2]*scale + shift);
            t1 = op(src[x+3]*scale + shift);
            dst[x+2] = t0; dst[x+3] = t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = op(src[x]*scale + shift);
    }
}
template<typename T, typename DT> static void
cvt_( const Mat& srcmat, Mat& dstmat )
{
Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcmat.step*y);
DT* dst = (DT*)(dstmat.data + dstmat.step*y);
int x = 0;
for( ; x <= size.width - 4; x += 4 )
{
DT t0, t1;
t0 = saturate_cast<DT>(src[x]);
t1 = saturate_cast<DT>(src[x+1]);
dst[x] = t0; dst[x+1] = t1;
t0 = saturate_cast<DT>(src[x+2]);
t1 = saturate_cast<DT>(src[x+3]);
dst[x+2] = t0; dst[x+3] = t1;
}
for( ; x < size.width; x++ )
dst[x] = saturate_cast<DT>(src[x]);
}
}
// Number of fractional bits used by the fixed-point conversion kernels.
static const int FBITS = 15;
// Round-to-nearest removal of FBITS fractional bits.
#define ICV_SCALE(x) CV_DESCALE((x), FBITS)
// dispatch signatures for the conversion kernels above
typedef void (*CvtFunc)( const Mat& src, Mat& dst );
typedef void (*CvtScaleFunc)( const Mat& src, Mat& dst, double scale, double shift );
// Computes dst = saturate_cast<uchar>(|src*scale + shift|) per element;
// the destination is always 8U with the same channel count as src.
void convertScaleAbs( const Mat& src, Mat& dst, double scale, double shift )
{
    // one kernel per source depth (8U,8S,16U,16S,32S,32F,64F); 8-bit
    // sources go through a 256-entry LUT, 16-bit through fixed point
    static CvtScaleFunc tab[] =
    {
        cvtScaleLUT_<OpCvtAbs<double, uchar> >,
        cvtScaleLUT_<OpCvtAbs<double, uchar> >,
        cvtScaleInt_<ushort, OpCvtAbsFixPt<int, uchar, FBITS>, OpCvtAbs<float, uchar>, 0>,
        cvtScaleInt_<short, OpCvtAbsFixPt<int, uchar, FBITS>, OpCvtAbs<float, uchar>, 1<<15>,
        cvtScale_<int, OpCvtAbs<double, uchar> >,
        cvtScale_<float, OpCvtAbs<float, uchar> >,
        cvtScale_<double, OpCvtAbs<double, uchar> >, 0
    };

    // keep a header referencing the source data: if dst aliases src,
    // dst.create() below may release the buffer src points to
    Mat src0 = src;
    dst.create( src.size(), CV_8UC(src.channels()) );
    CvtScaleFunc func = tab[src0.depth()];
    CV_Assert( func != 0 );
    func( src0, dst, scale, shift );
}
// Converts *this to the depth of _type (channel count is always kept),
// applying dst = saturate(src*alpha + beta). A negative _type keeps the
// source type. With alpha==1 and beta==0 a faster no-scale path is used.
void Mat::convertTo(Mat& dst, int _type, double alpha, double beta) const
{
    // no-scale kernels, indexed [srcDepth][dstDepth]; the diagonal is 0
    // because equal depths are handled by copyTo below
    static CvtFunc tab[8][8] =
    {
        {0, cvt_<uchar, schar>, cvt_<uchar, ushort>, cvt_<uchar, short>,
         cvt_<uchar, int>, cvt_<uchar, float>, cvt_<uchar, double>, 0},

        {cvt_<schar, uchar>, 0, cvt_<schar, ushort>, cvt_<schar, short>,
         cvt_<schar, int>, cvt_<schar, float>, cvt_<schar, double>, 0},

        {cvt_<ushort, uchar>, cvt_<ushort, schar>, 0, cvt_<ushort, short>,
         cvt_<ushort, int>, cvt_<ushort, float>, cvt_<ushort, double>, 0},

        {cvt_<short, uchar>, cvt_<short, schar>, cvt_<short, ushort>, 0,
         cvt_<short, int>, cvt_<short, float>, cvt_<short, double>, 0},

        {cvt_<int, uchar>, cvt_<int, schar>, cvt_<int, ushort>,
         cvt_<int, short>, 0, cvt_<int, float>, cvt_<int, double>, 0},

        {cvt_<float, uchar>, cvt_<float, schar>, cvt_<float, ushort>,
         cvt_<float, short>, cvt_<float, int>, 0, cvt_<float, double>, 0},

        {cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>,
         cvt_<double, short>, cvt_<double, int>, cvt_<double, float>, 0, 0},

        {0,0,0,0,0,0,0,0}
    };

    // scaling kernels, indexed [srcDepth][dstDepth]: 8-bit sources use a
    // 256-entry LUT, 16-bit sources use fixed point where safe
    static CvtScaleFunc stab[8][8] =
    {
        {
            cvtScaleLUT_<OpCvt<double, uchar> >,
            cvtScaleLUT_<OpCvt<double, schar> >,
            cvtScaleLUT_<OpCvt<double, ushort> >,
            cvtScaleLUT_<OpCvt<double, short> >,
            cvtScaleLUT_<OpCvt<double, int> >,
            cvtScaleLUT_<OpCvt<double, float> >,
            cvtScaleLUT_<OpCvt<double, double> >, 0
        },

        {
            // this is copy of the above section,
            // since cvScaleLUT handles both 8u->? and 8s->? cases
            cvtScaleLUT_<OpCvt<double, uchar> >,
            cvtScaleLUT_<OpCvt<double, schar> >,
            cvtScaleLUT_<OpCvt<double, ushort> >,
            cvtScaleLUT_<OpCvt<double, short> >,
            cvtScaleLUT_<OpCvt<double, int> >,
            cvtScaleLUT_<OpCvt<double, float> >,
            cvtScaleLUT_<OpCvt<double, double> >, 0,
        },

        {
            cvtScaleInt_<ushort, OpCvtFixPt<int, uchar, FBITS>, OpCvt<float, uchar>, 0>,
            cvtScaleInt_<ushort, OpCvtFixPt<int, schar, FBITS>, OpCvt<float, schar>, 0>,
            cvtScaleInt_<ushort, OpCvtFixPt<int, ushort, FBITS>, OpCvt<float, ushort>, 0>,
            cvtScaleInt_<ushort, OpCvtFixPt<int, short, FBITS>, OpCvt<float, short>, 0>,
            cvtScale_<ushort, OpCvt<double, int> >,
            cvtScale_<ushort, OpCvt<float, float> >,
            cvtScale_<ushort, OpCvt<double, double> >, 0,
        },

        {
            cvtScaleInt_<short, OpCvtFixPt<int, uchar, FBITS>, OpCvt<float, uchar>, 1<<15>,
            cvtScaleInt_<short, OpCvtFixPt<int, schar, FBITS>, OpCvt<float, schar>, 1<<15>,
            cvtScaleInt_<short, OpCvtFixPt<int, ushort, FBITS>, OpCvt<float, ushort>, 1<<15>,
            cvtScaleInt_<short, OpCvtFixPt<int, short, FBITS>, OpCvt<float, short>, 1<<15>,
            cvtScale_<short, OpCvt<double, int> >,
            cvtScale_<short, OpCvt<float, float> >,
            cvtScale_<short, OpCvt<double, double> >, 0,
        },

        {
            cvtScale_<int, OpCvt<float, uchar> >,
            cvtScale_<int, OpCvt<float, schar> >,
            cvtScale_<int, OpCvt<double, ushort> >,
            cvtScale_<int, OpCvt<double, short> >,
            cvtScale_<int, OpCvt<double, int> >,
            cvtScale_<int, OpCvt<float, float> >,
            cvtScale_<int, OpCvt<double, double> >, 0,
        },

        {
            cvtScale_<float, OpCvt<float, uchar> >,
            cvtScale_<float, OpCvt<float, schar> >,
            cvtScale_<float, OpCvt<float, ushort> >,
            cvtScale_<float, OpCvt<float, short> >,
            cvtScale_<float, OpCvt<float, int> >,
            cvtScale_<float, OpCvt<float, float> >,
            cvtScale_<float, OpCvt<double, double> >, 0,
        },

        {
            cvtScale_<double, OpCvt<double, uchar> >,
            cvtScale_<double, OpCvt<double, schar> >,
            cvtScale_<double, OpCvt<double, ushort> >,
            cvtScale_<double, OpCvt<double, short> >,
            cvtScale_<double, OpCvt<double, int> >,
            cvtScale_<double, OpCvt<double, float> >,
            cvtScale_<double, OpCvt<double, double> >, 0,
        }
    };

    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;

    // the requested type keeps this matrix's channel count in any case
    if( _type < 0 )
        _type = type();
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());

    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
    if( sdepth == ddepth && noScale )
    {
        // identity conversion: plain copy
        copyTo(dst);
        return;
    }

    // if dst aliases *this and the depths differ, dst.create() would
    // release the source buffer — work from a temporary copy instead
    Mat temp;
    const Mat* psrc = this;
    if( sdepth != ddepth && psrc == &dst )
        psrc = &(temp = *this);

    dst.create( size(), _type );
    if( noScale )
    {
        CvtFunc func = tab[sdepth][ddepth];
        CV_Assert( func != 0 );
        func( *psrc, dst );
    }
    else
    {
        CvtScaleFunc func = stab[sdepth][ddepth];
        CV_Assert( func != 0 );
        func( *psrc, dst, alpha, beta );
    }
}
/****************************************************************************************\
* LUT Transform *
\****************************************************************************************/
// Applies a 256-entry lookup table to an 8-bit source. cn is the LUT's
// channel count: 1 means one table shared by all channels; otherwise each
// channel k uses its own sub-table.
template<typename T> static void
LUT8u( const Mat& srcmat, Mat& dstmat, const Mat& lut )
{
    int cn = lut.channels();
    int max_block_size = (1 << 10)*cn;
    const T* _lut = (const T*)lut.data;
    T lutp[4][256];   // planar repack of the LUT; only holds up to 4 channels
    int y, i, k;
    Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );

    if( cn == 1 )
    {
        // one shared table: every element is transformed identically
        for( y = 0; y < size.height; y++ )
        {
            const uchar* src = srcmat.data + srcmat.step*y;
            T* dst = (T*)(dstmat.data + dstmat.step*y);

            for( i = 0; i < size.width; i++ )
                dst[i] = _lut[src[i]];
        }
        return;
    }

    // Use the simple interleaved path for small images — and, BUGFIX, also
    // whenever cn > 4: lutp has only 4 planes, so the planar repacking
    // below would overflow the stack array for larger channel counts
    // (cv::LUT permits lut.channels() == src.channels() up to CV_CN_MAX).
    if( size.width*size.height < 256 || cn > 4 )
    {
        for( y = 0; y < size.height; y++ )
        {
            const uchar* src = srcmat.data + srcmat.step*y;
            T* dst = (T*)(dstmat.data + dstmat.step*y);

            for( k = 0; k < cn; k++ )
                for( i = 0; i < size.width; i += cn )
                    dst[i+k] = _lut[src[i+k]*cn+k];
        }
        return;
    }

    /* repack the lut to planar layout so the per-channel inner loops below
       read a compact 256-entry table */
    for( k = 0; k < cn; k++ )
        for( i = 0; i < 256; i++ )
            lutp[k][i] = _lut[i*cn+k];

    for( y = 0; y < size.height; y++ )
    {
        const uchar* src = srcmat.data + srcmat.step*y;
        T* dst = (T*)(dstmat.data + dstmat.step*y);

        // process the row in blocks, one channel at a time per block
        for( i = 0; i < size.width; )
        {
            int j, limit = std::min(size.width, i + max_block_size);
            for( k = 0; k < cn; k++, src++, dst++ )
            {
                const T* lut = lutp[k];
                for( j = i; j <= limit - cn*2; j += cn*2 )
                {
                    T t0 = lut[src[j]];
                    T t1 = lut[src[j+cn]];
                    dst[j] = t0; dst[j+cn] = t1;
                }

                for( ; j < limit; j += cn )
                    dst[j] = lut[src[j]];
            }
            // undo the per-channel pointer advance before the next block
            src -= cn;
            dst -= cn;
            i = limit;
        }
    }
}
typedef void (*LUTFunc)( const Mat& src, Mat& dst, const Mat& lut );
// Transforms an 8-bit matrix through a 256-entry lookup table. The table
// must be continuous with 256 total elements, either single-channel
// (shared) or with one sub-table per source channel; the output takes the
// table's depth and the source's channel count.
void LUT( const Mat& src, const Mat& lut, Mat& dst )
{
    int cn = src.channels(), esz1 = (int)lut.elemSize1();

    CV_Assert( (lut.channels() == cn || lut.channels() == 1) &&
        lut.rows*lut.cols == 256 && lut.isContinuous() &&
        (src.depth() == CV_8U || src.depth() == CV_8S) );
    dst.create( src.size(), CV_MAKETYPE(lut.depth(), cn));

    // dispatch on the table's element size — only bit patterns are moved
    LUTFunc func = 0;
    if( esz1 == 1 )
        func = LUT8u<uchar>;
    else if( esz1 == 2 )
        func = LUT8u<ushort>;
    else if( esz1 == 4 )
        func = LUT8u<int>;
    else if( esz1 == 8 )
        func = LUT8u<int64>;
    else
        CV_Error(CV_StsUnsupportedFormat, "");
    func( src, dst, lut );
}
// Normalizes src into dst. For CV_MINMAX the value range is linearly mapped
// onto [min(a,b), max(a,b)]; for CV_L1/CV_L2/CV_C the array is scaled so
// that the chosen norm equals a. The optional mask restricts both the
// statistics and which destination elements are written.
void normalize( const Mat& src, Mat& dst, double a, double b,
                int norm_type, int rtype, const Mat& mask )
{
    double scale = 1, shift = 0;
    if( norm_type == CV_MINMAX )
    {
        // affine map of [smin, smax] onto [dmin, dmax]; degenerate (flat)
        // input maps everything to dmin
        double smin = 0, smax = 0;
        double dmin = MIN( a, b ), dmax = MAX( a, b );
        minMaxLoc( src, &smin, &smax, 0, 0, mask );
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    }
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        // pure scaling so that norm(dst) == a; zero input stays zero
        scale = norm( src, norm_type, mask );
        scale = scale > DBL_EPSILON ? a/scale : 0.;
        shift = 0;
    }
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );

    if( !mask.data )
        src.convertTo( dst, rtype, scale, shift );
    else
    {
        // masked case: convert into a temporary, then copy through the mask
        // so unmasked destination elements keep their previous values
        Mat temp;
        src.convertTo( temp, rtype, scale, shift );
        temp.copyTo( dst, mask );
    }
}
}
// Legacy C-API wrapper: copies the channels of srcarr into up to four
// single-channel destination arrays; null destination pointers are skipped.
CV_IMPL void
cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
{
    void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 };
    cv::Mat src = cv::cvarrToMat(srcarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += dptrs[i] != 0;
    CV_Assert( nz > 0 );
    cv::vector<cv::Mat> dvec(nz);
    cv::vector<int> pairs(nz*2);

    // collect the non-null outputs; pair (i, j) routes source channel i to
    // output plane j (destination index is global over the dvec array)
    for( i = j = 0; i < 4; i++ )
    {
        if( dptrs[i] != 0 )
        {
            dvec[j] = cv::cvarrToMat(dptrs[i]);
            CV_Assert( dvec[j].size() == src.size() &&
                dvec[j].depth() == src.depth() &&
                dvec[j].channels() == 1 && i < src.channels() );
            pairs[j*2] = i;
            pairs[j*2+1] = j;
            j++;
        }
    }
    if( nz == src.channels() )
        cv::split( src, dvec );
    else
    {
        // partial split: only some channels are extracted
        cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz );
    }
}
// Legacy C-API wrapper: interleaves up to four single-channel source arrays
// into dstarr; null source pointers leave the corresponding destination
// channel untouched.
CV_IMPL void
cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
         const void* srcarr3, void* dstarr )
{
    const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 };
    cv::Mat dst = cv::cvarrToMat(dstarr);
    int i, j, nz = 0;
    for( i = 0; i < 4; i++ )
        nz += sptrs[i] != 0;
    CV_Assert( nz > 0 );
    cv::vector<cv::Mat> svec(nz);
    cv::vector<int> pairs(nz*2);

    // collect the non-null inputs; pair (j, i) routes input plane j to
    // destination channel i
    for( i = j = 0; i < 4; i++ )
    {
        if( sptrs[i] != 0 )
        {
            svec[j] = cv::cvarrToMat(sptrs[i]);
            CV_Assert( svec[j].size() == dst.size() &&
                svec[j].depth() == dst.depth() &&
                svec[j].channels() == 1 && i < dst.channels() );
            pairs[j*2] = j;
            pairs[j*2+1] = i;
            j++;
        }
    }

    if( nz == dst.channels() )
        cv::merge( svec, dst );
    else
    {
        // partial merge: only some destination channels are written
        cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz );
    }
}
// Legacy C-API wrapper around cv::mixChannels: converts the CvArr arrays
// to Mat headers and forwards the channel-pair list unchanged.
CV_IMPL void
cvMixChannels( const CvArr** src, int src_count,
               CvArr** dst, int dst_count,
               const int* from_to, int pair_count )
{
    // BUGFIX: size the buffer for all src+dst headers. The previous code
    // default-constructed the AutoBuffer, which only provides its fixed
    // capacity and overflowed when src_count + dst_count exceeded it.
    cv::AutoBuffer<cv::Mat, 32> buf(src_count + dst_count);
    int i;
    for( i = 0; i < src_count; i++ )
        buf[i] = cv::cvarrToMat(src[i]);
    for( i = 0; i < dst_count; i++ )
        buf[i+src_count] = cv::cvarrToMat(dst[i]);
    cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count);
}
// Legacy C-API wrapper for cv::convertScaleAbs; the destination must
// already be allocated as 8U with the source's size and channel count.
CV_IMPL void
cvConvertScaleAbs( const void* srcarr, void* dstarr,
                   double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size() == dst.size() && dst.type() == CV_8UC(src.channels()));
    cv::convertScaleAbs( src, dst, scale, shift );
}
// Legacy C-API wrapper for Mat::convertTo; the destination array fixes the
// output depth and must match the source's size and channel count.
CV_IMPL void
cvConvertScale( const void* srcarr, void* dstarr,
                double scale, double shift )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() );
    src.convertTo(dst, dst.type(), scale, shift);
}
// Legacy C-API wrapper for cv::LUT; the destination must be pre-allocated
// with the table's depth and the source's size and channel count.
CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr);
    CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) );
    cv::LUT( src, lut, dst );
}
// Legacy C-API wrapper for cv::normalize; the destination array fixes the
// output type and must match the source's size and channel count.
CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr,
                          double a, double b, int norm_type, const CvArr* maskarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() );
    cv::normalize( src, dst, a, b, norm_type, dst.type(), mask );
}
/* End of file. */

541
modules/core/src/copy.cpp Normal file
View File

@@ -0,0 +1,541 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* ////////////////////////////////////////////////////////////////////
//
// Mat basic operations: Copy, Set
//
// */
#include "precomp.hpp"
namespace cv
{
template<typename T> static void
copyMask_(const Mat& srcmat, Mat& dstmat, const Mat& maskmat)
{
    // Copies srcmat elements to dstmat wherever the corresponding 8-bit
    // mask byte is non-zero; masked-out destination elements are left
    // untouched. T is chosen so that sizeof(T) equals the element size.
    const uchar* mask = maskmat.data;
    size_t sstep = srcmat.step;
    size_t dstep = dstmat.step;
    size_t mstep = maskmat.step;
    // If all three matrices are continuous, process them as one long row.
    Size size = getContinuousSize(srcmat, dstmat, maskmat);

    for( int y = 0; y < size.height; y++, mask += mstep )
    {
        const T* src = (const T*)(srcmat.data + sstep*y);
        T* dst = (T*)(dstmat.data + dstep*y);
        int x = 0;
        // 4x-unrolled main loop.
        for( ; x <= size.width - 4; x += 4 )
        {
            if( mask[x] )
                dst[x] = src[x];
            if( mask[x+1] )
                dst[x+1] = src[x+1];
            if( mask[x+2] )
                dst[x+2] = src[x+2];
            if( mask[x+3] )
                dst[x+3] = src[x+3];
        }
        // Remaining 0-3 elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
template<typename T> static void
setMask_(const void* _scalar, Mat& dstmat, const Mat& maskmat)
{
    // Sets dstmat elements to the raw value *_scalar wherever the
    // corresponding 8-bit mask byte is non-zero. _scalar points to one
    // whole element (all channels packed), so T covers the full element.
    T scalar = *(T*)_scalar;
    const uchar* mask = maskmat.data;
    size_t dstep = dstmat.step;
    size_t mstep = maskmat.step;
    Size size = dstmat.size();

    // Collapse to a single row when both matrices are continuous.
    if( dstmat.isContinuous() && maskmat.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    for( int y = 0; y < size.height; y++, mask += mstep )
    {
        T* dst = (T*)(dstmat.data + dstep*y);
        int x = 0;
        // 4x-unrolled main loop.
        for( ; x <= size.width - 4; x += 4 )
        {
            if( mask[x] )
                dst[x] = scalar;
            if( mask[x+1] )
                dst[x+1] = scalar;
            if( mask[x+2] )
                dst[x+2] = scalar;
            if( mask[x+3] )
                dst[x+3] = scalar;
        }
        // Remaining 0-3 elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = scalar;
    }
}
// Signature of a masked "set to scalar" implementation.
typedef void (*SetMaskFunc)(const void* scalar, Mat& dst, const Mat& mask);

// Masked-copy implementations indexed by element size in bytes.
// Only sizes corresponding to real element layouts (1, 2, 3, 4, 6, 8,
// 12, 16, 24, 32) have entries; the rest are 0.
CopyMaskFunc g_copyMaskFuncTab[] =
{
    0,
    copyMask_<uchar>, // 1
    copyMask_<ushort>, // 2
    copyMask_<Vec<uchar,3> >, // 3
    copyMask_<int>, // 4
    0,
    copyMask_<Vec<ushort,3> >, // 6
    0,
    copyMask_<int64>, // 8
    0, 0, 0,
    copyMask_<Vec<int,3> >, // 12
    0, 0, 0,
    copyMask_<Vec<int64,2> >, // 16
    0, 0, 0, 0, 0, 0, 0,
    copyMask_<Vec<int64,3> >, // 24
    0, 0, 0, 0, 0, 0, 0,
    copyMask_<Vec<int64,4> > // 32
};

// Masked-set implementations; same element-size indexing scheme as above.
static SetMaskFunc setMaskFuncTab[] =
{
    0,
    setMask_<uchar>, // 1
    setMask_<ushort>, // 2
    setMask_<Vec<uchar,3> >, // 3
    setMask_<int>, // 4
    0,
    setMask_<Vec<ushort,3> >, // 6
    0,
    setMask_<int64>, // 8
    0, 0, 0,
    setMask_<Vec<int,3> >, // 12
    0, 0, 0,
    setMask_<Vec<int64,2> >, // 16
    0, 0, 0, 0, 0, 0, 0,
    setMask_<Vec<int64,3> >, // 24
    0, 0, 0, 0, 0, 0, 0,
    setMask_<Vec<int64,4> > // 32
};
/* dst = src */
void Mat::copyTo( Mat& dst ) const
{
    // Copying onto the same buffer is a no-op.
    if( data == dst.data )
        return;

    dst.create( rows, cols, type() );
    Size sz = size();
    const uchar* sptr = data;
    uchar* dptr = dst.data;

    // Work in bytes: one memcpy per row.
    sz.width *= (int)elemSize();
    // When both matrices are continuous, copy everything in one memcpy.
    if( isContinuous() && dst.isContinuous() )
    {
        sz.width *= sz.height;
        sz.height = 1;
    }

    for( ; sz.height--; sptr += step, dptr += dst.step )
        memcpy( dptr, sptr, sz.width );
}
void Mat::copyTo( Mat& dst, const Mat& mask ) const
{
    // An empty mask means unconditional copy.
    if( !mask.data )
    {
        copyTo(dst);
        return;
    }

    uchar* data0 = dst.data;
    dst.create( size(), type() );
    // If create() had to reallocate, zero the fresh buffer so the
    // masked-out elements have a defined value.
    if( dst.data != data0 ) // do not leave dst uninitialized
        dst = Scalar(0);
    // Dispatch on element size (in bytes) to a type-specialized copier.
    getCopyMaskFunc((int)elemSize())(*this, dst, mask);
}
Mat& Mat::operator = (const Scalar& s)
{
    // Fills the whole matrix with scalar s (replicated across channels).
    Size sz = size();
    uchar* dst = data;

    // Work in bytes; collapse to one long row when continuous.
    sz.width *= (int)elemSize();
    if( isContinuous() )
    {
        sz.width *= sz.height;
        sz.height = 1;
    }

    if( s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0 )
    {
        // Zero fill: plain memset per row.
        for( ; sz.height--; dst += step )
            memset( dst, 0, sz.width );
    }
    else
    {
        int t = type(), esz1 = (int)elemSize1();
        // Build a 12-element raw pattern of the scalar; 12 is divisible
        // by 1, 2, 3 and 4, so repeating whole 12-element chunks keeps
        // the channel pattern aligned for any supported channel count.
        double scalar[12];
        scalarToRawData(s, scalar, t, 12);
        int copy_len = 12*esz1;
        uchar* dst_limit = dst + sz.width;

        // Fill the first row by tiling the pattern (plus a partial tail).
        if( sz.height-- )
        {
            while( dst + copy_len <= dst_limit )
            {
                memcpy( dst, scalar, copy_len );
                dst += copy_len;
            }
            memcpy( dst, scalar, dst_limit - dst );
        }

        // Replicate the already-filled first row into the remaining rows.
        if( sz.height > 0 )
        {
            dst = dst_limit - sz.width + step;
            for( ; sz.height--; dst += step )
                memcpy( dst, data, sz.width );
        }
    }
    return *this;
}
Mat& Mat::setTo(const Scalar& s, const Mat& mask)
{
    // Empty mask: unconditional fill via operator=(Scalar).
    if( !mask.data )
        *this = s;
    else
    {
        CV_Assert( channels() <= 4 );
        // Dispatch on element size in bytes; with <= 4 channels the size
        // is at most 32, which the 33-entry table covers.
        SetMaskFunc func = setMaskFuncTab[elemSize()];
        CV_Assert( func != 0 );
        // Pack the scalar into one raw element (unroll_to=0: exactly cn
        // channel values).
        double buf[4];
        scalarToRawData(s, buf, type(), 0);
        func(buf, *this, mask);
    }
    return *this;
}
template<typename T> static void
flipHoriz_( const Mat& srcmat, Mat& dstmat, bool flipv )
{
uchar* dst0 = dstmat.data;
size_t srcstep = srcmat.step;
int dststep = (int)dstmat.step;
Size size = srcmat.size();
if( flipv )
{
dst0 += (size.height - 1)*dststep;
dststep = -dststep;
}
for( int y = 0; y < size.height; y++ )
{
const T* src = (const T*)(srcmat.data + srcstep*y);
T* dst = (T*)(dst0 + dststep*y);
for( int i = 0; i < (size.width + 1)/2; i++ )
{
T t0 = src[i], t1 = src[size.width - i - 1];
dst[i] = t1; dst[size.width - i - 1] = t0;
}
}
}
typedef void (*FlipHorizFunc)( const Mat& src, Mat& dst, bool flipv );

// Flips the matrix around the horizontal axis (top rows swap with bottom
// rows). Safe for in-place use: both rows of a pair are read before the
// destination rows are written.
static void
flipVert( const Mat& srcmat, Mat& dstmat )
{
    const uchar* src = srcmat.data;
    uchar* dst = dstmat.data;
    size_t srcstep = srcmat.step, dststep = dstmat.step;
    Size size = srcmat.size();
    // src1/dst1 walk upwards from the last row while src/dst walk down.
    const uchar* src1 = src + (size.height - 1)*srcstep;
    uchar* dst1 = dst + (size.height - 1)*dststep;
    size.width *= (int)srcmat.elemSize(); // row length in bytes

    for( int y = 0; y < (size.height + 1)/2; y++, src += srcstep, src1 -= srcstep,
        dst += dststep, dst1 -= dststep )
    {
        int i = 0;
        // If all four row pointers are int-aligned (re-checked per row,
        // since steps need not be multiples of sizeof(int)), exchange
        // the bulk of the rows word by word. NOTE: the 16-byte inner
        // unroll assumes sizeof(int) == 4.
        if( ((size_t)(src)|(size_t)(dst)|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
        {
            for( ; i <= size.width - 16; i += 16 )
            {
                int t0 = ((int*)(src + i))[0];
                int t1 = ((int*)(src1 + i))[0];
                ((int*)(dst + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;
                t0 = ((int*)(src + i))[1];
                t1 = ((int*)(src1 + i))[1];
                ((int*)(dst + i))[1] = t1;
                ((int*)(dst1 + i))[1] = t0;
                t0 = ((int*)(src + i))[2];
                t1 = ((int*)(src1 + i))[2];
                ((int*)(dst + i))[2] = t1;
                ((int*)(dst1 + i))[2] = t0;
                t0 = ((int*)(src + i))[3];
                t1 = ((int*)(src1 + i))[3];
                ((int*)(dst + i))[3] = t1;
                ((int*)(dst1 + i))[3] = t0;
            }
            for( ; i <= size.width - 4; i += 4 )
            {
                int t0 = ((int*)(src + i))[0];
                int t1 = ((int*)(src1 + i))[0];
                ((int*)(dst + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;
            }
        }
        // Byte-wise tail (or the whole row when unaligned).
        for( ; i < size.width; i++ )
        {
            uchar t0 = src[i];
            uchar t1 = src1[i];
            dst[i] = t1;
            dst1[i] = t0;
        }
    }
}
// Flips a 2D array: flip_mode == 0 -> around the x-axis (vertical flip),
// flip_mode > 0 -> around the y-axis (horizontal flip),
// flip_mode < 0 -> around both axes (180-degree rotation).
void flip( const Mat& src, Mat& dst, int flip_mode )
{
    // Horizontal-flip implementations indexed by element size in bytes;
    // unsupported sizes are 0.
    static FlipHorizFunc tab[] =
    {
        0,
        flipHoriz_<uchar>, // 1
        flipHoriz_<ushort>, // 2
        flipHoriz_<Vec<uchar,3> >, // 3
        flipHoriz_<int>, // 4
        0,
        flipHoriz_<Vec<ushort,3> >, // 6
        0,
        flipHoriz_<int64>, // 8
        0, 0, 0,
        flipHoriz_<Vec<int,3> >, // 12
        0, 0, 0,
        flipHoriz_<Vec<int64,2> >, // 16
        0, 0, 0, 0, 0, 0, 0,
        flipHoriz_<Vec<int64,3> >, // 24
        0, 0, 0, 0, 0, 0, 0,
        flipHoriz_<Vec<int64,4> > // 32
    };

    dst.create( src.size(), src.type() );

    if( flip_mode == 0 )
        flipVert( src, dst );
    else
    {
        int esz = (int)src.elemSize();
        CV_Assert( esz <= 32 );
        FlipHorizFunc func = tab[esz];
        CV_Assert( func != 0 );

        if( flip_mode > 0 )
            func( src, dst, false );
        else if( src.data != dst.data )
            func( src, dst, true );
        else
        {
            // In-place 180-degree rotation: horizontal flip in place,
            // then vertical flip in place.
            func( dst, dst, false );
            flipVert( dst, dst );
        }
    }
}
void repeat(const Mat& src, int ny, int nx, Mat& dst)
{
dst.create(src.rows*ny, src.cols*nx, src.type());
Size ssize = src.size(), dsize = dst.size();
int esz = (int)src.elemSize();
int x, y;
ssize.width *= esz; dsize.width *= esz;
for( y = 0; y < ssize.height; y++ )
{
for( x = 0; x < dsize.width; x += ssize.width )
memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width );
}
for( ; y < dsize.height; y++ )
memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width );
}
}
/* dst = src */
CV_IMPL void
cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
{
    // Sparse-matrix path: deep-copy the node heap and hash table.
    if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
    {
        CV_Assert( maskarr == 0 );
        CvSparseMat* src1 = (CvSparseMat*)srcarr;
        CvSparseMat* dst1 = (CvSparseMat*)dstarr;
        CvSparseMatIterator iterator;
        CvSparseNode* node;

        dst1->dims = src1->dims;
        memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
        dst1->valoffset = src1->valoffset;
        dst1->idxoffset = src1->idxoffset;
        cvClearSet( dst1->heap );

        // Regrow the destination hash table if the source holds more
        // active nodes than the destination table is sized for.
        if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
        {
            cvFree( &dst1->hashtable );
            dst1->hashsize = src1->hashsize;
            dst1->hashtable =
                (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
        }

        memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));

        // Clone every node, re-linking each copy into the destination's
        // bucket chain (hashsize is a power of two, hence the & mask).
        for( node = cvInitSparseMatIterator( src1, &iterator );
             node != 0; node = cvGetNextSparseNode( &iterator ))
        {
            CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
            int tabidx = node->hashval & (dst1->hashsize - 1);
            CV_MEMCPY_AUTO( node_copy, node, dst1->heap->elem_size );
            node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
            dst1->hashtable[tabidx] = node_copy;
        }
        return;
    }

    cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
    CV_Assert( src.depth() == dst.depth() && src.size() == dst.size() );

    // Honor the COI (channel of interest) set on IplImage headers by
    // copying just that channel through mixChannels.
    int coi1 = 0, coi2 = 0;
    if( CV_IS_IMAGE(srcarr) )
        coi1 = cvGetImageCOI((const IplImage*)srcarr);
    if( CV_IS_IMAGE(dstarr) )
        coi2 = cvGetImageCOI((const IplImage*)dstarr);

    if( coi1 || coi2 )
    {
        CV_Assert( (coi1 != 0 || src.channels() == 1) &&
            (coi2 != 0 || dst.channels() == 1) );

        int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
        cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
        return;
    }
    else
        CV_Assert( src.channels() == dst.channels() );

    if( !maskarr )
        src.copyTo(dst);
    else
        src.copyTo(dst, cv::cvarrToMat(maskarr));
}
// C-API wrapper: fills arr with the scalar value, optionally through an
// 8-bit mask.
CV_IMPL void
cvSet( void* arr, CvScalar value, const void* maskarr )
{
    cv::Mat dst = cv::cvarrToMat(arr);
    if( maskarr )
        dst.setTo(value, cv::cvarrToMat(maskarr));
    else
        dst = value;
}
CV_IMPL void
cvSetZero( CvArr* arr )
{
    // Sparse matrices are zeroed by dropping all stored nodes: clear the
    // node heap and the hash buckets.
    if( CV_IS_SPARSE_MAT(arr) )
    {
        CvSparseMat* mat1 = (CvSparseMat*)arr;
        cvClearSet( mat1->heap );
        if( mat1->hashtable )
            memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0]));
        return;
    }
    // Dense path: scalar-fill with zero.
    cv::Mat m = cv::cvarrToMat(arr);
    m = cv::Scalar(0);
}
// C-API wrapper around cv::flip. A NULL dstarr requests an in-place flip.
CV_IMPL void
cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
{
    cv::Mat src = cv::cvarrToMat(srcarr);
    cv::Mat dst = dstarr ? cv::cvarrToMat(dstarr) : src;
    CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
    cv::flip( src, dst, flip_mode );
}
CV_IMPL void
cvRepeat( const CvArr* srcarr, CvArr* dstarr )
{
    // C-API wrapper: dst must be an exact integer multiple of src in both
    // dimensions; cv::repeat does the actual tiling.
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
    CV_Assert( src.type() == dst.type() &&
        dst.rows % src.rows == 0 && dst.cols % src.cols == 0 );
    cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst);
}
/* End of file. */

File diff suppressed because it is too large Load Diff

2334
modules/core/src/drawing.cpp Normal file

File diff suppressed because it is too large Load Diff

2634
modules/core/src/dxt.cpp Normal file

File diff suppressed because it is too large Load Diff

211
modules/core/src/flann.cpp Normal file
View File

@@ -0,0 +1,211 @@
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
#include "precomp.hpp"
#include "flann/flann.hpp"
namespace cv
{
namespace flann {
// Factory methods: each IndexParams subclass builds the corresponding
// FLANN index over the dataset. Every variant requires a continuous
// CV_32F matrix; each row is handed to FLANN as one feature vector.

// Brute-force (linear scan) index.
::flann::Index* LinearIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::LinearIndexParams());
}

// Randomized kd-tree forest with "trees" parallel trees.
::flann::Index* KDTreeIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::KDTreeIndexParams(trees));
}

// Hierarchical k-means tree index.
::flann::Index* KMeansIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::KMeansIndexParams(branching,iterations, (::flann_centers_init_t)centers_init, cb_index));
}

// Combination of kd-tree and k-means indices.
::flann::Index* CompositeIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::CompositeIndexParams(trees, branching, iterations, (::flann_centers_init_t)centers_init, cb_index));
}

// Index type auto-selected by FLANN to meet the target precision within
// the given build-time/memory trade-offs.
::flann::Index* AutotunedIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::AutotunedIndexParams(target_precision, build_weight, memory_weight, sample_fraction));
}

// Loads a previously saved index from "filename" instead of building one.
::flann::Index* SavedIndexParams::createIndex(const Mat& dataset) const
{
    CV_Assert(dataset.type() == CV_32F);
    CV_Assert(dataset.isContinuous());
    // TODO: fix ::flann::Matrix class so it can be constructed with a const float*
    ::flann::Matrix<float> mat(dataset.rows, dataset.cols, (float*)dataset.ptr<float>(0));

    return new ::flann::Index(mat, ::flann::SavedIndexParams(filename));
}
// Builds (or loads) the underlying FLANN index as dictated by the
// concrete IndexParams subclass. The Index owns nnIndex.
Index::Index(const Mat& dataset, const IndexParams& params)
{
    nnIndex = params.createIndex(dataset);
}

Index::~Index()
{
    delete nnIndex;
}
// k-nearest-neighbour search for a single query vector. The sizes of
// "indices" and "dists" are passed to FLANN as the output row lengths,
// so the caller must pre-size them (presumably to at least knn elements
// — confirm against the FLANN API).
void Index::knnSearch(const vector<float>& query, vector<int>& indices, vector<float>& dists, int knn, const SearchParams& searchParams)
{
    ::flann::Matrix<float> m_query(1, query.size(), (float*)&query[0]);
    ::flann::Matrix<int> m_indices(1, indices.size(), &indices[0]);
    ::flann::Matrix<float> m_dists(1, dists.size(), &dists[0]);

    nnIndex->knnSearch(m_query,m_indices,m_dists,knn,::flann::SearchParams(searchParams.checks));
}

// Batch k-NN search: each row of "queries" is one query vector; results
// go to the corresponding rows of indices (CV_32S) and dists (CV_32F).
// All matrices must be continuous.
void Index::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const SearchParams& searchParams)
{
    CV_Assert(queries.type() == CV_32F);
    CV_Assert(queries.isContinuous());
    ::flann::Matrix<float> m_queries(queries.rows, queries.cols, (float*)queries.ptr<float>(0));

    CV_Assert(indices.type() == CV_32S);
    CV_Assert(indices.isContinuous());
    ::flann::Matrix<int> m_indices(indices.rows, indices.cols, (int*)indices.ptr<int>(0));

    CV_Assert(dists.type() == CV_32F);
    CV_Assert(dists.isContinuous());
    ::flann::Matrix<float> m_dists(dists.rows, dists.cols, (float*)dists.ptr<float>(0));

    nnIndex->knnSearch(m_queries,m_indices,m_dists,knn,::flann::SearchParams(searchParams.checks));
}
// Radius search for a single query vector; forwards to FLANN and returns
// its result count. indices/dists sizes bound how many results FLANN
// may write.
int Index::radiusSearch(const vector<float>& query, vector<int>& indices, vector<float>& dists, float radius, const SearchParams& searchParams)
{
    ::flann::Matrix<float> m_query(1, query.size(), (float*)&query[0]);
    ::flann::Matrix<int> m_indices(1, indices.size(), &indices[0]);
    ::flann::Matrix<float> m_dists(1, dists.size(), &dists[0]);

    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,::flann::SearchParams(searchParams.checks));
}

// Mat-based radius-search variant: query must be CV_32F, indices CV_32S,
// dists CV_32F, and all three continuous.
int Index::radiusSearch(const Mat& query, Mat& indices, Mat& dists, float radius, const SearchParams& searchParams)
{
    CV_Assert(query.type() == CV_32F);
    CV_Assert(query.isContinuous());
    ::flann::Matrix<float> m_query(query.rows, query.cols, (float*)query.ptr<float>(0));

    CV_Assert(indices.type() == CV_32S);
    CV_Assert(indices.isContinuous());
    ::flann::Matrix<int> m_indices(indices.rows, indices.cols, (int*)indices.ptr<int>(0));

    CV_Assert(dists.type() == CV_32F);
    CV_Assert(dists.isContinuous());
    ::flann::Matrix<float> m_dists(dists.rows, dists.cols, (float*)dists.ptr<float>(0));

    return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,::flann::SearchParams(searchParams.checks));
}
// Persists the built index to a file (see FLANN's Index::save).
void Index::save(string filename)
{
    nnIndex->save(filename);
}

// Number of points in the indexed dataset.
int Index::size() const
{
    return nnIndex->size();
}

// Dimensionality of the indexed vectors.
int Index::veclen() const
{
    return nnIndex->veclen();
}
int hierarchicalClustering(const Mat& features, Mat& centers, const KMeansIndexParams& params)
{
CV_Assert(features.type() == CV_32F);
CV_Assert(features.isContinuous());
::flann::Matrix<float> m_features(features.rows, features.cols, (float*)features.ptr<float>(0));
CV_Assert(features.type() == CV_32F);
CV_Assert(features.isContinuous());
::flann::Matrix<float> m_centers(centers.rows, centers.cols, (float*)centers.ptr<float>(0));
return ::flann::hierarchicalClustering(m_features, m_centers, ::flann::KMeansIndexParams(params.branching, params.iterations,
(::flann_centers_init_t)params.centers_init, params.cb_index));
}
}
}

1466
modules/core/src/lapack.cpp Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

3037
modules/core/src/matmul.cpp Normal file

File diff suppressed because it is too large Load Diff

3117
modules/core/src/matrix.cpp Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,45 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
/* End of file. */

View File

@@ -0,0 +1,410 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef _CXCORE_INTERNAL_H_
#define _CXCORE_INTERNAL_H_
#if defined _MSC_VER && _MSC_VER >= 1200
// disable warnings related to inline functions
#pragma warning( disable: 4251 4711 4710 4514 )
#endif
#include "opencv2/core/core.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/core/internal.hpp"
#include <assert.h>
#include <ctype.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Fixed-pattern copy/zero helpers from the legacy C core. They are
// macros (not functions) so they work on any pointer type without a
// call; the underscored locals avoid capture of caller identifiers.

// Copy "len" bytes, one char at a time.
#define CV_MEMCPY_CHAR( dst, src, len ) \
{ \
    size_t _icv_memcpy_i_, _icv_memcpy_len_ = (len); \
    char* _icv_memcpy_dst_ = (char*)(dst); \
    const char* _icv_memcpy_src_ = (const char*)(src); \
    \
    for( _icv_memcpy_i_ = 0; _icv_memcpy_i_ < _icv_memcpy_len_; _icv_memcpy_i_++ ) \
        _icv_memcpy_dst_[_icv_memcpy_i_] = _icv_memcpy_src_[_icv_memcpy_i_]; \
}

// Copy "len" ints; both pointers must be int-aligned (asserted).
#define CV_MEMCPY_INT( dst, src, len ) \
{ \
    size_t _icv_memcpy_i_, _icv_memcpy_len_ = (len); \
    int* _icv_memcpy_dst_ = (int*)(dst); \
    const int* _icv_memcpy_src_ = (const int*)(src); \
    assert( ((size_t)_icv_memcpy_src_&(sizeof(int)-1)) == 0 && \
        ((size_t)_icv_memcpy_dst_&(sizeof(int)-1)) == 0 ); \
    \
    for(_icv_memcpy_i_=0;_icv_memcpy_i_<_icv_memcpy_len_;_icv_memcpy_i_++) \
        _icv_memcpy_dst_[_icv_memcpy_i_] = _icv_memcpy_src_[_icv_memcpy_i_];\
}

// Copy "len" BYTES: word-by-word when len is a multiple of sizeof(int)
// (alignment asserted in that branch), byte-by-byte otherwise.
#define CV_MEMCPY_AUTO( dst, src, len ) \
{ \
    size_t _icv_memcpy_i_, _icv_memcpy_len_ = (len); \
    char* _icv_memcpy_dst_ = (char*)(dst); \
    const char* _icv_memcpy_src_ = (const char*)(src); \
    if( (_icv_memcpy_len_ & (sizeof(int)-1)) == 0 ) \
    { \
        assert( ((size_t)_icv_memcpy_src_&(sizeof(int)-1)) == 0 && \
            ((size_t)_icv_memcpy_dst_&(sizeof(int)-1)) == 0 ); \
        for( _icv_memcpy_i_ = 0; _icv_memcpy_i_ < _icv_memcpy_len_; \
            _icv_memcpy_i_+=sizeof(int) ) \
        { \
            *(int*)(_icv_memcpy_dst_+_icv_memcpy_i_) = \
                *(const int*)(_icv_memcpy_src_+_icv_memcpy_i_); \
        } \
    } \
    else \
    { \
        for(_icv_memcpy_i_ = 0; _icv_memcpy_i_ < _icv_memcpy_len_; _icv_memcpy_i_++)\
            _icv_memcpy_dst_[_icv_memcpy_i_] = _icv_memcpy_src_[_icv_memcpy_i_]; \
    } \
}

// Zero "len" bytes.
#define CV_ZERO_CHAR( dst, len ) \
{ \
    size_t _icv_memcpy_i_, _icv_memcpy_len_ = (len); \
    char* _icv_memcpy_dst_ = (char*)(dst); \
    \
    for( _icv_memcpy_i_ = 0; _icv_memcpy_i_ < _icv_memcpy_len_; _icv_memcpy_i_++ ) \
        _icv_memcpy_dst_[_icv_memcpy_i_] = '\0'; \
}

// Zero "len" ints; dst must be int-aligned (asserted).
#define CV_ZERO_INT( dst, len ) \
{ \
    size_t _icv_memcpy_i_, _icv_memcpy_len_ = (len); \
    int* _icv_memcpy_dst_ = (int*)(dst); \
    assert( ((size_t)_icv_memcpy_dst_&(sizeof(int)-1)) == 0 ); \
    \
    for(_icv_memcpy_i_=0;_icv_memcpy_i_<_icv_memcpy_len_;_icv_memcpy_i_++) \
        _icv_memcpy_dst_[_icv_memcpy_i_] = 0; \
}
namespace cv
{

// Lookup table mapping x in [-128, 255] to (float)x; see CV_8TO32F.
// -128.f ... 255.f
extern const float g_8x32fTab[];
#define CV_8TO32F(x)  cv::g_8x32fTab[(x)+128]

// Lookup table of squares for x in [-255, 255]; see CV_SQR_8U.
extern const ushort g_8x16uSqrTab[];
#define CV_SQR_8U(x)  cv::g_8x16uSqrTab[(x)+255]

// Stroke data for the built-in Hershey vector fonts.
extern const char* g_HersheyGlyphs[];

// Saturation table: clamps t in [-256, 512] to [0, 255]; range-checked
// with assert in debug builds. CV_MIN_8U/CV_MAX_8U use it to compute
// branch-free min/max of two uchars.
extern const uchar g_Saturate8u[];
#define CV_FAST_CAST_8U(t)   (assert(-256 <= (t) && (t) <= 512), cv::g_Saturate8u[(t)+256])
#define CV_MIN_8U(a,b)       ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b)       ((a) + CV_FAST_CAST_8U((b) - (a)))

typedef void (*CopyMaskFunc)(const Mat& src, Mat& dst, const Mat& mask);

// Masked-copy implementations indexed by element size in bytes
// (defined in copy.cpp; valid entries at 1,2,3,4,6,8,12,16,24,32).
extern CopyMaskFunc g_copyMaskFuncTab[];

// Returns the masked-copy function for the given element size,
// asserting that the size is supported.
static inline CopyMaskFunc getCopyMaskFunc(int esz)
{
    CV_Assert( (unsigned)esz <= 32U );
    CopyMaskFunc func = g_copyMaskFuncTab[esz];
    CV_Assert( func != 0 );
    return func;
}

#if defined WIN32 || defined _WIN32
// Per-thread cleanup hooks (Windows builds only).
void deleteThreadAllocData();
void deleteThreadRNGData();
#endif
// Element-wise operation functors used by the generic arithmetic loops.
// type1/type2 are the operand types, rtype the result type; arithmetic
// results are saturation-cast to rtype.

// Saturated a + b.
template<typename T1, typename T2=T1, typename T3=T1> struct OpAdd
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a + b); }
};

// Saturated a - b.
template<typename T1, typename T2=T1, typename T3=T1> struct OpSub
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a - b); }
};

// Saturated b - a (reversed-operand subtraction).
template<typename T1, typename T2=T1, typename T3=T1> struct OpRSub
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(b - a); }
};

// Saturated a * b.
template<typename T1, typename T2=T1, typename T3=T1> struct OpMul
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a * b); }
};

// Saturated a / b (no division-by-zero guard here; callers must ensure it).
template<typename T1, typename T2=T1, typename T3=T1> struct OpDiv
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(T1 a, T2 b) const { return saturate_cast<T3>(a / b); }
};

// min(a, b); no conversion needed, all types identical.
template<typename T> struct OpMin
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(T a, T b) const { return std::min(a, b); }
};

// max(a, b).
template<typename T> struct OpMax
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(T a, T b) const { return std::max(a, b); }
};
// getContinuousSize: when every passed matrix is continuous, the whole
// data block can be traversed as one long row, so the returned size is
// (cols*rows*widthScale) x 1; otherwise it is (cols*widthScale) x rows.
// widthScale is typically the channel count.
inline Size getContinuousSize( const Mat& m1, int widthScale=1 )
{
    return m1.isContinuous() ? Size(m1.cols*m1.rows*widthScale, 1) :
        Size(m1.cols*widthScale, m1.rows);
}

// Two-matrix variant: both must be continuous to collapse.
inline Size getContinuousSize( const Mat& m1, const Mat& m2, int widthScale=1 )
{
    return (m1.flags & m2.flags & Mat::CONTINUOUS_FLAG) != 0 ?
        Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows);
}

// Three-matrix variant.
inline Size getContinuousSize( const Mat& m1, const Mat& m2,
                               const Mat& m3, int widthScale=1 )
{
    return (m1.flags & m2.flags & m3.flags & Mat::CONTINUOUS_FLAG) != 0 ?
        Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows);
}

// Four-matrix variant.
inline Size getContinuousSize( const Mat& m1, const Mat& m2,
                               const Mat& m3, const Mat& m4,
                               int widthScale=1 )
{
    return (m1.flags & m2.flags & m3.flags & m4.flags & Mat::CONTINUOUS_FLAG) != 0 ?
        Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows);
}

// Five-matrix variant.
inline Size getContinuousSize( const Mat& m1, const Mat& m2,
                               const Mat& m3, const Mat& m4,
                               const Mat& m5, int widthScale=1 )
{
    return (m1.flags & m2.flags & m3.flags & m4.flags & m5.flags & Mat::CONTINUOUS_FLAG) != 0 ?
        Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows);
}
// No-op vectorized kernel: matches the VecOp interface used by
// binaryOpC1_ and reports 0 elements processed, so the scalar loop
// handles the whole row.
struct NoVec
{
    int operator()(const void*, const void*, void*, int) const { return 0; }
};
// Generic element-wise binary operation on two matrices. Op supplies the
// per-element scalar operation; VecOp is an optional vectorized kernel
// that processes a prefix of each row and returns the number of elements
// it handled (NoVec handles none).
template<class Op, class VecOp> static void
binaryOpC1_( const Mat& srcmat1, const Mat& srcmat2, Mat& dstmat )
{
    Op op; VecOp vecOp;
    typedef typename Op::type1 T1;
    typedef typename Op::type2 T2;
    typedef typename Op::rtype DT;

    const T1* src1 = (const T1*)srcmat1.data;
    const T2* src2 = (const T2*)srcmat2.data;
    DT* dst = (DT*)dstmat.data;
    // Steps converted from bytes to elements of each respective type.
    size_t step1 = srcmat1.step/sizeof(src1[0]);
    size_t step2 = srcmat2.step/sizeof(src2[0]);
    size_t step = dstmat.step/sizeof(dst[0]);
    Size size = getContinuousSize( srcmat1, srcmat2, dstmat, dstmat.channels() );

    // Degenerate single-column case: skip the vectorized/unrolled path.
    if( size.width == 1 )
    {
        for( ; size.height--; src1 += step1, src2 += step2, dst += step )
            dst[0] = op( src1[0], src2[0] );
        return;
    }

    for( ; size.height--; src1 += step1, src2 += step2, dst += step )
    {
        // Let the vector kernel take the first x elements of the row.
        int x = vecOp(src1, src2, dst, size.width);
        // 4x-unrolled scalar loop for the remainder.
        for( ; x <= size.width - 4; x += 4 )
        {
            DT f0, f1;
            f0 = op( src1[x], src2[x] );
            f1 = op( src1[x+1], src2[x+1] );
            dst[x] = f0;
            dst[x+1] = f1;
            f0 = op(src1[x+2], src2[x+2]);
            f1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = f0;
            dst[x+3] = f1;
        }
        // Tail of 0-3 elements.
        for( ; x < size.width; x++ )
            dst[x] = op( src1[x], src2[x] );
    }
}

typedef void (*BinaryFunc)(const Mat& src1, const Mat& src2, Mat& dst);
// Element-wise binary operation between a (possibly multi-channel)
// matrix and a Scalar. The scalar is expanded into a 12-element pattern
// (12 is divisible by any channel count 1..4), so the 12x-unrolled loop
// keeps channel alignment across iterations.
template<class Op> static void
binarySOpCn_( const Mat& srcmat, Mat& dstmat, const Scalar& _scalar )
{
    Op op;
    typedef typename Op::type1 T;
    typedef typename Op::type2 WT;
    typedef typename Op::rtype DT;
    const T* src0 = (const T*)srcmat.data;
    DT* dst0 = (DT*)dstmat.data;
    size_t step1 = srcmat.step/sizeof(src0[0]);
    size_t step = dstmat.step/sizeof(dst0[0]);
    int cn = dstmat.channels();
    Size size = getContinuousSize( srcmat, dstmat, cn );
    WT scalar[12];
    _scalar.convertTo(scalar, cn, 12);

    for( ; size.height--; src0 += step1, dst0 += step )
    {
        int i, len = size.width;
        const T* src = src0;
        // BUG FIX: was "T* dst = dst0;". dst0 has the destination element
        // type DT; declaring the row pointer with the source type T
        // breaks the instantiation (wrong element type and stride)
        // whenever T != DT.
        DT* dst = dst0;

        // 12x-unrolled main loop.
        for( ; (len -= 12) >= 0; dst += 12, src += 12 )
        {
            DT t0 = op(src[0], scalar[0]);
            DT t1 = op(src[1], scalar[1]);
            dst[0] = t0; dst[1] = t1;
            t0 = op(src[2], scalar[2]);
            t1 = op(src[3], scalar[3]);
            dst[2] = t0; dst[3] = t1;
            t0 = op(src[4], scalar[4]);
            t1 = op(src[5], scalar[5]);
            dst[4] = t0; dst[5] = t1;
            t0 = op(src[6], scalar[6]);
            t1 = op(src[7], scalar[7]);
            dst[6] = t0; dst[7] = t1;
            t0 = op(src[8], scalar[8]);
            t1 = op(src[9], scalar[9]);
            dst[8] = t0; dst[9] = t1;
            t0 = op(src[10], scalar[10]);
            t1 = op(src[11], scalar[11]);
            dst[10] = t0; dst[11] = t1;
        }

        // Tail of fewer than 12 elements; scalar[i] stays channel-aligned
        // because every consumed chunk was a whole multiple of 12.
        for( (len) += 12, i = 0; i < (len); i++ )
            dst[i] = op((WT)src[i], scalar[i]);
    }
}
// Element-wise binary operation between a matrix and one scalar value
// (the same value applied to every element of every channel). The scalar
// is converted once to the working type WT with saturation.
template<class Op> static void
binarySOpC1_( const Mat& srcmat, Mat& dstmat, double _scalar )
{
    Op op;
    typedef typename Op::type1 T;
    typedef typename Op::type2 WT;
    typedef typename Op::rtype DT;
    WT scalar = saturate_cast<WT>(_scalar);
    const T* src = (const T*)srcmat.data;
    DT* dst = (DT*)dstmat.data;
    size_t step1 = srcmat.step/sizeof(src[0]);
    size_t step = dstmat.step/sizeof(dst[0]);
    Size size = srcmat.size();

    // Treat channels as extra columns; collapse rows when continuous.
    size.width *= srcmat.channels();
    if( srcmat.isContinuous() && dstmat.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    for( ; size.height--; src += step1, dst += step )
    {
        int x;
        // 4x-unrolled main loop.
        for( x = 0; x <= size.width - 4; x += 4 )
        {
            DT f0 = op( src[x], scalar );
            DT f1 = op( src[x+1], scalar );
            dst[x] = f0;
            dst[x+1] = f1;
            f0 = op( src[x+2], scalar );
            f1 = op( src[x+3], scalar );
            dst[x+2] = f0;
            dst[x+3] = f1;
        }
        // Tail of 0-3 elements.
        for( ; x < size.width; x++ )
            dst[x] = op( src[x], scalar );
    }
}

typedef void (*BinarySFuncCn)(const Mat& src1, Mat& dst, const Scalar& scalar);
typedef void (*BinarySFuncC1)(const Mat& src1, Mat& dst, double scalar);
}
#endif /*_CXCORE_INTERNAL_H_*/

753
modules/core/src/rand.cpp Normal file
View File

@@ -0,0 +1,753 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/* ////////////////////////////////////////////////////////////////////
//
// Filling CvMat/IplImage instances with random numbers
//
// */
#include "precomp.hpp"
namespace cv
{
///////////////////////////// Functions Declaration //////////////////////////////////////
/*
Multiply-with-carry generator is used here:
temp = ( A*X(n) + carry )
X(n+1) = temp mod (2^32)
carry = temp / (2^32)
*/
// The 64-bit state packs X(n) in the low 32 bits and the carry in the
// high 32 bits, so one multiply-add advances both at once.
#define RNG_NEXT(x) ((uint64)(unsigned)(x)*RNG::A + ((x) >> 32))
/***************************************************************************************\
* Pseudo-Random Number Generators (PRNGs) *
\***************************************************************************************/
/* Fills _arr with uniformly distributed random integers using bit masking
   (only valid when each channel's range size is a power of two).
   _param points to 24 ints: param[0..11] are per-channel lower bounds and
   param[12..23] are per-channel bit masks (range-1), both replicated
   cyclically to fill 12 slots. *state is the 64-bit MWC generator state,
   updated on return. */
template<typename T> static void
RandBits_( Mat& _arr, uint64* state, const void* _param )
{
    uint64 temp = *state;
    const int* param = (const int*)_param;
    // If all masks fit in 8 bits, one 32-bit draw can supply four outputs
    // (one byte each).
    int small_flag = (param[12]|param[13]|param[14]|param[15]) <= 255;
    Size size = getContinuousSize(_arr,_arr.channels());
    for( int y = 0; y < size.height; y++ )
    {
        T* arr = (T*)(_arr.data + _arr.step*y);
        int i, k = 3;
        const int* p = param;
        if( !small_flag )
        {
            for( i = 0; i <= size.width - 4; i += 4 )
            {
                int t0, t1;
                temp = RNG_NEXT(temp);
                t0 = ((int)temp & p[i + 12]) + p[i];
                temp = RNG_NEXT(temp);
                t1 = ((int)temp & p[i + 13]) + p[i+1];
                arr[i] = saturate_cast<T>(t0);
                arr[i+1] = saturate_cast<T>(t1);
                temp = RNG_NEXT(temp);
                t0 = ((int)temp & p[i + 14]) + p[i+2];
                temp = RNG_NEXT(temp);
                t1 = ((int)temp & p[i + 15]) + p[i+3];
                arr[i+2] = saturate_cast<T>(t0);
                arr[i+3] = saturate_cast<T>(t1);
                // i grows by 12 every three iterations; stepping p back by
                // 12 keeps p[i..i+15] inside the 24-entry parameter table.
                if( !--k )
                {
                    k = 3;
                    p -= 12;
                }
            }
        }
        else
        {
            // Small masks: extract four bytes from a single 32-bit draw.
            for( i = 0; i <= size.width - 4; i += 4 )
            {
                int t0, t1, t;
                temp = RNG_NEXT(temp);
                t = (int)temp;
                t0 = (t & p[i + 12]) + p[i];
                t1 = ((t >> 8) & p[i + 13]) + p[i+1];
                arr[i] = saturate_cast<T>(t0);
                arr[i+1] = saturate_cast<T>(t1);
                t0 = ((t >> 16) & p[i + 14]) + p[i + 2];
                t1 = ((t >> 24) & p[i + 15]) + p[i + 3];
                arr[i+2] = saturate_cast<T>(t0);
                arr[i+3] = saturate_cast<T>(t1);
                if( !--k )
                {
                    k = 3;
                    p -= 12;
                }
            }
        }
        // Tail: fewer than 4 elements left in the row.
        for( ; i < size.width; i++ )
        {
            int t0;
            temp = RNG_NEXT(temp);
            t0 = ((int)temp & p[i + 12]) + p[i];
            arr[i] = saturate_cast<T>(t0);
        }
    }
    *state = temp;
}
// Precomputed constants for replacing integer division by 'd' with a
// multiply and two shifts ("magic number" division), used by Randi_ to
// reduce a 32-bit random value into an arbitrary range.
struct DivStruct
{
    unsigned d;     // divisor (range size)
    unsigned M;     // magic multiplier
    int sh1, sh2;   // post-multiply shift amounts
    int delta;      // lower bound added to the reduced value
};
/* Fills _arr with uniform random integers from arbitrary (not necessarily
   power-of-two) per-channel ranges. _param points to 24 ints:
   param[0..11] = lower bounds, param[12..23] = upper bounds (exclusive).
   Division by the range size is replaced by precomputed
   multiply-and-shift constants (see DivStruct). */
template<typename T> static void
Randi_( Mat& _arr, uint64* state, const void* _param )
{
    uint64 temp = *state;
    const int* param = (const int*)_param;
    Size size = getContinuousSize(_arr,_arr.channels());
    int i, k, cn = _arr.channels();
    DivStruct ds[12];
    // Precompute the magic-division constants for each channel...
    for( k = 0; k < cn; k++ )
    {
        ds[k].delta = param[k];
        ds[k].d = (unsigned)(param[k+12] - param[k]);
        // Smallest l with 2^l >= d.
        int l = 0;
        while(((uint64)1 << l) < ds[k].d)
            l++;
        ds[k].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - ds[k].d)/ds[k].d) + 1;
        ds[k].sh1 = min(l, 1);
        ds[k].sh2 = max(l - 1, 0);
    }
    // ...then replicate them cyclically to fill all 12 slots.
    for( ; k < 12; k++ )
        ds[k] = ds[k - cn];
    for( int y = 0; y < size.height; y++ )
    {
        T* arr = (T*)(_arr.data + _arr.step*y);
        const DivStruct* p = ds;
        unsigned t0, t1, v0, v1;
        for( i = 0, k = 3; i <= size.width - 4; i += 4 )
        {
            temp = RNG_NEXT(temp);
            t0 = (unsigned)temp;
            temp = RNG_NEXT(temp);
            t1 = (unsigned)temp;
            // v = t mod d via the magic multiplier, then shifted into
            // [delta, delta + d).
            v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
            v1 = (unsigned)(((uint64)t1 * p[i+1].M) >> 32);
            v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
            v1 = (v1 + ((t1 - v1) >> p[i+1].sh1)) >> p[i+1].sh2;
            v0 = t0 - v0*p[i].d + p[i].delta;
            v1 = t1 - v1*p[i+1].d + p[i+1].delta;
            arr[i] = saturate_cast<T>((int)v0);
            arr[i+1] = saturate_cast<T>((int)v1);
            temp = RNG_NEXT(temp);
            t0 = (unsigned)temp;
            temp = RNG_NEXT(temp);
            t1 = (unsigned)temp;
            v0 = (unsigned)(((uint64)t0 * p[i+2].M) >> 32);
            v1 = (unsigned)(((uint64)t1 * p[i+3].M) >> 32);
            v0 = (v0 + ((t0 - v0) >> p[i+2].sh1)) >> p[i+2].sh2;
            v1 = (v1 + ((t1 - v1) >> p[i+3].sh1)) >> p[i+3].sh2;
            v0 = t0 - v0*p[i+2].d + p[i+2].delta;
            v1 = t1 - v1*p[i+3].d + p[i+3].delta;
            arr[i+2] = saturate_cast<T>((int)v0);
            arr[i+3] = saturate_cast<T>((int)v1);
            // Keep p[i..i+3] inside the 12-entry table (i grows by 12
            // every three iterations).
            if( !--k )
            {
                k = 3;
                p -= 12;
            }
        }
        for( ; i < size.width; i++ )
        {
            temp = RNG_NEXT(temp);
            t0 = (unsigned)temp;
            v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
            v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
            v0 = t0 - v0*p[i].d + p[i].delta;
            arr[i] = saturate_cast<T>((int)v0);
        }
    }
    *state = temp;
}
/* Fills a CV_32F array with uniformly distributed floats. _param points to
   24 floats: param[0..11] = per-channel offsets, param[12..23] =
   per-channel scale factors; each output is (int)rand * scale + offset. */
static void Randf_( Mat& _arr, uint64* state, const void* _param )
{
    uint64 temp = *state;
    const float* param = (const float*)_param;
    Size size = getContinuousSize(_arr,_arr.channels());
    for( int y = 0; y < size.height; y++ )
    {
        float* arr = (float*)(_arr.data + _arr.step*y);
        int i, k = 3;
        const float* p = param;
        for( i = 0; i <= size.width - 4; i += 4 )
        {
            float f0, f1;
            temp = RNG_NEXT(temp);
            f0 = (int)temp*p[i+12] + p[i];
            temp = RNG_NEXT(temp);
            f1 = (int)temp*p[i+13] + p[i+1];
            arr[i] = f0; arr[i+1] = f1;
            temp = RNG_NEXT(temp);
            f0 = (int)temp*p[i+14] + p[i+2];
            temp = RNG_NEXT(temp);
            f1 = (int)temp*p[i+15] + p[i+3];
            arr[i+2] = f0; arr[i+3] = f1;
            // Keep p[i..i+15] inside the 24-entry parameter table.
            if( !--k )
            {
                k = 3;
                p -= 12;
            }
        }
        for( ; i < size.width; i++ )
        {
            temp = RNG_NEXT(temp);
            arr[i] = (int)temp*p[i+12] + p[i];
        }
    }
    *state = temp;
}
/* CV_64F variant of Randf_: the two 32-bit halves of the MWC state are
   swapped into a 64-bit integer to gain extra random mantissa bits before
   scaling. _param points to 24 doubles (offsets then scale factors). */
static void
Randd_( Mat& _arr, uint64* state, const void* _param )
{
    uint64 temp = *state;
    const double* param = (const double*)_param;
    Size size = getContinuousSize(_arr,_arr.channels());
    int64 v = 0;
    for( int y = 0; y < size.height; y++ )
    {
        double* arr = (double*)(_arr.data + _arr.step*y);
        int i, k = 3;
        const double* p = param;
        for( i = 0; i <= size.width - 4; i += 4 )
        {
            double f0, f1;
            temp = RNG_NEXT(temp);
            v = (temp >> 32)|(temp << 32);
            f0 = v*p[i+12] + p[i];
            temp = RNG_NEXT(temp);
            v = (temp >> 32)|(temp << 32);
            f1 = v*p[i+13] + p[i+1];
            arr[i] = f0; arr[i+1] = f1;
            temp = RNG_NEXT(temp);
            v = (temp >> 32)|(temp << 32);
            f0 = v*p[i+14] + p[i+2];
            temp = RNG_NEXT(temp);
            v = (temp >> 32)|(temp << 32);
            f1 = v*p[i+15] + p[i+3];
            arr[i+2] = f0; arr[i+3] = f1;
            // Keep p[i..i+15] inside the 24-entry parameter table.
            if( !--k )
            {
                k = 3;
                p -= 12;
            }
        }
        for( ; i < size.width; i++ )
        {
            temp = RNG_NEXT(temp);
            v = (temp >> 32)|(temp << 32);
            arr[i] = v*p[i+12] + p[i];
        }
    }
    *state = temp;
}
/*
The code below implements the algorithm described in
"The Ziggurat Method for Generating Random Variables"
by Marsaglia and Tsang, Journal of Statistical Software.
*/
/* Fills arr[0..len-1] with standard-normal (mean 0, variance 1) floats
   using the Ziggurat method of Marsaglia & Tsang (128 strips).
   *state is the 64-bit MWC generator state, updated on return. */
static void
Randn_0_1_32f_C1R( float* arr, int len, uint64* state )
{
    const float r = 3.442620f; // The start of the right tail
    const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32
    static unsigned kn[128];
    static float wn[128], fn[128];
    uint64 temp = *state;
    static bool initialized=false;
    int i;
    if( !initialized )
    {
        // One-time construction of the ziggurat strip tables.
        // NOTE(review): this lazy init is unsynchronized; concurrent first
        // calls from several threads could race -- confirm callers.
        const double m1 = 2147483648.0;
        double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3;
        // Set up the tables
        double q = vn/std::exp(-.5*dn*dn);
        kn[0] = (unsigned)((dn/q)*m1);
        kn[1] = 0;
        wn[0] = (float)(q/m1);
        wn[127] = (float)(dn/m1);
        fn[0] = 1.f;
        fn[127] = (float)std::exp(-.5*dn*dn);
        for(i=126;i>=1;i--)
        {
            dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn)));
            kn[i+1] = (unsigned)((dn/tn)*m1);
            tn = dn;
            fn[i] = (float)std::exp(-.5*dn*dn);
            wn[i] = (float)(dn/m1);
        }
        initialized = true;
    }
    for( i = 0; i < len; i++ )
    {
        float x, y;
        for(;;)
        {
            int hz = (int)temp;
            temp = RNG_NEXT(temp);
            int iz = hz & 127;
            x = hz*wn[iz];
            // Fast path: the sample lies strictly inside strip iz.
            if( (unsigned)std::abs(hz) < kn[iz] )
                break;
            if( iz == 0) // iz==0, handles the base strip
            {
                // Sample the tail by the exponential-rejection method.
                do
                {
                    x = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    y = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    x = (float)(-std::log(x+FLT_MIN)*0.2904764);
                    y = (float)-std::log(y+FLT_MIN);
                } // .2904764 is 1/r
                while( y + y < x*x );
                x = hz > 0 ? r + x : -r - x;
                break;
            }
            // iz > 0, handle the wedges of other strips
            y = (unsigned)temp*rng_flt;
            temp = RNG_NEXT(temp);
            if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) )
                break;
        }
        arr[i] = x;
    }
    *state = temp;
}
// Returns one sample from N(0, sigma^2): draws a standard-normal float
// with the Ziggurat generator and scales it by sigma.
double RNG::gaussian(double sigma)
{
    float sample = 0.f;
    Randn_0_1_32f_C1R( &sample, 1, &state );
    return sigma*sample;
}
/* Fills _arr with gaussian random values. Standard-normal floats are
   generated in chunks of RAND_BUF_SIZE via Randn_0_1_32f_C1R, then scaled
   by the per-channel stddev (param[12..23]) and shifted by the per-channel
   mean (param[0..11]). PT is float for all depths except CV_64F.
   RAND_BUF_SIZE (96) is divisible by 1..4, so chunk boundaries stay
   channel-aligned. */
template<typename T, typename PT> static void
Randn_( Mat& _arr, uint64* state, const void* _param )
{
    const int RAND_BUF_SIZE = 96;
    float buffer[RAND_BUF_SIZE];
    const PT* param = (const PT*)_param;
    Size size = getContinuousSize(_arr, _arr.channels());
    for( int y = 0; y < size.height; y++ )
    {
        T* arr = (T*)(_arr.data + _arr.step*y);
        int i, j, len = RAND_BUF_SIZE;
        for( i = 0; i < size.width; i += RAND_BUF_SIZE )
        {
            int k = 3;
            const PT* p = param;
            // Last chunk of the row may be shorter than the buffer.
            if( i + len > size.width )
                len = size.width - i;
            Randn_0_1_32f_C1R( buffer, len, state );
            for( j = 0; j <= len - 4; j += 4 )
            {
                PT f0, f1;
                f0 = buffer[j]*p[j+12] + p[j];
                f1 = buffer[j+1]*p[j+13] + p[j+1];
                arr[i+j] = saturate_cast<T>(f0);
                arr[i+j+1] = saturate_cast<T>(f1);
                f0 = buffer[j+2]*p[j+14] + p[j+2];
                f1 = buffer[j+3]*p[j+15] + p[j+3];
                arr[i+j+2] = saturate_cast<T>(f0);
                arr[i+j+3] = saturate_cast<T>(f1);
                // Keep p[j..j+15] inside the 24-entry parameter table.
                if( --k == 0 )
                {
                    k = 3;
                    p -= 12;
                }
            }
            for( ; j < len; j++ )
                arr[i+j] = saturate_cast<T>(buffer[j]*p[j+12] + p[j]);
        }
    }
}
// Common signature of the per-depth random fillers defined above.
typedef void (*RandFunc)(Mat& dst, uint64* state, const void* param);
/* Fills 'mat' with random values.
   - disttype == UNIFORM: per-channel uniform values in
     [param1.val[i], param2.val[i]).
   - disttype == CV_RAND_NORMAL: gaussian values with mean param1.val[i]
     and standard deviation param2.val[i].
   Per-channel parameters are replicated into 12-entry tables so the
   unrolled fillers can index them cyclically (12 = lcm of 1..4 channels). */
void RNG::fill( Mat& mat, int disttype, const Scalar& param1, const Scalar& param2 )
{
    // Row 0: mask-based uniform (integer depths, power-of-two ranges);
    // row 1: general uniform; row 2: gaussian. Column index is mat.depth().
    static RandFunc rngtab[3][8] =
    {
        {
        RandBits_<uchar>,
        RandBits_<schar>,
        RandBits_<ushort>,
        RandBits_<short>,
        RandBits_<int>, 0, 0, 0},
        {Randi_<uchar>,
        Randi_<schar>,
        Randi_<ushort>,
        Randi_<short>,
        Randi_<int>,
        Randf_, Randd_, 0},
        {Randn_<uchar,float>,
        Randn_<schar,float>,
        Randn_<ushort,float>,
        Randn_<short,float>,
        Randn_<int,float>,
        Randn_<float,float>,
        Randn_<double,double>, 0}
    };
    int depth = mat.depth(), channels = mat.channels();
    double dparam[2][12];
    float fparam[2][12];
    int iparam[2][12];
    void* param = dparam;
    int i, fast_int_mode = 0;
    RandFunc func = 0;
    CV_Assert( channels <= 4 );
    if( disttype == UNIFORM )
    {
        if( depth <= CV_32S )
        {
            // Mask-based ("fast int") mode is possible only when every
            // channel's range size is a power of two that fits in 32 bits.
            for( i = 0, fast_int_mode = 1; i < channels; i++ )
            {
                double a = min(param1.val[i], param2.val[i]);
                double b = max(param1.val[i], param2.val[i]);
                int t0 = iparam[0][i] = cvCeil(a);
                int t1 = iparam[1][i] = cvFloor(b);
                double diff = b - a;
                fast_int_mode &= diff <= 4294967296. && ((t1-t0) & (t1-t0-1)) == 0;
            }
            if( fast_int_mode )
            {
                // Store mask = range-1 instead of the upper bound.
                for( i = 0; i < channels; i++ )
                    iparam[1][i] = iparam[1][i] > iparam[0][i] ? iparam[1][i] - iparam[0][i] - 1 : 0;
            }
            // Replicate the channel parameters cyclically into 12 slots.
            for( ; i < 12; i++ )
            {
                int t0 = iparam[0][i - channels];
                int t1 = iparam[1][i - channels];
                iparam[0][i] = t0;
                iparam[1][i] = t1;
            }
            func = rngtab[!fast_int_mode][depth];
            param = iparam;
        }
        else
        {
            double scale = depth == CV_64F ?
                5.4210108624275221700372640043497e-20 : // 2**-64
                2.3283064365386962890625e-10; // 2**-32
            // for each channel i compute such dparam[0][i] & dparam[1][i],
            // so that a signed 32/64-bit integer X is transformed to
            // the range [param1.val[i], param2.val[i]) using
            // dparam[1][i]*X + dparam[0][i]
            for( i = 0; i < channels; i++ )
            {
                double t0 = param1.val[i];
                double t1 = param2.val[i];
                dparam[0][i] = (t1 + t0)*0.5;
                dparam[1][i] = (t1 - t0)*scale;
            }
            func = rngtab[1][depth];
            param = dparam;
        }
    }
    else if( disttype == CV_RAND_NORMAL )
    {
        for( i = 0; i < channels; i++ )
        {
            double t0 = param1.val[i]; // mean
            double t1 = param2.val[i]; // standard deviation
            dparam[0][i] = t0;
            dparam[1][i] = t1;
        }
        func = rngtab[2][depth];
        param = dparam;
    }
    else
        CV_Error( CV_StsBadArg, "Unknown distribution type" );
    if( param == dparam )
    {
        // Replicate the double parameters into all 12 slots, then narrow
        // to float for non-CV_64F outputs (fillers expect matching types).
        for( i = channels; i < 12; i++ )
        {
            double t0 = dparam[0][i - channels];
            double t1 = dparam[1][i - channels];
            dparam[0][i] = t0;
            dparam[1][i] = t1;
        }
        if( depth != CV_64F )
        {
            for( i = 0; i < 12; i++ )
            {
                fparam[0][i] = (float)dparam[0][i];
                fparam[1][i] = (float)dparam[1][i];
            }
            param = fparam;
        }
    }
    CV_Assert( func != 0);
    func( mat, &state, param );
}
// Fills an n-dimensional array by iterating over its 2D planes and
// delegating each plane to the 2D RNG::fill overload.
void RNG::fill( MatND& mat, int disttype, const Scalar& param1, const Scalar& param2 )
{
    NAryMatNDIterator planeIt(mat);
    int remaining = planeIt.nplanes;
    while( remaining-- > 0 )
    {
        fill( planeIt.planes[0], disttype, param1, param2 );
        ++planeIt;
    }
}
// Per-thread default RNG, kept in thread-local storage so that theRNG()
// never shares generator state between threads.
#ifdef WIN32
#ifdef WINCE
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif
static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES;
// Called from DllMain on thread/process detach to free the TLS RNG
// (Windows TLS has no automatic destructor mechanism).
void deleteThreadRNGData()
{
    if( tlsRNGKey != TLS_OUT_OF_INDEXES )
        delete (RNG*)TlsGetValue( tlsRNGKey );
}
// Returns this thread's default RNG, creating it on first use.
// NOTE(review): the lazy TlsAlloc below is not race-free if first invoked
// from several threads concurrently -- confirm the first call happens
// before worker threads start.
RNG& theRNG()
{
    if( tlsRNGKey == TLS_OUT_OF_INDEXES )
    {
        tlsRNGKey = TlsAlloc();
        CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
    }
    RNG* rng = (RNG*)TlsGetValue( tlsRNGKey );
    if( !rng )
    {
        rng = new RNG;
        TlsSetValue( tlsRNGKey, rng );
    }
    return *rng;
}
#else
static pthread_key_t tlsRNGKey = 0;
// pthread TLS destructor: runs at thread exit to free that thread's RNG.
static void deleteRNG(void* data)
{
    delete (RNG*)data;
}
// Returns this thread's default RNG, creating it on first use.
// NOTE(review): the lazy pthread_key_create is likewise not race-free on
// concurrent first calls.
RNG& theRNG()
{
    if( !tlsRNGKey )
    {
        int errcode = pthread_key_create(&tlsRNGKey, deleteRNG);
        CV_Assert(errcode == 0);
    }
    RNG* rng = (RNG*)pthread_getspecific(tlsRNGKey);
    if( !rng )
    {
        rng = new RNG;
        pthread_setspecific(tlsRNGKey, rng);
    }
    return *rng;
}
#endif
/* Shuffles matrix elements by performing iterFactor*(rows*cols) random
   transpositions of element pairs.
   NOTE(review): "(unsigned)rng % sz" has modulo bias unless sz divides
   2^32; likely acceptable for shuffling, but worth confirming. */
template<typename T> static void
randShuffle_( Mat& _arr, RNG& rng, double iterFactor )
{
    int sz = _arr.rows*_arr.cols, iters = cvRound(iterFactor*sz);
    if( _arr.isContinuous() )
    {
        // Continuous data: treat as one flat array.
        T* arr = (T*)_arr.data;
        for( int i = 0; i < iters; i++ )
        {
            int j = (unsigned)rng % sz, k = (unsigned)rng % sz;
            std::swap( arr[j], arr[k] );
        }
    }
    else
    {
        // Strided data: convert each flat index into (row, col).
        uchar* data = _arr.data;
        size_t step = _arr.step;
        int cols = _arr.cols;
        for( int i = 0; i < iters; i++ )
        {
            int j1 = (unsigned)rng % sz, k1 = (unsigned)rng % sz;
            int j0 = j1/cols, k0 = k1/cols;
            j1 -= j0*cols; k1 -= k0*cols;
            std::swap( ((T*)(data + step*j0))[j1], ((T*)(data + step*k0))[k1] );
        }
    }
}
// Dispatcher type for the element-size-specific shuffle implementations.
typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor );
/* Randomly permutes matrix elements using iterFactor*total random
   transpositions. The implementation is chosen by element size in bytes
   (indices 1..32 below); sizes with a 0 entry are unsupported. */
void randShuffle( Mat& dst, double iterFactor, RNG* _rng )
{
    // Indexed by dst.elemSize(); the element type only matters for its
    // size, hence the Vec<...> stand-ins for multi-byte elements.
    RandShuffleFunc tab[] =
    {
        0,
        randShuffle_<uchar>, // 1
        randShuffle_<ushort>, // 2
        randShuffle_<Vec<uchar,3> >, // 3
        randShuffle_<int>, // 4
        0,
        randShuffle_<Vec<ushort,3> >, // 6
        0,
        randShuffle_<int64>, // 8
        0, 0, 0,
        randShuffle_<Vec<int,3> >, // 12
        0, 0, 0,
        randShuffle_<Vec<int64,2> >, // 16
        0, 0, 0, 0, 0, 0, 0,
        randShuffle_<Vec<int64,3> >, // 24
        0, 0, 0, 0, 0, 0, 0,
        randShuffle_<Vec<int64,4> > // 32
    };
    RNG& rng = _rng ? *_rng : theRNG();
    CV_Assert( dst.elemSize() <= 32 );
    RandShuffleFunc func = tab[dst.elemSize()];
    CV_Assert( func != 0 );
    func( dst, rng, iterFactor );
}
}
/* C API wrapper around cv::RNG::fill. The CvRNG value is reinterpreted as
   the cv::RNG state; this cast is valid only while cv::RNG remains the
   64-bit MWC generator (see the note below). */
CV_IMPL void
cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
{
    cv::Mat mat = cv::cvarrToMat(arr);
    // !!! this will only work for current 64-bit MWC RNG !!!
    cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
    rng.fill(mat, disttype == CV_RAND_NORMAL ?
        cv::RNG::NORMAL : cv::RNG::UNIFORM, param1, param2 );
}
// C API wrapper: shuffles the elements of arr in place, using either the
// caller-supplied RNG state or the per-thread default RNG.
// !!! the CvRNG cast only works for the current 64-bit MWC RNG !!!
CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
{
    cv::Mat mat = cv::cvarrToMat(arr);
    if( _rng )
        cv::randShuffle( mat, iter_factor, (cv::RNG*)_rng );
    else
        cv::randShuffle( mat, iter_factor, &cv::theRNG() );
}
/* End of file. */

1342
modules/core/src/stat.cpp Normal file

File diff suppressed because it is too large Load Diff

720
modules/core/src/system.cpp Normal file
View File

@@ -0,0 +1,720 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if defined WIN32 || defined WIN64 || defined _WIN64 || defined WINCE
#include <tchar.h>
#if defined _MSC_VER
#if _MSC_VER >= 1400
#include <intrin.h>
#elif defined _M_IX86
// Fallback for pre-VS2005 x86 builds that lack <intrin.h>: executes the
// CPUID instruction with EAX=1 and stores EAX,EBX,ECX,EDX into cpuid_data.
// The second parameter (the leaf) is ignored -- leaf 1 is always queried.
// ebx/edi are pushed/popped because the x86 ABI requires them preserved.
static void __cpuid(int* cpuid_data, int)
{
    __asm
    {
        push ebx
        push edi
        mov edi, cpuid_data
        mov eax, 1
        cpuid
        mov [edi], eax
        mov [edi + 4], ebx
        mov [edi + 8], ecx
        mov [edi + 12], edx
        pop edi
        pop ebx
    }
}
#endif
#endif
#else
#include <pthread.h>
#include <sys/time.h>
#include <time.h>
#ifdef __MACH__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
#endif
#ifdef _OPENMP
#include "omp.h"
#endif
#include <stdarg.h>
namespace cv
{
/* Detects the CPU SIMD capabilities once at startup via the CPUID
   instruction (leaf 1) and caches per-feature flags in 'have'. */
struct HWFeatures
{
    enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
    // Default state: no features reported (used when optimizations are
    // globally disabled, see setUseOptimized).
    HWFeatures()
    {
        memset( have, 0, sizeof(have) );
        x86_family = 0;
    }
    // Runs CPUID leaf 1 and decodes the EDX/ECX feature bits.
    static HWFeatures initialize()
    {
        HWFeatures f;
        int cpuid_data[4]={0,0,0,0};
#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
        __cpuid(cpuid_data, 1);
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
#ifdef __x86_64__
        asm __volatile__
        (
         "movl $1, %%eax\n\t"
         "cpuid\n\t"
         :[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3])
         :
         : "cc"
        );
#else
        // 32-bit PIC builds reserve EBX, so it is saved/restored manually.
        // cpuid_data[1] (EBX) is not captured here; none of the bits
        // checked below come from EBX.
        asm volatile
        (
         "pushl %%ebx\n\t"
         "movl $1,%%eax\n\t"
         "cpuid\n\t"
         "popl %%ebx\n\t"
         : "=a"(cpuid_data[0]), "=c"(cpuid_data[2]), "=d"(cpuid_data[3])
         :
         : "cc"
        );
#endif
#endif
        f.x86_family = (cpuid_data[0] >> 8) & 15;
        if( f.x86_family >= 6 )
        {
            // EDX/ECX feature bit positions per the CPUID leaf-1 layout.
            f.have[CV_CPU_MMX] = (cpuid_data[3] & (1 << 23)) != 0;
            f.have[CV_CPU_SSE] = (cpuid_data[3] & (1<<25)) != 0;
            f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0;
            f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0;
            f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0;
            f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0;
            f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0;
            f.have[CV_CPU_AVX] = (cpuid_data[2] & (1<<28)) != 0;
        }
        return f;
    }
    int x86_family;               // CPU family id from CPUID (bits 8..11 of EAX)
    bool have[MAX_FEATURE+1];     // indexed by CV_CPU_* constants
};
// Two static feature sets: the real detected one and an all-false one.
// setUseOptimized() flips currentFeatures between them.
static HWFeatures featuresEnabled = HWFeatures::initialize(), featuresDisabled = HWFeatures();
static HWFeatures* currentFeatures = &featuresEnabled;
// Returns whether the given CV_CPU_* feature is available (and enabled).
bool checkHardwareSupport(int feature)
{
    CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE );
    return currentFeatures->have[feature];
}
#ifdef HAVE_IPP
volatile bool useOptimizedFlag = true;
// Initializes the static IPP dispatcher once at library load time.
struct IPPInitializer
{
    IPPInitializer() { ippStaticInit(); }
};
IPPInitializer ippInitializer;
#else
volatile bool useOptimizedFlag = false;
#endif
// Globally enables/disables optimized code paths: when disabled, feature
// queries report no CPU capabilities at all.
void setUseOptimized( bool flag )
{
    useOptimizedFlag = flag;
    currentFeatures = flag ? &featuresEnabled : &featuresDisabled;
}
bool useOptimized()
{
    return useOptimizedFlag;
}
/* Monotonic tick counter for timing; convert to seconds by dividing by
   getTickFrequency(). Backends: QueryPerformanceCounter (Windows),
   CLOCK_MONOTONIC (Linux), mach_absolute_time (Mac), gettimeofday
   (everything else). */
int64 getTickCount()
{
#if defined WIN32 || defined WIN64 || defined _WIN64 || defined WINCE
    LARGE_INTEGER counter;
    QueryPerformanceCounter( &counter );
    return (int64)counter.QuadPart;
#elif defined __linux || defined __linux__
    struct timespec tp;
    clock_gettime(CLOCK_MONOTONIC, &tp);
    return (int64)tp.tv_sec*1000000000 + tp.tv_nsec;
#elif defined __MACH__
    return (int64)mach_absolute_time();
#else
    struct timeval tv;
    struct timezone tz;
    gettimeofday( &tv, &tz );
    return (int64)tv.tv_sec*1000000 + tv.tv_usec;
#endif
}
/* Number of ticks (as returned by getTickCount) per second. Matches the
   backend used by getTickCount: performance-counter frequency on Windows,
   1e9 (nanoseconds) on Linux, timebase-derived on Mac, 1e6 otherwise. */
double getTickFrequency()
{
#if defined WIN32 || defined WIN64 || defined _WIN64 || defined WINCE
    LARGE_INTEGER freq;
    QueryPerformanceFrequency(&freq);
    return (double)freq.QuadPart;
#elif defined __linux || defined __linux__
    return 1e9;
#elif defined __MACH__
    // NOTE(review): lazy init of 'freq' is benign if racy (every thread
    // computes the same value), but is not strictly synchronized.
    static double freq = 0;
    if( freq == 0 )
    {
        mach_timebase_info_data_t sTimebaseInfo;
        mach_timebase_info(&sTimebaseInfo);
        freq = sTimebaseInfo.denom*1e9/sTimebaseInfo.numer;
    }
    return freq;
#else
    return 1e6;
#endif
}
// Raw CPU timestamp counter (RDTSC, or the PowerPC timebase). Unlike
// getTickCount(), the unit is CPU clocks; on platforms with no known
// counter instruction it falls back to getTickCount().
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__ || defined __ppc__)
#if defined(__i386__)
int64 getCPUTickCount(void)
{
    int64 x;
    // 0x0f 0x31 is the RDTSC opcode; "=A" collects the edx:eax pair.
    __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#elif defined(__x86_64__)
int64 getCPUTickCount(void)
{
    unsigned hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return (int64)lo | ((int64)hi << 32);
}
#elif defined(__ppc__)
int64 getCPUTickCount(void)
{
    int64 result=0;
    unsigned upper, lower, tmp;
    // Re-read the upper half until it is stable across the lower-half
    // read (guards against a carry between the two reads).
    __asm__ volatile(
"0: \n"
"\tmftbu %0 \n"
"\tmftb %1 \n"
"\tmftbu %2 \n"
"\tcmpw %2,%0 \n"
"\tbne 0b \n"
: "=r"(upper),"=r"(lower),"=r"(tmp)
);
    return lower | ((int64)upper << 32);
}
#else
#error "RDTSC not defined"
#endif
#elif defined _MSC_VER && defined WIN32 && !defined _WIN64
int64 getCPUTickCount(void)
{
    // Emits RDTSC directly. The 64-bit result is left in edx:eax, which is
    // also the 32-bit MSVC return-register pair for int64, so no explicit
    // return statement is needed.
    __asm _emit 0x0f;
    __asm _emit 0x31;
}
#else
int64 getCPUTickCount()
{
    return getTickCount();
}
#endif
// Lazily-initialized thread configuration used by the functions below.
static int numThreads = 0;
static int numProcs = 0;
// Returns the number of worker threads OpenCV will use (always 1 in
// non-OpenMP builds). Triggers lazy initialization on first call.
int getNumThreads(void)
{
    if( !numProcs )
        setNumThreads(0);
    return numThreads;
}
// Sets the worker-thread count; values <= 0 mean "use all processors".
// The parameter is only named (and used) in OpenMP builds.
void setNumThreads( int
#ifdef _OPENMP
threads
#endif
)
{
    if( !numProcs )
    {
#ifdef _OPENMP
        numProcs = omp_get_num_procs();
#else
        numProcs = 1;
#endif
    }
#ifdef _OPENMP
    if( threads <= 0 )
        threads = numProcs;
    else
        threads = MIN( threads, numProcs );
    numThreads = threads;
#else
    numThreads = 1;
#endif
}
// Index of the calling worker thread (0 in non-OpenMP builds).
int getThreadNum(void)
{
#ifdef _OPENMP
    return omp_get_thread_num();
#else
    return 0;
#endif
}
/* Formats a string printf-style and returns it as std::string.
   The result is truncated to the 64 KiB internal buffer if longer. */
std::string format( const char* fmt, ... )
{
    char buf[1 << 16];
    va_list args;
    va_start( args, fmt );
    // vsnprintf bounds the write -- the previous vsprintf could overflow
    // the buffer for very long formatted output.
    vsnprintf( buf, sizeof(buf), fmt, args );
    va_end( args );  // was missing: every va_start must be paired with va_end
    return std::string(buf);
}
// User-installed error callback (see redirectError) and its opaque data.
static CvErrorCallback customErrorCallback = 0;
static void* customErrorCallbackData = 0;
// When true, cv::error() deliberately crashes so a debugger can trap at
// the error site.
static bool breakOnError = false;
/* Enables/disables the deliberate crash inside cv::error().
   Returns the previous setting. */
bool setBreakOnError(bool value)
{
    bool prevVal = breakOnError;
    breakOnError = value;
    return prevVal;
}
/* Reports a fatal error: invokes the user-installed error callback if one
   is set, otherwise prints a formatted message to stderr. If
   setBreakOnError(true) was called, deliberately dereferences a null
   pointer so a debugger traps at the error site. Always ends by throwing
   the exception. */
void error( const Exception& exc )
{
    if (customErrorCallback != 0)
        customErrorCallback(exc.code, exc.func.c_str(), exc.err.c_str(),
                            exc.file.c_str(), exc.line, customErrorCallbackData);
    else
    {
        const char* errorStr = cvErrorStr(exc.code);
        char buf[1 << 16];
        // snprintf bounds the write -- the previous sprintf could overflow
        // the stack buffer when the error description is very long.
        snprintf( buf, sizeof(buf), "OpenCV Error: %s (%s) in %s, file %s, line %d",
            errorStr, exc.err.c_str(), exc.func.size() > 0 ?
            exc.func.c_str() : "unknown function", exc.file.c_str(), exc.line );
        fprintf( stderr, "%s\n", buf );
        fflush( stderr );
    }
    if(breakOnError)
    {
        // Intentional null dereference: lets an attached debugger stop
        // exactly where the error happened.
        static volatile int* p = 0;
        *p = 0;
    }
    throw exc;
}
/* Installs a user error callback, invoked by cv::error() instead of the
   default stderr report. Returns the previously installed callback; the
   previous user data is returned through prevUserdata when non-null. */
CvErrorCallback
redirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata)
{
    if( prevUserdata )
        *prevUserdata = customErrorCallbackData;
    CvErrorCallback prevCallback = customErrorCallback;
    customErrorCallback = errCallback;
    customErrorCallbackData = userdata;
    return prevCallback;
}
}
/*CV_IMPL int
cvGuiBoxReport( int code, const char *func_name, const char *err_msg,
const char *file, int line, void* )
{
#if (!defined WIN32 && !defined WIN64) || defined WINCE
return cvStdErrReport( code, func_name, err_msg, file, line, 0 );
#else
if( code != CV_StsBackTrace && code != CV_StsAutoTrace )
{
size_t msg_len = strlen(err_msg ? err_msg : "") + 1024;
char* message = (char*)alloca(msg_len);
char title[100];
wsprintf( message, "%s (%s)\nin function %s, %s(%d)\n\n"
"Press \"Abort\" to terminate application.\n"
"Press \"Retry\" to debug (if the app is running under debugger).\n"
"Press \"Ignore\" to continue (this is not safe).\n",
cvErrorStr(code), err_msg ? err_msg : "no description",
func_name, file, line );
wsprintf( title, "OpenCV GUI Error Handler" );
int answer = MessageBox( NULL, message, title, MB_ICONERROR|MB_ABORTRETRYIGNORE|MB_SYSTEMMODAL );
if( answer == IDRETRY )
{
CV_DBG_BREAK();
}
return answer != IDIGNORE;
}
return 0;
#endif
}*/
// C API: queries the cached CPU feature flags (see cv::HWFeatures).
CV_IMPL int cvCheckHardwareSupport(int feature)
{
    CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE );
    return cv::currentFeatures->have[feature];
}
// C API: toggles optimized code paths; returns the previous mode.
CV_IMPL int cvUseOptimized( int flag )
{
    int prevMode = cv::useOptimizedFlag;
    cv::setUseOptimized( flag != 0 );
    return prevMode;
}
// Thin C API wrappers around the cv:: implementations above.
CV_IMPL int64 cvGetTickCount(void)
{
    return cv::getTickCount();
}
// Note: the C API historically reports ticks per MICROsecond, hence the
// 1e-6 factor relative to cv::getTickFrequency().
CV_IMPL double cvGetTickFrequency(void)
{
    return cv::getTickFrequency()*1e-6;
}
CV_IMPL void cvSetNumThreads(int nt)
{
    cv::setNumThreads(nt);
}
CV_IMPL int cvGetNumThreads()
{
    return cv::getNumThreads();
}
CV_IMPL int cvGetThreadNum()
{
    return cv::getThreadNum();
}
CV_IMPL CvErrorCallback
cvRedirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata)
{
    return cv::redirectError(errCallback, userdata, prevUserdata);
}
/* Legacy error-reporting entry points, kept only for C API compatibility.
   Errors now propagate as C++ exceptions through cv::error(), so these
   report nothing and return 0. */
CV_IMPL int cvNulDevReport( int, const char*, const char*,
                            const char*, int, void* )
{
    return 0;
}
CV_IMPL int cvStdErrReport( int, const char*, const char*,
                            const char*, int, void* )
{
    return 0;
}
CV_IMPL int cvGuiBoxReport( int, const char*, const char*,
                            const char*, int, void* )
{
    return 0;
}
CV_IMPL int cvGetErrInfo( const char**, const char**, const char**, int* )
{
    return 0;
}
/* Maps an OpenCV status/error code to a human-readable description.
   Unknown codes are formatted into a static buffer, so the returned
   pointer is not thread-safe (nor stable across calls) in that case.
   NOTE(review): "occured" below is a typo ("occurred"); the text is kept
   byte-identical in case callers match on it. */
CV_IMPL const char* cvErrorStr( int status )
{
    static char buf[256];
    switch (status)
    {
    case CV_StsOk : return "No Error";
    case CV_StsBackTrace : return "Backtrace";
    case CV_StsError : return "Unspecified error";
    case CV_StsInternal : return "Internal error";
    case CV_StsNoMem : return "Insufficient memory";
    case CV_StsBadArg : return "Bad argument";
    case CV_StsNoConv : return "Iterations do not converge";
    case CV_StsAutoTrace : return "Autotrace call";
    case CV_StsBadSize : return "Incorrect size of input array";
    case CV_StsNullPtr : return "Null pointer";
    case CV_StsDivByZero : return "Division by zero occured";
    case CV_BadStep : return "Image step is wrong";
    case CV_StsInplaceNotSupported : return "Inplace operation is not supported";
    case CV_StsObjectNotFound : return "Requested object was not found";
    case CV_BadDepth : return "Input image depth is not supported by function";
    case CV_StsUnmatchedFormats : return "Formats of input arguments do not match";
    case CV_StsUnmatchedSizes : return "Sizes of input arguments do not match";
    case CV_StsOutOfRange : return "One of arguments\' values is out of range";
    case CV_StsUnsupportedFormat : return "Unsupported format or combination of formats";
    case CV_BadCOI : return "Input COI is not supported";
    case CV_BadNumChannels : return "Bad number of channels";
    case CV_StsBadFlag : return "Bad flag (parameter or structure field)";
    case CV_StsBadPoint : return "Bad parameter of type CvPoint";
    case CV_StsBadMask : return "Bad type of mask argument";
    case CV_StsParseError : return "Parsing error";
    case CV_StsNotImplemented : return "The function/feature is not implemented";
    case CV_StsBadMemBlock : return "Memory block has been corrupted";
    case CV_StsAssert : return "Assertion failed";
    };
    sprintf(buf, "Unknown %s code %d", status >= 0 ? "status":"error", status);
    return buf;
}
/* Legacy error-mode/status C API: global error state is no longer kept
   (errors are thrown as exceptions), so these are inert stubs. */
CV_IMPL int cvGetErrMode(void)
{
    return 0;
}
CV_IMPL int cvSetErrMode(int)
{
    return 0;
}
CV_IMPL int cvGetErrStatus()
{
    return 0;
}
CV_IMPL void cvSetErrStatus(int)
{
}
// C API error entry point: wraps the arguments into cv::Exception and
// throws via cv::error().
CV_IMPL void cvError( int code, const char* func_name,
                      const char* err_msg,
                      const char* file_name, int line )
{
    cv::error(cv::Exception(code, err_msg, func_name, file_name, line));
}
/* function, which converts int to int */
// Maps legacy IPP-style error codes to the corresponding CV_Sts* codes;
// anything unrecognized collapses to the generic CV_StsError.
CV_IMPL int
cvErrorFromIppStatus( int status )
{
    switch (status)
    {
    case CV_BADSIZE_ERR: return CV_StsBadSize;
    case CV_BADMEMBLOCK_ERR: return CV_StsBadMemBlock;
    case CV_NULLPTR_ERR: return CV_StsNullPtr;
    case CV_DIV_BY_ZERO_ERR: return CV_StsDivByZero;
    case CV_BADSTEP_ERR: return CV_BadStep ;
    case CV_OUTOFMEM_ERR: return CV_StsNoMem;
    case CV_BADARG_ERR: return CV_StsBadArg;
    case CV_NOTDEFINED_ERR: return CV_StsError;
    case CV_INPLACE_NOT_SUPPORTED_ERR: return CV_StsInplaceNotSupported;
    case CV_NOTFOUND_ERR: return CV_StsObjectNotFound;
    case CV_BADCONVERGENCE_ERR: return CV_StsNoConv;
    case CV_BADDEPTH_ERR: return CV_BadDepth;
    case CV_UNMATCHED_FORMATS_ERR: return CV_StsUnmatchedFormats;
    case CV_UNSUPPORTED_COI_ERR: return CV_BadCOI;
    case CV_UNSUPPORTED_CHANNELS_ERR: return CV_BadNumChannels;
    case CV_BADFLAG_ERR: return CV_StsBadFlag;
    case CV_BADRANGE_ERR: return CV_StsBadArg;
    case CV_BADCOEF_ERR: return CV_StsBadArg;
    case CV_BADFACTOR_ERR: return CV_StsBadArg;
    case CV_BADPOINT_ERR: return CV_StsBadPoint;
    default: return CV_StsError;
    }
}
// Registration record for this (cxcore/core) module.
static CvModuleInfo cxcore_info = { 0, "cxcore", CV_VERSION, 0 };
// Head and tail of the global singly-linked module list.
CvModuleInfo *CvModule::first = 0, *CvModule::last = 0;
// Registers _info in the global list; 'info' keeps the deep copy made by
// cvRegisterModule (which appends to the tail, hence 'last').
CvModule::CvModule( CvModuleInfo* _info )
{
    cvRegisterModule( _info );
    info = last;
}
// Unlinks this module's record from the global list and frees the copy
// (allocated with malloc in cvRegisterModule).
CvModule::~CvModule()
{
    if( info )
    {
        // Find the predecessor of 'info' (p stays 0 if info is the head).
        CvModuleInfo* p = first;
        for( ; p != 0 && p->next != info; p = p->next )
            ;
        if( p )
            p->next = info->next;
        if( first == info )
            first = info->next;
        if( last == info )
            last = p;
        free( info );
        info = 0;
    }
}
/* Registers a module in the global CvModule list. Makes a deep copy of the
   info structure (name and version strings included) in a single malloc'ed
   block and appends it to the list.
   Returns 0 on success, -1 if the allocation fails. */
CV_IMPL int
cvRegisterModule( const CvModuleInfo* module )
{
    CV_Assert( module != 0 && module->name != 0 && module->version != 0 );
    size_t name_len = strlen(module->name);
    size_t version_len = strlen(module->version);
    // Single allocation: the struct followed by both strings.
    CvModuleInfo* module_copy = (CvModuleInfo*)malloc( sizeof(*module_copy) +
        name_len + 1 + version_len + 1 );
    // Previously the malloc result was dereferenced without a check,
    // which is undefined behavior on allocation failure.
    if( !module_copy )
        return -1;
    *module_copy = *module;
    module_copy->name = (char*)(module_copy + 1);
    module_copy->version = (char*)(module_copy + 1) + name_len + 1;
    memcpy( (void*)module_copy->name, module->name, name_len + 1 );
    memcpy( (void*)module_copy->version, module->version, version_len + 1 );
    module_copy->next = 0;
    if( CvModule::first == 0 )
        CvModule::first = module_copy;
    else
        CvModule::last->next = module_copy;
    CvModule::last = module_copy;
    return 0;
}
CvModule cxcore_module( &cxcore_info );
/* Returns the version string of the named registered module, or a joint
   "name: version, ..." list of all modules when name == 0. The plugin
   list output is currently always the empty string. Both outputs point to
   static buffers. */
CV_IMPL void
cvGetModuleInfo( const char* name, const char **version, const char **plugin_list )
{
    static char joint_verinfo[1024] = "";
    static char plugin_list_buf[1024] = "";
    if( version )
        *version = 0;
    if( plugin_list )
        *plugin_list = 0;
    CvModuleInfo* module;
    if( version )
    {
        if( name )
        {
            // Case-insensitive exact-length match against registered names.
            size_t i, name_len = strlen(name);
            for( module = CvModule::first; module != 0; module = module->next )
            {
                if( strlen(module->name) == name_len )
                {
                    for( i = 0; i < name_len; i++ )
                    {
                        int c0 = toupper(module->name[i]), c1 = toupper(name[i]);
                        if( c0 != c1 )
                            break;
                    }
                    if( i == name_len )
                        break;
                }
            }
            if( !module )
                CV_Error( CV_StsObjectNotFound, "The module is not found" );
            *version = module->version;
        }
        else
        {
            // NOTE(review): sprintf into the fixed 1 KiB buffer could
            // overflow if many long-named modules register -- confirm
            // the registered set stays small.
            char* ptr = joint_verinfo;
            for( module = CvModule::first; module != 0; module = module->next )
            {
                sprintf( ptr, "%s: %s%s", module->name, module->version, module->next ? ", " : "" );
                ptr += strlen(ptr);
            }
            *version = joint_verinfo;
        }
    }
    if( plugin_list )
        *plugin_list = plugin_list_buf;
}
#if defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
// Frees the per-thread allocator and RNG TLS data when a thread (or the
// process) detaches from the DLL; Windows TLS has no automatic destructor
// mechanism, so cleanup must happen here.
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID )
{
    if( fdwReason == DLL_THREAD_DETACH || fdwReason == DLL_PROCESS_DETACH )
    {
        cv::deleteThreadAllocData();
        cv::deleteThreadRNGData();
    }
    return TRUE;
}
#endif

3512
modules/core/src/tables.cpp Normal file

File diff suppressed because it is too large Load Diff