Merge branch 2.4

Author: Andrey Kamaev
Date:   2013-02-04 17:15:55 +04:00
Commit: c527340cb6
33 changed files with 448 additions and 318 deletions

View File

@@ -360,7 +360,7 @@ CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_id
         if (is_buf_16u)
         {
-            unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+            unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
                 vi*sample_count + data_root->offset);
             for( int i = 0; i < num_valid; i++ )
             {
@@ -373,7 +373,7 @@ CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_id
         }
         else
         {
-            int* idst_idx = buf->data.i + root->buf_idx*buf->cols +
+            int* idst_idx = buf->data.i + root->buf_idx*get_length_subbuf() +
                 vi*sample_count + root->offset;
             for( int i = 0; i < num_valid; i++ )
             {
@@ -390,14 +390,14 @@ CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_id
     const int* src_lbls = get_cv_labels(data_root, (int*)(uchar*)inn_buf);
     if (is_buf_16u)
     {
-        unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+        unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
             (workVarCount-1)*sample_count + root->offset);
         for( int i = 0; i < count; i++ )
             udst[i] = (unsigned short)src_lbls[sidx[i]];
     }
     else
     {
-        int* idst = buf->data.i + root->buf_idx*buf->cols +
+        int* idst = buf->data.i + root->buf_idx*get_length_subbuf() +
             (workVarCount-1)*sample_count + root->offset;
         for( int i = 0; i < count; i++ )
             idst[i] = src_lbls[sidx[i]];
@@ -407,14 +407,14 @@ CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_id
     const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf);
     if (is_buf_16u)
     {
-        unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+        unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
             workVarCount*sample_count + root->offset);
         for( int i = 0; i < count; i++ )
             sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];
     }
     else
     {
-        int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols +
+        int* sample_idx_dst = buf->data.i + root->buf_idx*get_length_subbuf() +
             workVarCount*sample_count + root->offset;
         for( int i = 0; i < count; i++ )
             sample_idx_dst[i] = sample_idx_src[sidx[i]];
@@ -489,6 +489,10 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     int* idst = 0;
     unsigned short* udst = 0;
+
+    uint64 effective_buf_size = 0;
+    int effective_buf_height = 0, effective_buf_width = 0;
+
     clear();
     shared = true;
     have_labels = true;
@@ -548,13 +552,28 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     var_type->data.i[var_count] = cat_var_count;
     var_type->data.i[var_count+1] = cat_var_count+1;
     work_var_count = ( cat_var_count ? 0 : numPrecalcIdx ) + 1/*cv_lables*/;
-    buf_size = (work_var_count + 1) * sample_count/*sample_indices*/;
     buf_count = 2;
-    if ( is_buf_16u )
-        buf = cvCreateMat( buf_count, buf_size, CV_16UC1 );
-    else
-        buf = cvCreateMat( buf_count, buf_size, CV_32SC1 );
+
+    buf_size = -1; // the member buf_size is obsolete
+
+    effective_buf_size = (uint64)(work_var_count + 1)*(uint64)sample_count * buf_count; // this is the total size of "CvMat buf" to be allocated
+    effective_buf_width = sample_count;
+    effective_buf_height = work_var_count+1;
+
+    if (effective_buf_width >= effective_buf_height)
+        effective_buf_height *= buf_count;
+    else
+        effective_buf_width *= buf_count;
+
+    if ((uint64)effective_buf_width * (uint64)effective_buf_height != effective_buf_size)
+    {
+        CV_Error(CV_StsBadArg, "The memory buffer cannot be allocated since its size exceeds integer fields limit");
+    }
+
+    if ( is_buf_16u )
+        buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_16UC1 );
+    else
+        buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_32SC1 );

     cat_count = cvCreateMat( 1, cat_var_count + 1, CV_32SC1 );
@@ -609,7 +628,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     priors_mult = cvCloneMat( priors );
     counts = cvCreateMat( 1, get_num_classes(), CV_32SC1 );
     direction = cvCreateMat( 1, sample_count, CV_8UC1 );
-    split_buf = cvCreateMat( 1, sample_count, CV_32SC1 );
+    split_buf = cvCreateMat( 1, sample_count, CV_32SC1 );//TODO: make a pointer
 }

 void CvCascadeBoostTrainData::free_train_data()
@@ -652,10 +671,10 @@ void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* o
     if ( vi < numPrecalcIdx )
     {
         if( !is_buf_16u )
-            *sortedIndices = buf->data.i + n->buf_idx*buf->cols + vi*sample_count + n->offset;
+            *sortedIndices = buf->data.i + n->buf_idx*get_length_subbuf() + vi*sample_count + n->offset;
         else
         {
-            const unsigned short* shortIndices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
+            const unsigned short* shortIndices = (const unsigned short*)(buf->data.s + n->buf_idx*get_length_subbuf() +
                 vi*sample_count + n->offset );
             for( int i = 0; i < nodeSampleCount; i++ )
                 sortedIndicesBuf[i] = shortIndices[i];
@@ -1027,6 +1046,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
     int newBufIdx = data->get_child_buf_idx( node );
     int workVarCount = data->get_work_var_count();
     CvMat* buf = data->buf;
+    size_t length_buf_row = data->get_length_subbuf();
     cv::AutoBuffer<uchar> inn_buf(n*(3*sizeof(int)+sizeof(float)));
     int* tempBuf = (int*)(uchar*)inn_buf;
     bool splitInputData;
@@ -1070,7 +1090,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         if (data->is_buf_16u)
         {
             ushort *ldst, *rdst;
-            ldst = (ushort*)(buf->data.s + left->buf_idx*buf->cols +
+            ldst = (ushort*)(buf->data.s + left->buf_idx*length_buf_row +
                 vi*scount + left->offset);
             rdst = (ushort*)(ldst + nl);
@@ -1096,9 +1116,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         else
         {
             int *ldst, *rdst;
-            ldst = buf->data.i + left->buf_idx*buf->cols +
+            ldst = buf->data.i + left->buf_idx*length_buf_row +
                 vi*scount + left->offset;
-            rdst = buf->data.i + right->buf_idx*buf->cols +
+            rdst = buf->data.i + right->buf_idx*length_buf_row +
                 vi*scount + right->offset;
             // split sorted
@@ -1131,9 +1151,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         if (data->is_buf_16u)
         {
-            unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
+            unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*length_buf_row +
                 (workVarCount-1)*scount + left->offset);
-            unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
+            unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*length_buf_row +
                 (workVarCount-1)*scount + right->offset);

             for( int i = 0; i < n; i++ )
@@ -1154,9 +1174,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         }
         else
         {
-            int *ldst = buf->data.i + left->buf_idx*buf->cols +
+            int *ldst = buf->data.i + left->buf_idx*length_buf_row +
                 (workVarCount-1)*scount + left->offset;
-            int *rdst = buf->data.i + right->buf_idx*buf->cols +
+            int *rdst = buf->data.i + right->buf_idx*length_buf_row +
                 (workVarCount-1)*scount + right->offset;

             for( int i = 0; i < n; i++ )
@@ -1184,9 +1204,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         if (data->is_buf_16u)
         {
-            unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+            unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*length_buf_row +
                 workVarCount*scount + left->offset);
-            unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
+            unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*length_buf_row +
                 workVarCount*scount + right->offset);

             for (int i = 0; i < n; i++)
             {
@@ -1205,9 +1225,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
         }
         else
         {
-            int* ldst = buf->data.i + left->buf_idx*buf->cols +
+            int* ldst = buf->data.i + left->buf_idx*length_buf_row +
                 workVarCount*scount + left->offset;
-            int* rdst = buf->data.i + right->buf_idx*buf->cols +
+            int* rdst = buf->data.i + right->buf_idx*length_buf_row +
                 workVarCount*scount + right->offset;

             for (int i = 0; i < n; i++)
             {
@@ -1352,6 +1372,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
         sampleIdx = data->get_sample_indices( data->data_root, sampleIdxBuf );
     }
     CvMat* buf = data->buf;
+    size_t length_buf_row = data->get_length_subbuf();
     if( !tree ) // before training the first tree, initialize weights and other parameters
     {
         int* classLabelsBuf = (int*)cur_inn_buf_pos; cur_inn_buf_pos = (uchar*)(classLabelsBuf + n);
@@ -1375,7 +1396,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
         if (data->is_buf_16u)
         {
-            unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols +
+            unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*length_buf_row +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count);
             for( int i = 0; i < n; i++ )
             {
@@ -1393,7 +1414,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
         }
         else
         {
-            int* labels = buf->data.i + data->data_root->buf_idx*buf->cols +
+            int* labels = buf->data.i + data->data_root->buf_idx*length_buf_row +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count;
             for( int i = 0; i < n; i++ )
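Note: the hunks above stop indexing the shared training matrix by buf->cols and instead use get_length_subbuf(), and they size the buffer so that neither CvMat dimension overflows an int. A minimal standalone sketch of that sizing rule follows; it is illustrative only (chooseBufferShape and the exception type are not OpenCV API), it merely mirrors the effective_buf_* computation in the diff.

    #include <climits>
    #include <cstdint>
    #include <stdexcept>

    // Pick int-sized (rows, cols) for a buffer holding buf_count copies of
    // (work_var_count + 1) * sample_count elements.
    static void chooseBufferShape(int work_var_count, int sample_count, int buf_count,
                                  int& rows, int& cols)
    {
        uint64_t r = (uint64_t)work_var_count + 1;
        uint64_t c = (uint64_t)sample_count;
        if (c >= r)
            r *= (uint64_t)buf_count;   // stack the copies along the currently smaller dimension
        else
            c *= (uint64_t)buf_count;
        // each dimension must still fit an int, otherwise the CvMat cannot describe the buffer
        if (r > (uint64_t)INT_MAX || c > (uint64_t)INT_MAX)
            throw std::length_error("buffer size exceeds integer field limits");
        rows = (int)r;
        cols = (int)c;
    }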

View File

@@ -89,14 +89,20 @@ define add_opencv_camera_module
     include $(PREBUILT_SHARED_LIBRARY)
 endef

-ifeq ($(OPENCV_INSTALL_MODULES),on)
-    $(foreach module,$(OPENCV_LIBS),$(eval $(call add_opencv_module,$(module))))
-endif
+ifeq ($(OPENCV_MK_ALREADY_INCLUDED),)
+    ifeq ($(OPENCV_INSTALL_MODULES),on)
+        $(foreach module,$(OPENCV_LIBS),$(eval $(call add_opencv_module,$(module))))
+    endif

-$(foreach module,$(OPENCV_3RDPARTY_COMPONENTS),$(eval $(call add_opencv_3rdparty_component,$(module))))
-$(foreach module,$(OPENCV_CAMERA_MODULES),$(eval $(call add_opencv_camera_module,$(module))))
+    $(foreach module,$(OPENCV_3RDPARTY_COMPONENTS),$(eval $(call add_opencv_3rdparty_component,$(module))))
+    $(foreach module,$(OPENCV_CAMERA_MODULES),$(eval $(call add_opencv_camera_module,$(module))))

-ifneq ($(OPENCV_BASEDIR),)
-    OPENCV_LOCAL_C_INCLUDES += $(foreach mod, $(OPENCV_MODULES), $(OPENCV_BASEDIR)/modules/$(mod)/include)
+    ifneq ($(OPENCV_BASEDIR),)
+        OPENCV_LOCAL_C_INCLUDES += $(foreach mod, $(OPENCV_MODULES), $(OPENCV_BASEDIR)/modules/$(mod)/include)
+    endif
+
+    #turn off module installation to prevent their redefinition
+    OPENCV_MK_ALREADY_INCLUDED:=on
 endif

 ifeq ($(OPENCV_LOCAL_CFLAGS),)

View File

@@ -622,7 +622,6 @@ void ChamferMatcher::Matching::followContour(Mat& templ_img, template_coords_t&
 {
     const int dir[][2] = { {-1,-1}, {-1,0}, {-1,1}, {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1} };
     coordinate_t next;
-    coordinate_t next_temp;
     unsigned char ptr;

     assert (direction==-1 || !coords.empty());
@@ -931,15 +930,13 @@ void ChamferMatcher::Template::show() const
 void ChamferMatcher::Matching::addTemplateFromImage(Mat& templ, float scale)
 {
     Template* cmt = new Template(templ, scale);
-    if(templates.size() > 0)
-        templates.clear();
+    templates.clear();
     templates.push_back(cmt);
     cmt->show();
 }

 void ChamferMatcher::Matching::addTemplate(Template& template_){
-    if(templates.size() > 0)
-        templates.clear();
+    templates.clear();
     templates.push_back(&template_);
 }

 /**

View File

@@ -2507,7 +2507,7 @@ static void generateRandomCenter(const vector<Vec2f>& box, float* center, RNG& r
         center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
 }

-class KMeansPPDistanceComputer
+class KMeansPPDistanceComputer : public ParallelLoopBody
 {
 public:
     KMeansPPDistanceComputer( float *_tdist2,
@@ -2523,10 +2523,10 @@ public:
               step(_step),
               stepci(_stepci) { }

-    void operator()( const cv::BlockedRange& range ) const
+    void operator()( const cv::Range& range ) const
     {
-        const int begin = range.begin();
-        const int end = range.end();
+        const int begin = range.start;
+        const int end = range.end;

         for ( int i = begin; i<end; i++ )
         {
@@ -2582,7 +2582,7 @@ static void generateCentersPP(const Mat& _data, Mat& _out_centers,
                 break;

             int ci = i;
-            parallel_for(BlockedRange(0, N),
+            parallel_for_(Range(0, N),
                          KMeansPPDistanceComputer(tdist2, data, dist, dims, step, step*ci));
             for( i = 0; i < N; i++ )
             {
@@ -2610,7 +2610,7 @@ static void generateCentersPP(const Mat& _data, Mat& _out_centers,
     }
 }

-class KMeansDistanceComputer
+class KMeansDistanceComputer : public ParallelLoopBody
 {
 public:
     KMeansDistanceComputer( double *_distances,
@@ -2624,10 +2624,10 @@ public:
     {
     }

-    void operator()( const BlockedRange& range ) const
+    void operator()( const Range& range ) const
     {
-        const int begin = range.begin();
-        const int end = range.end();
+        const int begin = range.start;
+        const int end = range.end;
         const int K = centers.rows;
         const int dims = centers.cols;
@@ -2884,7 +2884,7 @@ double cv::kmeans( InputArray _data, int K,
         // assign labels
         Mat dists(1, N, CV_64F);
         double* dist = dists.ptr<double>(0);
-        parallel_for(BlockedRange(0, N),
+        parallel_for_(Range(0, N),
                      KMeansDistanceComputer(dist, labels, data, centers));
         compactness = 0;
         for( i = 0; i < N; i++ )
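Note: the hunks above port the kmeans distance functors from the old cv::BlockedRange/parallel_for API to cv::ParallelLoopBody with cv::parallel_for_ and cv::Range. A minimal sketch of that pattern is shown below; SquareBody and the arrays are illustrative and not part of the commit.

    #include <opencv2/core/core.hpp>

    // Each worker fills its slice [range.start, range.end) of the output;
    // Range exposes start/end fields where BlockedRange had begin()/end().
    class SquareBody : public cv::ParallelLoopBody
    {
    public:
        SquareBody(const float* src, float* dst) : src_(src), dst_(dst) {}
        void operator()(const cv::Range& range) const
        {
            for (int i = range.start; i < range.end; i++)
                dst_[i] = src_[i] * src_[i];
        }
    private:
        const float* src_;
        float* dst_;
    };

    // usage: cv::parallel_for_(cv::Range(0, n), SquareBody(src, dst));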

View File

@@ -1726,7 +1726,7 @@ typedef void (*BatchDistFunc)(const uchar* src1, const uchar* src2, size_t step2
                               int nvecs, int len, uchar* dist, const uchar* mask);

-struct BatchDistInvoker
+struct BatchDistInvoker : public ParallelLoopBody
 {
     BatchDistInvoker( const Mat& _src1, const Mat& _src2,
                       Mat& _dist, Mat& _nidx, int _K,
@@ -1743,12 +1743,12 @@ struct BatchDistInvoker
         func = _func;
     }

-    void operator()(const BlockedRange& range) const
+    void operator()(const Range& range) const
     {
         AutoBuffer<int> buf(src2->rows);
         int* bufptr = buf;

-        for( int i = range.begin(); i < range.end(); i++ )
+        for( int i = range.start; i < range.end; i++ )
         {
             func(src1->ptr(i), src2->ptr(), src2->step, src2->rows, src2->cols,
                  K > 0 ? (uchar*)bufptr : dist->ptr(i), mask->data ? mask->ptr(i) : 0);
@@ -1899,8 +1899,8 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
                 ("The combination of type=%d, dtype=%d and normType=%d is not supported",
                  type, dtype, normType));

-    parallel_for(BlockedRange(0, src1.rows),
+    parallel_for_(Range(0, src1.rows),
                  BatchDistInvoker(src1, src2, dist, nidx, K, mask, update, func));
 }

View File

@@ -79,6 +79,7 @@ The current implementation supports the following types of a descriptor extracto
     * ``"SIFT"`` -- :ocv:class:`SIFT`
     * ``"SURF"`` -- :ocv:class:`SURF`
     * ``"ORB"`` -- :ocv:class:`ORB`
+    * ``"BRISK"`` -- :ocv:class:`BRISK`
     * ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`

 A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --

View File

@@ -269,7 +269,7 @@ Brute-force matcher constructor.

 .. ocv:function:: BFMatcher::BFMatcher( int normType=NORM_L2, bool crossCheck=false )

-    :param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
+    :param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB, BRISK and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).

     :param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMathcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.

View File

@@ -127,6 +127,7 @@ The following detector types are supported:
 * ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
 * ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
 * ``"ORB"`` -- :ocv:class:`ORB`
+* ``"BRISK"`` -- :ocv:class:`BRISK`
 * ``"MSER"`` -- :ocv:class:`MSER`
 * ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
 * ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled

View File

@@ -96,6 +96,58 @@ Finds keypoints in an image and computes their descriptors
     :param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.

+BRISK
+-----
+.. ocv:class:: BRISK : public Feature2D
+
+Class implementing the BRISK keypoint detector and descriptor extractor, described in [LCS11]_.
+
+.. [LCS11] Stefan Leutenegger, Margarita Chli and Roland Siegwart: BRISK: Binary Robust Invariant Scalable Keypoints. ICCV 2011: 2548-2555.
+
+BRISK::BRISK
+------------
+The BRISK constructor
+
+.. ocv:function:: BRISK::BRISK(int thresh=30, int octaves=3, float patternScale=1.0f)
+
+    :param thresh: FAST/AGAST detection threshold score.
+
+    :param octaves: detection octaves. Use 0 to do single scale.
+
+    :param patternScale: apply this scale to the pattern used for sampling the neighbourhood of a keypoint.
+
+BRISK::BRISK
+------------
+The BRISK constructor for a custom pattern
+
+.. ocv:function:: BRISK::BRISK(std::vector<float> &radiusList, std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f, std::vector<int> indexChange=std::vector<int>())
+
+    :param radiusList: defines the radii (in pixels) where the samples around a keypoint are taken (for keypoint scale 1).
+
+    :param numberList: defines the number of sampling points on the sampling circle. Must be the same size as radiusList..
+
+    :param dMax: threshold for the short pairings used for descriptor formation (in pixels for keypoint scale 1).
+
+    :param dMin: threshold for the long pairings used for orientation determination (in pixels for keypoint scale 1).
+
+    :param indexChanges: index remapping of the bits.
+
+BRISK::operator()
+-----------------
+Finds keypoints in an image and computes their descriptors
+
+.. ocv:function:: void BRISK::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const
+
+    :param image: The input 8-bit grayscale image.
+
+    :param mask: The operation mask.
+
+    :param keypoints: The output vector of keypoints.
+
+    :param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
+
+    :param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
+
 FREAK
 -----
 .. ocv:class:: FREAK : public DescriptorExtractor

View File

@@ -309,10 +309,9 @@ BRISK::generateKernel(std::vector<float> &radiusList, std::vector<int> &numberLi
   {
     indexChange.resize(points_ * (points_ - 1) / 2);
     indSize = (unsigned int)indexChange.size();
-  }
+
   for (unsigned int i = 0; i < indSize; i++)
-  {
-    indexChange[i] = i;
+    indexChange[i] = i;
   }
   const float dMin_sq = dMin_ * dMin_;
   const float dMax_sq = dMax_ * dMax_;

View File

@@ -241,7 +241,6 @@ static void filterEllipticKeyPointsByImageSize( vector<EllipticKeyPoint>& keypoi

 struct IntersectAreaCounter
 {
-    IntersectAreaCounter() : bua(0), bna(0) {}
     IntersectAreaCounter( float _dr, int _minx,
                           int _miny, int _maxy,
                           const Point2f& _diff,
@@ -257,6 +256,9 @@ struct IntersectAreaCounter
     void operator()( const BlockedRange& range )
     {
+        CV_Assert( miny < maxy );
+        CV_Assert( dr > FLT_EPSILON );
+
         int temp_bua = bua, temp_bna = bna;
         for( int i = range.begin(); i != range.end(); i++ )
         {
@@ -461,7 +463,7 @@ void cv::evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H
     keypoints2 = _keypoints2 != 0 ? _keypoints2 : &buf2;

     if( (keypoints1->empty() || keypoints2->empty()) && fdetector.empty() )
-        CV_Error( CV_StsBadArg, "fdetector must be no empty when keypoints1 or keypoints2 is empty" );
+        CV_Error( CV_StsBadArg, "fdetector must not be empty when keypoints1 or keypoints2 is empty" );

     if( keypoints1->empty() )
         fdetector->detect( img1, *keypoints1 );
@@ -572,10 +574,10 @@ void cv::evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, con
     correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? _correctMatches1to2Mask : &buf2;

     if( keypoints1.empty() )
-        CV_Error( CV_StsBadArg, "keypoints1 must be no empty" );
+        CV_Error( CV_StsBadArg, "keypoints1 must not be empty" );

     if( matches1to2->empty() && dmatcher.empty() )
-        CV_Error( CV_StsBadArg, "dmatch must be no empty when matches1to2 is empty" );
+        CV_Error( CV_StsBadArg, "dmatch must not be empty when matches1to2 is empty" );

     bool computeKeypoints2ByPrj = keypoints2.empty();
     if( computeKeypoints2ByPrj )

View File

@@ -778,8 +778,6 @@ NCVStatus loadFromXML(const std::string &filename,
     haar.bNeedsTiltedII = false;
     Ncv32u curMaxTreeDepth;

-    std::vector<char> xmlFileCont;
-
     std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
     haarStages.resize(0);
     haarClassifierNodes.resize(0);

View File

@@ -3120,6 +3120,7 @@ protected:
     void init();

     int index, width, height,fourcc;
+    int widthSet, heightSet;
     IplImage* frame;
     static videoInput VI;
 };
@@ -3138,6 +3139,7 @@ CvCaptureCAM_DShow::CvCaptureCAM_DShow()
     index = -1;
     frame = 0;
     width = height = fourcc = -1;
+    widthSet = heightSet = -1;
     CoInitialize(0);
 }
@@ -3155,7 +3157,7 @@ void CvCaptureCAM_DShow::close()
         index = -1;
         cvReleaseImage(&frame);
     }
-    width = height = -1;
+    widthSet = heightSet = width = height = -1;
 }

 // Initialize camera input
@@ -3282,9 +3284,12 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
         {
             VI.stopDevice(index);
             VI.setIdealFramerate(index,fps);
-            VI.setupDevice(index);
+            if (widthSet > 0 && heightSet > 0)
+                VI.setupDevice(index, widthSet, heightSet);
+            else
+                VI.setupDevice(index);
         }
-        break;
+        return VI.isDeviceSetup(index);
     }
@@ -3299,8 +3304,15 @@ bool CvCaptureCAM_DShow::setProperty( int property_id, double value )
             VI.setIdealFramerate(index, fps);
             VI.setupDeviceFourcc(index, width, height, fourcc);
         }
-        width = height = fourcc = -1;
-        return VI.isDeviceSetup(index);
+
+        bool success = VI.isDeviceSetup(index);
+        if (success)
+        {
+            widthSet = width;
+            heightSet = height;
+            width = height = fourcc = -1;
+        }
+        return success;
     }
     return true;
 }

View File

@@ -2592,39 +2592,36 @@ cvCompareHist( const CvHistogram* hist1,
 CV_IMPL void
 cvCopyHist( const CvHistogram* src, CvHistogram** _dst )
 {
-    int eq = 0;
-    int is_sparse;
-    int i, dims1, dims2;
-    int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1;
-    float* ranges[CV_MAX_DIM];
-    float** thresh = 0;
-    CvHistogram* dst;
-
     if( !_dst )
         CV_Error( CV_StsNullPtr, "Destination double pointer is NULL" );

-    dst = *_dst;
+    CvHistogram* dst = *_dst;

     if( !CV_IS_HIST(src) || (dst && !CV_IS_HIST(dst)) )
         CV_Error( CV_StsBadArg, "Invalid histogram header[s]" );

-    is_sparse = CV_IS_SPARSE_MAT(src->bins);
-    dims1 = cvGetDims( src->bins, size1 );
-    for( i = 0; i < dims1; i++ )
-        total *= size1[i];
+    bool eq = false;
+    int size1[CV_MAX_DIM];
+    bool is_sparse = CV_IS_SPARSE_MAT(src->bins);
+    int dims1 = cvGetDims( src->bins, size1 );

-    if( dst && is_sparse == CV_IS_SPARSE_MAT(dst->bins))
+    if( dst && (is_sparse == CV_IS_SPARSE_MAT(dst->bins)))
     {
-        dims2 = cvGetDims( dst->bins, size2 );
+        int size2[CV_MAX_DIM];
+        int dims2 = cvGetDims( dst->bins, size2 );
+
         if( dims1 == dims2 )
         {
+            int i;
             for( i = 0; i < dims1; i++ )
+            {
                 if( size1[i] != size2[i] )
                     break;
+            }
+
+            eq = (i == dims1);
         }
-
-        eq = i == dims1;
     }

     if( !eq )
@@ -2636,14 +2633,21 @@ cvCopyHist( const CvHistogram* src, CvHistogram** _dst )

     if( CV_HIST_HAS_RANGES( src ))
     {
+        float* ranges[CV_MAX_DIM];
+        float** thresh = 0;
+
         if( CV_IS_UNIFORM_HIST( src ))
         {
-            for( i = 0; i < dims1; i++ )
+            for( int i = 0; i < dims1; i++ )
                 ranges[i] = (float*)src->thresh[i];
+
             thresh = ranges;
         }
         else
+        {
             thresh = src->thresh2;
+        }
+
         cvSetHistBinRanges( dst, thresh, CV_IS_UNIFORM_HIST(src));
     }

View File

@@ -327,11 +327,10 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
     CV_Assert( std::abs(dsize.width - ssize.width*2) == dsize.width % 2 &&
                std::abs(dsize.height - ssize.height*2) == dsize.height % 2);

-    int k, x, sy0 = -PU_SZ/2, sy = sy0, width0 = ssize.width - 1;
+    int k, x, sy0 = -PU_SZ/2, sy = sy0;

     ssize.width *= cn;
     dsize.width *= cn;
-    width0 *= cn;

     for( x = 0; x < ssize.width; x++ )
         dtab[x] = (x/cn)*2*cn + x % cn;

View File

@@ -85,11 +85,11 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
         matSize = 100;

         truth = new DMatch[] {
-                new DMatch(0, 0, 0, 1.0496940f),
-                new DMatch(1, 0, 0, 1.0984558f),
-                new DMatch(2, 1, 0, 0.4945875f),
-                new DMatch(3, 1, 0, 0.48435235f),
-                new DMatch(4, 0, 0, 1.0836693f)
+                new DMatch(0, 0, 0, 0.6211397f),
+                new DMatch(1, 1, 0, 0.9177120f),
+                new DMatch(2, 1, 0, 0.3112163f),
+                new DMatch(3, 1, 0, 0.2925074f),
+                new DMatch(4, 1, 0, 0.9309178f)
                 };
     }

View File

@@ -85,11 +85,11 @@ public class BruteForceL1DescriptorMatcherTest extends OpenCVTestCase {
         matSize = 100;

         truth = new DMatch[] {
-                new DMatch(0, 1, 0, 6.9202340f),
-                new DMatch(1, 1, 0, 6.1675916f),
-                new DMatch(2, 1, 0, 2.6798590f),
-                new DMatch(3, 1, 0, 2.6545324f),
-                new DMatch(4, 0, 0, 6.1294870f)
+                new DMatch(0, 0, 0, 3.0975165f),
+                new DMatch(1, 1, 0, 3.5680308f),
+                new DMatch(2, 1, 0, 1.3722466f),
+                new DMatch(3, 1, 0, 1.3041023f),
+                new DMatch(4, 1, 0, 3.5970376f)
                 };
     }

View File

@@ -90,11 +90,11 @@ public class BruteForceSL2DescriptorMatcherTest extends OpenCVTestCase {
         matSize = 100;

         truth = new DMatch[] {
-                new DMatch(0, 0, 0, 1.1018573f),
-                new DMatch(1, 0, 0, 1.2066052f),
-                new DMatch(2, 1, 0, 0.2446168f),
-                new DMatch(3, 1, 0, 0.23459719f),
-                new DMatch(4, 0, 0, 1.174339f)
+                new DMatch(0, 0, 0, 0.3858146f),
+                new DMatch(1, 1, 0, 0.8421953f),
+                new DMatch(2, 1, 0, 0.0968556f),
+                new DMatch(3, 1, 0, 0.0855606f),
+                new DMatch(4, 1, 0, 0.8666080f)
                 };
     }

View File

@@ -158,11 +158,11 @@ public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
         matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
         matSize = 100;

         truth = new DMatch[] {
-                new DMatch(0, 0, 0, 1.049694f),
-                new DMatch(1, 0, 0, 1.0984558f),
-                new DMatch(2, 1, 0, 0.4945875f),
-                new DMatch(3, 1, 0, 0.48435235f),
-                new DMatch(4, 0, 0, 1.0836693f)
+                new DMatch(0, 0, 0, 0.6211397f),
+                new DMatch(1, 1, 0, 0.9177120f),
+                new DMatch(2, 1, 0, 0.3112163f),
+                new DMatch(3, 1, 0, 0.2925075f),
+                new DMatch(4, 1, 0, 0.9309179f)
                 };
     }

View File

@@ -27,7 +27,12 @@ public class SURFDescriptorExtractorTest extends OpenCVTestCase {
     @Override
     protected void setUp() throws Exception {
         super.setUp();
         extractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
+
+        String filename = OpenCVTestRunner.getTempFileName("yml");
+        writeFile(filename, "%YAML:1.0\nextended: 1\nhessianThreshold: 100.\nnOctaveLayers: 2\nnOctaves: 4\nupright: 0");
+        extractor.read(filename);
+
         matSize = 100;
     }
@@ -46,19 +51,19 @@ public class SURFDescriptorExtractorTest extends OpenCVTestCase {
         Mat truth = new Mat(1, 128, CvType.CV_32FC1) {
             {
                 put(0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0.045382127, 0.075976953, -0.031969212, 0.035002094, 0.012224297,
-                        0.012286193, -0.0088025155, 0.0088025155, 0.00017225844, 0.00017225844, 0, 0, 8.2743405e-05,
-                        8.2743405e-05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.2743405e-05, 8.2743405e-05, -0.00017225844,
-                        0.00017225844, 0, 0, 0.31723264, 0.42715758, -0.19872268, 0.23621935, 0.033304065, 0.033918764,
-                        -0.021780485, 0.021780485, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0088025145,
-                        0.0088025145, 0.012224296, 0.012286192, -0.045382123, 0.075976953, 0.031969212, 0.035002094,
-                        0.10047197, 0.21463872, -0.0012294546, 0.18176091, -0.075555265, 0.35627601, 0.01270232,
-                        0.20058797, -0.037658721, 0.037658721, 0.064850949, 0.064850949, -0.27688536, 0.44229308,
-                        0.14888979, 0.14888979, -0.0031531656, 0.0031531656, 0.0068481555, 0.0072466261, -0.034193151,
-                        0.040314503, 0.01108359, 0.023398584, -0.00071876607, 0.00071876607, -0.0031819802,
-                        0.0031819802, 0, 0, -0.0013680183, 0.0013680183, 0.034193147, 0.040314503, -0.01108359,
-                        0.023398584, 0.006848156, 0.0072466265, -0.0031531656, 0.0031531656, 0, 0, 0, 0, 0, 0, 0, 0,
-                        -0.0013680183, 0.0013680183, 0, 0, 0.00071876607, 0.00071876607, 0.0031819802, 0.0031819802
+                        0, 0, 0, 0, 0, 0, 0, 0, 0.058821894, 0.058821894, -0.045962855, 0.046261817, 0.0085156476,
+                        0.0085754395, -0.0064509804, 0.0064509804, 0.00044069235, 0.00044069235, 0, 0, 0.00025723741,
+                        0.00025723741, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00025723741, 0.00025723741, -0.00044069235,
+                        0.00044069235, 0, 0, 0.36278215, 0.36278215, -0.24688604, 0.26173124, 0.052068226, 0.052662034,
+                        -0.032815345, 0.032815345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0064523756,
+                        0.0064523756, 0.0082002236, 0.0088908644, -0.059001274, 0.059001274, 0.045789491, 0.04648013,
+                        0.11961588, 0.22789426, -0.01322381, 0.18291828, -0.14042182, 0.23973691, 0.073782086, 0.23769434,
+                        -0.027880307, 0.027880307, 0.049587864, 0.049587864, -0.33991757, 0.33991757, 0.21437603, 0.21437603,
+                        -0.0020763327, 0.0020763327, 0.006245892, 0.006245892, -0.04067041, 0.04067041, 0.019361559,
+                        0.019361559, 0, 0, -0.0035977389, 0.0035977389, 0, 0, -0.00099993451, 0.00099993451, 0.040670406,
+                        0.040670406, -0.019361559, 0.019361559, 0.006245892, 0.006245892, -0.0020763327, 0.0020763327,
+                        -0.00034532088, 0.00034532088, 0, 0, 0, 0, 0.00034532088, 0.00034532088, -0.00099993451,
+                        0.00099993451, 0, 0, 0, 0, 0.0035977389, 0.0035977389
                 );
             }
         };

View File

@@ -150,7 +150,7 @@ public class SURFFeatureDetectorTest extends OpenCVTestCase {
         detector.write(filename);

-        String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SURF</name>\n<extended>1</extended>\n<hessianThreshold>100.</hessianThreshold>\n<nOctaveLayers>2</nOctaveLayers>\n<nOctaves>4</nOctaves>\n<upright>0</upright>\n</opencv_storage>\n";
+        String truth = "<?xml version=\"1.0\"?>\n<opencv_storage>\n<name>Feature2D.SURF</name>\n<extended>0</extended>\n<hessianThreshold>100.</hessianThreshold>\n<nOctaveLayers>3</nOctaveLayers>\n<nOctaves>4</nOctaves>\n<upright>0</upright>\n</opencv_storage>\n";
         assertEquals(truth, readFile(filename));
     }
@@ -159,7 +159,7 @@ public class SURFFeatureDetectorTest extends OpenCVTestCase {
         detector.write(filename);

-        String truth = "%YAML:1.0\nname: \"Feature2D.SURF\"\nextended: 1\nhessianThreshold: 100.\nnOctaveLayers: 2\nnOctaves: 4\nupright: 0\n";
+        String truth = "%YAML:1.0\nname: \"Feature2D.SURF\"\nextended: 0\nhessianThreshold: 100.\nnOctaveLayers: 3\nnOctaves: 4\nupright: 0\n";
         assertEquals(truth, readFile(filename));
     }

View File

@@ -796,7 +796,7 @@ struct CV_EXPORTS CvDTreeTrainData
     const CvMat* responses;
     CvMat* responses_copy; // used in Boosting

-    int buf_count, buf_size;
+    int buf_count, buf_size; // buf_size is obsolete, please do not use it, use expression ((int64)buf->rows * (int64)buf->cols / buf_count) instead
     bool shared;
     int is_buf_16u;
@@ -806,6 +806,12 @@ struct CV_EXPORTS CvDTreeTrainData
     CvMat* counts;
     CvMat* buf;
+
+    inline size_t get_length_subbuf() const
+    {
+        size_t res = (size_t)(work_var_count + 1) * (size_t)sample_count;
+        return res;
+    }
+
     CvMat* direction;
     CvMat* split_buf;
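Note: get_length_subbuf() added above returns the element length of one copy of the shared buf matrix, (work_var_count + 1) * sample_count; the source hunks then address a variable inside copy buf_idx as buf_idx*get_length_subbuf() + vi*sample_count + offset instead of buf_idx*buf->cols. A small sketch of that addressing follows, with illustrative names only.

    // All quantities are element counts, not bytes; names are illustrative.
    inline size_t subbuf_element_offset(size_t buf_idx,        // which of the buf_count copies
                                        size_t length_subbuf,  // (work_var_count + 1) * sample_count
                                        size_t vi,             // work-variable index inside the copy
                                        size_t sample_count,
                                        size_t node_offset)    // node's first sample within the variable row
    {
        return buf_idx * length_subbuf + vi * sample_count + node_offset;
    }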

View File

@@ -1135,13 +1135,13 @@ CvBoost::update_weights( CvBoostTree* tree )
     int *sample_idx_buf;
     const int* sample_idx = 0;
     cv::AutoBuffer<uchar> inn_buf;
-    size_t _buf_size = (params.boost_type == LOGIT) || (params.boost_type == GENTLE) ? data->sample_count*sizeof(int) : 0;
+    size_t _buf_size = (params.boost_type == LOGIT) || (params.boost_type == GENTLE) ? (size_t)(data->sample_count)*sizeof(int) : 0;
     if( !tree )
         _buf_size += n*sizeof(int);
     else
     {
         if( have_subsample )
-            _buf_size += data->buf->cols*(sizeof(float)+sizeof(uchar));
+            _buf_size += data->get_length_subbuf()*(sizeof(float)+sizeof(uchar));
     }
     inn_buf.allocate(_buf_size);
     uchar* cur_buf_pos = (uchar*)inn_buf;
@@ -1156,6 +1156,7 @@ CvBoost::update_weights( CvBoostTree* tree )
         sample_idx = data->get_sample_indices( data->data_root, sample_idx_buf );
     }
     CvMat* dtree_data_buf = data->buf;
+    size_t length_buf_row = data->get_length_subbuf();
     if( !tree ) // before training the first tree, initialize weights and other parameters
     {
         int* class_labels_buf = (int*)cur_buf_pos;
@@ -1195,7 +1196,7 @@ CvBoost::update_weights( CvBoostTree* tree )
         if (data->is_buf_16u)
         {
-            unsigned short* labels = (unsigned short*)(dtree_data_buf->data.s + data->data_root->buf_idx*dtree_data_buf->cols +
+            unsigned short* labels = (unsigned short*)(dtree_data_buf->data.s + data->data_root->buf_idx*length_buf_row +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count);
             for( i = 0; i < n; i++ )
             {
@@ -1213,7 +1214,7 @@ CvBoost::update_weights( CvBoostTree* tree )
         }
         else
         {
-            int* labels = dtree_data_buf->data.i + data->data_root->buf_idx*dtree_data_buf->cols +
+            int* labels = dtree_data_buf->data.i + data->data_root->buf_idx*length_buf_row +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count;
             for( i = 0; i < n; i++ )
@@ -1260,9 +1261,10 @@ CvBoost::update_weights( CvBoostTree* tree )
         if( have_subsample )
         {
             float* values = (float*)cur_buf_pos;
-            cur_buf_pos = (uchar*)(values + data->buf->cols);
+            cur_buf_pos = (uchar*)(values + data->get_length_subbuf());
             uchar* missing = cur_buf_pos;
-            cur_buf_pos = missing + data->buf->step;
+            cur_buf_pos = missing + data->get_length_subbuf() * (size_t)CV_ELEM_SIZE(data->buf->type);

             CvMat _sample, _mask;

             // invert the subsample mask

View File

@@ -80,6 +80,9 @@ void CvERTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
     char err[100];
     const int *sidx = 0, *vidx = 0;

+    uint64 effective_buf_size = 0;
+    int effective_buf_height = 0, effective_buf_width = 0;
+
     if ( _params.use_surrogates )
         CV_ERROR(CV_StsBadArg, "CvERTrees do not support surrogate splits");
@@ -179,18 +182,34 @@ void CvERTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
     have_labels = cv_n > 0 || (ord_var_count == 1 && cat_var_count == 0) || _add_labels;

     work_var_count = cat_var_count + (is_classifier ? 1 : 0) + (have_labels ? 1 : 0);
-    buf_size = (work_var_count + 1)*sample_count;

     shared = _shared;
     buf_count = shared ? 2 : 1;
+
+    buf_size = -1; // the member buf_size is obsolete
+
+    effective_buf_size = (uint64)(work_var_count + 1)*(uint64)sample_count * buf_count; // this is the total size of "CvMat buf" to be allocated
+    effective_buf_width = sample_count;
+    effective_buf_height = work_var_count+1;
+
+    if (effective_buf_width >= effective_buf_height)
+        effective_buf_height *= buf_count;
+    else
+        effective_buf_width *= buf_count;
+
+    if ((uint64)effective_buf_width * (uint64)effective_buf_height != effective_buf_size)
+    {
+        CV_Error(CV_StsBadArg, "The memory buffer cannot be allocated since its size exceeds integer fields limit");
+    }
+
     if ( is_buf_16u )
     {
-        CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 ));
+        CV_CALL( buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_16UC1 ));
         CV_CALL( pair16u32s_ptr = (CvPair16u32s*)cvAlloc( sample_count*sizeof(pair16u32s_ptr[0]) ));
     }
     else
     {
-        CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 ));
+        CV_CALL( buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_32SC1 ));
         CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));
     }
@@ -293,13 +312,13 @@ void CvERTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
             for( i = 0; i < sample_count; i++ )
             {
                 int val = INT_MAX, si = sidx ? sidx[i] : i;
-                if( !mask || !mask[si*m_step] )
+                if( !mask || !mask[(size_t)si*m_step] )
                 {
                     if( idata )
-                        val = idata[si*step];
+                        val = idata[(size_t)si*step];
                     else
                     {
-                        float t = fdata[si*step];
+                        float t = fdata[(size_t)si*step];
                         val = cvRound(t);
                         if( val != t )
                         {
@@ -405,12 +424,12 @@ void CvERTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
             {
                 float val = ord_nan;
                 int si = sidx ? sidx[i] : i;
-                if( !mask || !mask[si*m_step] )
+                if( !mask || !mask[(size_t)si*m_step] )
                 {
                     if( idata )
-                        val = (float)idata[si*step];
+                        val = (float)idata[(size_t)si*step];
                     else
-                        val = fdata[si*step];
+                        val = fdata[(size_t)si*step];

                     if( fabs(val) >= ord_nan )
                     {
@@ -578,9 +597,9 @@ const int* CvERTreeTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* cat
     int ci = get_var_type( vi);
     const int* cat_values = 0;
     if( !is_buf_16u )
-        cat_values = buf->data.i + n->buf_idx*buf->cols + ci*sample_count + n->offset;
+        cat_values = buf->data.i + n->buf_idx*get_length_subbuf() + ci*sample_count + n->offset;
     else {
-        const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
+        const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*get_length_subbuf() +
             ci*sample_count + n->offset);
         for( int i = 0; i < n->sample_count; i++ )
             cat_values_buf[i] = short_values[i];
@@ -1333,6 +1352,7 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
     CvDTreeNode *left = 0, *right = 0;
     int new_buf_idx = data->get_child_buf_idx( node );
     CvMat* buf = data->buf;
+    size_t length_buf_row = data->get_length_subbuf();
     cv::AutoBuffer<int> temp_buf(n);

     complete_node_dir(node);
@@ -1385,9 +1405,9 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
             if (data->is_buf_16u)
             {
-                unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
+                unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*length_buf_row +
                     ci*scount + left->offset);
-                unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
+                unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*length_buf_row +
                     ci*scount + right->offset);

                 for( i = 0; i < n; i++ )
@@ -1415,9 +1435,9 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
             }
             else
             {
-                int *ldst = buf->data.i + left->buf_idx*buf->cols +
+                int *ldst = buf->data.i + left->buf_idx*length_buf_row +
                     ci*scount + left->offset;
-                int *rdst = buf->data.i + right->buf_idx*buf->cols +
+                int *rdst = buf->data.i + right->buf_idx*length_buf_row +
                     ci*scount + right->offset;

                 for( i = 0; i < n; i++ )
@@ -1460,9 +1480,9 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
         if (data->is_buf_16u)
         {
-            unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+            unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*length_buf_row +
                 pos*scount + left->offset);
-            unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
+            unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*length_buf_row +
                 pos*scount + right->offset);

             for (i = 0; i < n; i++)
@@ -1483,9 +1503,9 @@ void CvForestERTree::split_node_data( CvDTreeNode* node )
         }
         else
         {
-            int* ldst = buf->data.i + left->buf_idx*buf->cols +
+            int* ldst = buf->data.i + left->buf_idx*length_buf_row +
                 pos*scount + left->offset;
-            int* rdst = buf->data.i + right->buf_idx*buf->cols +
+            int* rdst = buf->data.i + right->buf_idx*length_buf_row +
                 pos*scount + right->offset;

             for (i = 0; i < n; i++)
             {

View File

@@ -100,9 +100,9 @@ bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses,
     __BEGIN__;

-    CvVectors* _samples;
-    float** _data;
-    int _count, _dims, _dims_all, _rsize;
+    CvVectors* _samples = 0;
+    float** _data = 0;
+    int _count = 0, _dims = 0, _dims_all = 0, _rsize = 0;

     if( !_update_base )
         clear();
@@ -114,6 +114,9 @@ bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses,
         _responses, CV_VAR_ORDERED, 0, _sample_idx, true, (const float***)&_data,
         &_count, &_dims, &_dims_all, &responses, 0, 0 ));

+    if( !responses )
+        CV_ERROR( CV_StsNoMem, "Could not allocate memory for responses" );
+
     if( _update_base && _dims != var_count )
         CV_ERROR( CV_StsBadArg, "The newly added data have different dimensionality" );

View File

@ -50,7 +50,8 @@ static const int block_size_delta = 1 << 10;
CvDTreeTrainData::CvDTreeTrainData() CvDTreeTrainData::CvDTreeTrainData()
{ {
var_idx = var_type = cat_count = cat_ofs = cat_map = var_idx = var_type = cat_count = cat_ofs = cat_map =
priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0; priors = priors_mult = counts = direction = split_buf = responses_copy = 0;
buf = 0;
tree_storage = temp_storage = 0; tree_storage = temp_storage = 0;
clear(); clear();
@ -64,7 +65,8 @@ CvDTreeTrainData::CvDTreeTrainData( const CvMat* _train_data, int _tflag,
bool _shared, bool _add_labels ) bool _shared, bool _add_labels )
{ {
var_idx = var_type = cat_count = cat_ofs = cat_map = var_idx = var_type = cat_count = cat_ofs = cat_map =
priors = priors_mult = counts = buf = direction = split_buf = responses_copy = 0; priors = priors_mult = counts = direction = split_buf = responses_copy = 0;
buf = 0;
tree_storage = temp_storage = 0; tree_storage = temp_storage = 0;
@ -157,6 +159,9 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
char err[100]; char err[100];
const int *sidx = 0, *vidx = 0; const int *sidx = 0, *vidx = 0;
uint64 effective_buf_size = 0;
int effective_buf_height = 0, effective_buf_width = 0;
if( _update_data && data_root ) if( _update_data && data_root )
{ {
data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx, data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx,
@ -285,18 +290,35 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
work_var_count = var_count + (is_classifier ? 1 : 0) // for responses class_labels work_var_count = var_count + (is_classifier ? 1 : 0) // for responses class_labels
+ (have_labels ? 1 : 0); // for cv_labels + (have_labels ? 1 : 0); // for cv_labels
buf_size = (work_var_count + 1 /*for sample_indices*/) * sample_count;
shared = _shared; shared = _shared;
buf_count = shared ? 2 : 1; buf_count = shared ? 2 : 1;
buf_size = -1; // the member buf_size is obsolete
effective_buf_size = (uint64)(work_var_count + 1)*(uint64)sample_count * buf_count; // this is the total size of "CvMat buf" to be allocated
effective_buf_width = sample_count;
effective_buf_height = work_var_count+1;
if (effective_buf_width >= effective_buf_height)
effective_buf_height *= buf_count;
else
effective_buf_width *= buf_count;
if ((uint64)effective_buf_width * (uint64)effective_buf_height != effective_buf_size)
{
CV_Error(CV_StsBadArg, "The memory buffer cannot be allocated since its size exceeds integer fields limit");
}
if ( is_buf_16u ) if ( is_buf_16u )
{ {
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 )); CV_CALL( buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_16UC1 ));
CV_CALL( pair16u32s_ptr = (CvPair16u32s*)cvAlloc( sample_count*sizeof(pair16u32s_ptr[0]) )); CV_CALL( pair16u32s_ptr = (CvPair16u32s*)cvAlloc( sample_count*sizeof(pair16u32s_ptr[0]) ));
} }
else else
{ {
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 )); CV_CALL( buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_32SC1 ));
CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) )); CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));
} }
@ -356,7 +378,7 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
{ {
int ci; int ci;
const uchar* mask = 0; const uchar* mask = 0;
int m_step = 0, step; int64 m_step = 0, step;
const int* idata = 0; const int* idata = 0;
const float* fdata = 0; const float* fdata = 0;
int num_valid = 0; int num_valid = 0;
@ -399,13 +421,13 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
for( i = 0; i < sample_count; i++ ) for( i = 0; i < sample_count; i++ )
{ {
int val = INT_MAX, si = sidx ? sidx[i] : i; int val = INT_MAX, si = sidx ? sidx[i] : i;
if( !mask || !mask[si*m_step] ) if( !mask || !mask[(size_t)si*m_step] )
{ {
if( idata ) if( idata )
val = idata[si*step]; val = idata[(size_t)si*step];
else else
{ {
float t = fdata[si*step]; float t = fdata[(size_t)si*step];
val = cvRound(t); val = cvRound(t);
if( fabs(t - val) > FLT_EPSILON ) if( fabs(t - val) > FLT_EPSILON )
{ {
@ -515,12 +537,12 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
{ {
float val = ord_nan; float val = ord_nan;
int si = sidx ? sidx[i] : i; int si = sidx ? sidx[i] : i;
if( !mask || !mask[si*m_step] ) if( !mask || !mask[(size_t)si*m_step] )
{ {
if( idata ) if( idata )
val = (float)idata[si*step]; val = (float)idata[(size_t)si*step];
else else
val = fdata[si*step]; val = fdata[(size_t)si*step];
if( fabs(val) >= ord_nan ) if( fabs(val) >= ord_nan )
{ {
@ -532,7 +554,7 @@ void CvDTreeTrainData::set_data( const CvMat* _train_data, int _tflag,
} }
if (is_buf_16u) if (is_buf_16u)
udst[i] = (unsigned short)i; udst[i] = (unsigned short)i; // TODO: memory corruption may be here
else else
idst[i] = i; idst[i] = i;
_fdst[i] = val; _fdst[i] = val;
@ -751,7 +773,7 @@ CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
if (is_buf_16u) if (is_buf_16u)
{ {
unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols + unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset); vi*sample_count + root->offset);
for( i = 0; i < count; i++ ) for( i = 0; i < count; i++ )
{ {
@ -762,7 +784,7 @@ CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
} }
else else
{ {
int* idst = buf->data.i + root->buf_idx*buf->cols + int* idst = buf->data.i + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset; vi*sample_count + root->offset;
for( i = 0; i < count; i++ ) for( i = 0; i < count; i++ )
{ {
@ -788,7 +810,7 @@ CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
if (is_buf_16u) if (is_buf_16u)
{ {
unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols + unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
vi*sample_count + data_root->offset); vi*sample_count + data_root->offset);
for( i = 0; i < num_valid; i++ ) for( i = 0; i < num_valid; i++ )
{ {
@ -812,7 +834,7 @@ CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
} }
else else
{ {
int* idst_idx = buf->data.i + root->buf_idx*buf->cols + int* idst_idx = buf->data.i + root->buf_idx*get_length_subbuf() +
vi*sample_count + root->offset; vi*sample_count + root->offset;
for( i = 0; i < num_valid; i++ ) for( i = 0; i < num_valid; i++ )
{ {
@@ -840,14 +862,14 @@ CvDTreeNode* CvDTreeTrainData::subsample_data( const CvMat* _subsample_idx )
const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf); const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf);
if (is_buf_16u) if (is_buf_16u)
{ {
unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols + unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*get_length_subbuf() +
workVarCount*sample_count + root->offset); workVarCount*sample_count + root->offset);
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]]; sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];
} }
else else
{ {
int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols + int* sample_idx_dst = buf->data.i + root->buf_idx*get_length_subbuf() +
workVarCount*sample_count + root->offset; workVarCount*sample_count + root->offset;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
sample_idx_dst[i] = sample_idx_src[sidx[i]]; sample_idx_dst[i] = sample_idx_src[sidx[i]];
@@ -1158,10 +1180,10 @@ void CvDTreeTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ord_valu
const int* sample_indices = get_sample_indices(n, sample_indices_buf); const int* sample_indices = get_sample_indices(n, sample_indices_buf);
if( !is_buf_16u ) if( !is_buf_16u )
*sorted_indices = buf->data.i + n->buf_idx*buf->cols + *sorted_indices = buf->data.i + n->buf_idx*get_length_subbuf() +
vi*sample_count + n->offset; vi*sample_count + n->offset;
else { else {
const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols + const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*get_length_subbuf() +
vi*sample_count + n->offset ); vi*sample_count + n->offset );
for( int i = 0; i < node_sample_count; i++ ) for( int i = 0; i < node_sample_count; i++ )
sorted_indices_buf[i] = short_indices[i]; sorted_indices_buf[i] = short_indices[i];
@@ -1232,10 +1254,10 @@ const int* CvDTreeTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* cat_
{ {
const int* cat_values = 0; const int* cat_values = 0;
if( !is_buf_16u ) if( !is_buf_16u )
cat_values = buf->data.i + n->buf_idx*buf->cols + cat_values = buf->data.i + n->buf_idx*get_length_subbuf() +
vi*sample_count + n->offset; vi*sample_count + n->offset;
else { else {
const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols + const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*get_length_subbuf() +
vi*sample_count + n->offset); vi*sample_count + n->offset);
for( int i = 0; i < n->sample_count; i++ ) for( int i = 0; i < n->sample_count; i++ )
cat_values_buf[i] = short_values[i]; cat_values_buf[i] = short_values[i];
@@ -3004,6 +3026,7 @@ void CvDTree::split_node_data( CvDTreeNode* node )
int new_buf_idx = data->get_child_buf_idx( node ); int new_buf_idx = data->get_child_buf_idx( node );
int work_var_count = data->get_work_var_count(); int work_var_count = data->get_work_var_count();
CvMat* buf = data->buf; CvMat* buf = data->buf;
size_t length_buf_row = data->get_length_subbuf();
cv::AutoBuffer<uchar> inn_buf(n*(3*sizeof(int) + sizeof(float))); cv::AutoBuffer<uchar> inn_buf(n*(3*sizeof(int) + sizeof(float)));
int* temp_buf = (int*)(uchar*)inn_buf; int* temp_buf = (int*)(uchar*)inn_buf;
@@ -3049,7 +3072,7 @@ void CvDTree::split_node_data( CvDTreeNode* node )
{ {
unsigned short *ldst, *rdst, *ldst0, *rdst0; unsigned short *ldst, *rdst, *ldst0, *rdst0;
//unsigned short tl, tr; //unsigned short tl, tr;
ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols + ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*length_buf_row +
vi*scount + left->offset); vi*scount + left->offset);
rdst0 = rdst = (unsigned short*)(ldst + nl); rdst0 = rdst = (unsigned short*)(ldst + nl);
@@ -3095,9 +3118,9 @@ void CvDTree::split_node_data( CvDTreeNode* node )
else else
{ {
int *ldst0, *ldst, *rdst0, *rdst; int *ldst0, *ldst, *rdst0, *rdst;
ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols + ldst0 = ldst = buf->data.i + left->buf_idx*length_buf_row +
vi*scount + left->offset; vi*scount + left->offset;
rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols + rdst0 = rdst = buf->data.i + right->buf_idx*length_buf_row +
vi*scount + right->offset; vi*scount + right->offset;
// split sorted // split sorted
@@ -3158,9 +3181,9 @@ void CvDTree::split_node_data( CvDTreeNode* node )
if (data->is_buf_16u) if (data->is_buf_16u)
{ {
unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols + unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*length_buf_row +
vi*scount + left->offset); vi*scount + left->offset);
unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols + unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*length_buf_row +
vi*scount + right->offset); vi*scount + right->offset);
for( i = 0; i < n; i++ ) for( i = 0; i < n; i++ )
@@ -3188,9 +3211,9 @@ void CvDTree::split_node_data( CvDTreeNode* node )
} }
else else
{ {
int *ldst = buf->data.i + left->buf_idx*buf->cols + int *ldst = buf->data.i + left->buf_idx*length_buf_row +
vi*scount + left->offset; vi*scount + left->offset;
int *rdst = buf->data.i + right->buf_idx*buf->cols + int *rdst = buf->data.i + right->buf_idx*length_buf_row +
vi*scount + right->offset; vi*scount + right->offset;
for( i = 0; i < n; i++ ) for( i = 0; i < n; i++ )
@@ -3230,9 +3253,9 @@ void CvDTree::split_node_data( CvDTreeNode* node )
int pos = data->get_work_var_count(); int pos = data->get_work_var_count();
if (data->is_buf_16u) if (data->is_buf_16u)
{ {
unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols + unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*length_buf_row +
pos*scount + left->offset); pos*scount + left->offset);
unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols + unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*length_buf_row +
pos*scount + right->offset); pos*scount + right->offset);
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
{ {
@@ -3252,9 +3275,9 @@ void CvDTree::split_node_data( CvDTreeNode* node )
} }
else else
{ {
int* ldst = buf->data.i + left->buf_idx*buf->cols + int* ldst = buf->data.i + left->buf_idx*length_buf_row +
pos*scount + left->offset; pos*scount + left->offset;
int* rdst = buf->data.i + right->buf_idx*buf->cols + int* rdst = buf->data.i + right->buf_idx*length_buf_row +
pos*scount + right->offset; pos*scount + right->offset;
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
{ {
@@ -3310,7 +3333,7 @@ float CvDTree::calc_error( CvMLData* _data, int type, vector<float> *resp )
float r = (float)predict( &sample, missing ? &miss : 0 )->value; float r = (float)predict( &sample, missing ? &miss : 0 )->value;
if( pred_resp ) if( pred_resp )
pred_resp[i] = r; pred_resp[i] = r;
int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1; int d = fabs((double)r - response->data.fl[(size_t)si*r_step]) <= FLT_EPSILON ? 0 : 1;
err += d; err += d;
} }
err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX; err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
@@ -3327,7 +3350,7 @@ float CvDTree::calc_error( CvMLData* _data, int type, vector<float> *resp )
float r = (float)predict( &sample, missing ? &miss : 0 )->value; float r = (float)predict( &sample, missing ? &miss : 0 )->value;
if( pred_resp ) if( pred_resp )
pred_resp[i] = r; pred_resp[i] = r;
float d = r - response->data.fl[si*r_step]; float d = r - response->data.fl[(size_t)si*r_step];
err += d*d; err += d*d;
} }
err = sample_count ? err / (float)sample_count : -FLT_MAX; err = sample_count ? err / (float)sample_count : -FLT_MAX;
@@ -3633,8 +3656,8 @@ CvDTreeNode* CvDTree::predict( const CvMat* _sample,
int vi = split->var_idx; int vi = split->var_idx;
int ci = vtype[vi]; int ci = vtype[vi];
i = vidx ? vidx[vi] : vi; i = vidx ? vidx[vi] : vi;
float val = sample[i*step]; float val = sample[(size_t)i*step];
if( m && m[i*mstep] ) if( m && m[(size_t)i*mstep] )
continue; continue;
if( ci < 0 ) // ordered if( ci < 0 ) // ordered
dir = val <= split->ord.c ? -1 : 1; dir = val <= split->ord.c ? -1 : 1;

View File

@@ -1935,20 +1935,14 @@ icvLoadCascadeCART( const char** input_cascade, int n, CvSize orig_window_size )
CV_IMPL CvHaarClassifierCascade* CV_IMPL CvHaarClassifierCascade*
cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size ) cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
{ {
const char** input_cascade = 0;
CvHaarClassifierCascade *cascade = 0;
int i, n;
const char* slash;
char name[_MAX_PATH];
int size = 0;
char* ptr = 0;
if( !directory ) if( !directory )
CV_Error( CV_StsNullPtr, "Null path is passed" ); CV_Error( CV_StsNullPtr, "Null path is passed" );
n = (int)strlen(directory)-1; char name[_MAX_PATH];
slash = directory[n] == '\\' || directory[n] == '/' ? "" : "/";
int n = (int)strlen(directory)-1;
const char* slash = directory[n] == '\\' || directory[n] == '/' ? "" : "/";
int size = 0;
/* try to read the classifier from directory */ /* try to read the classifier from directory */
for( n = 0; ; n++ ) for( n = 0; ; n++ )
@@ -1969,10 +1963,14 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
CV_Error( CV_StsBadArg, "Invalid path" ); CV_Error( CV_StsBadArg, "Invalid path" );
size += (n+1)*sizeof(char*); size += (n+1)*sizeof(char*);
input_cascade = (const char**)cvAlloc( size ); const char** input_cascade = (const char**)cvAlloc( size );
ptr = (char*)(input_cascade + n + 1);
if( !input_cascade )
CV_Error( CV_StsNoMem, "Could not allocate memory for input_cascade" );
char* ptr = (char*)(input_cascade + n + 1);
for( i = 0; i < n; i++ ) for( int i = 0; i < n; i++ )
{ {
sprintf( name, "%s/%d/AdaBoostCARTHaarClassifier.txt", directory, i ); sprintf( name, "%s/%d/AdaBoostCARTHaarClassifier.txt", directory, i );
FILE* f = fopen( name, "rb" ); FILE* f = fopen( name, "rb" );
@@ -1990,7 +1988,8 @@ cvLoadHaarClassifierCascade( const char* directory, CvSize orig_window_size )
} }
input_cascade[n] = 0; input_cascade[n] = 0;
cascade = icvLoadCascadeCART( input_cascade, n, orig_window_size );
CvHaarClassifierCascade* cascade = icvLoadCascadeCART( input_cascade, n, orig_window_size );
if( input_cascade ) if( input_cascade )
cvFree( &input_cascade ); cvFree( &input_cascade );

View File

@@ -1396,7 +1396,7 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
const int n, const int bx, const int by, const int n, const int bx, const int by,
const int threadsNum, int *kLevels, int **processingLevels) const int threadsNum, int *kLevels, int **processingLevels)
{ {
int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby, numDotProducts; int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby;
int j, minValue, argMin, lambda, maxValue, k; int j, minValue, argMin, lambda, maxValue, k;
int *dotProd, *weights, *disp; int *dotProd, *weights, *disp;
if (H == NULL || all_F == NULL) if (H == NULL || all_F == NULL)
@@ -1420,8 +1420,6 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
// of feature map with part filter // of feature map with part filter
dbx = 2 * bx; dbx = 2 * bx;
dby = 2 * by; dby = 2 * by;
// Total number of dot products for all levels
numDotProducts = 0;
lambda = LAMBDA; lambda = LAMBDA;
for (i = 0; i < numLevels; i++) for (i = 0; i < numLevels; i++)
{ {
@@ -1429,7 +1427,6 @@ static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObjec
H->pyramid[i + lambda]->sizeY * rootFilterDim + H->pyramid[i + lambda]->sizeY * rootFilterDim +
(H->pyramid[i]->sizeX + dbx) * (H->pyramid[i]->sizeX + dbx) *
(H->pyramid[i]->sizeY + dby) * sumPartFiltersDim; (H->pyramid[i]->sizeY + dby) * sumPartFiltersDim;
numDotProducts += dotProd[i];
} }
// Allocation memory for saving dot product number performed by each thread // Allocation memory for saving dot product number performed by each thread
weights = (int *)malloc(sizeof(int) * threadsNum); weights = (int *)malloc(sizeof(int) * threadsNum);

View File

@@ -84,20 +84,26 @@ namespace cv
//this function may be obsoleted //this function may be obsoleted
//CV_EXPORTS cl_device_id getDevice(); //CV_EXPORTS cl_device_id getDevice();
//the function must be called before any other cv::ocl::functions, it initialize ocl runtime //the function must be called before any other cv::ocl::functions, it initialize ocl runtime
//each Info corresponds to an OpenCL platform
//each platform contains one or more devices, each with its own name
CV_EXPORTS int getDevice(std::vector<Info> &oclinfo, int devicetype = CVCL_DEVICE_TYPE_GPU); CV_EXPORTS int getDevice(std::vector<Info> &oclinfo, int devicetype = CVCL_DEVICE_TYPE_GPU);
//set device you want to use, optional function after getDevice be called //set device you want to use, optional function after getDevice be called
//devnum is the index of the selected device in the DeviceName vector of Info
CV_EXPORTS void setDevice(Info &oclinfo, int devnum = 0); CV_EXPORTS void setDevice(Info &oclinfo, int devnum = 0);
//this function is not ready yet
//CV_EXPORTS void getComputeCapability(cl_device_id device, int &major, int &minor);
//optional function, if you want save opencl binary kernel to the file, set its path //optional function, if you want save opencl binary kernel to the file, set its path
CV_EXPORTS void setBinpath(const char *path); CV_EXPORTS void setBinpath(const char *path);
//The two functions below are used to get opencl runtime so that opencv can interactive with
//other opencl program
//The two functions below enable other OpenCL programs to use the ocl module's cl_context and cl_command_queue
CV_EXPORTS void* getoclContext(); CV_EXPORTS void* getoclContext();
CV_EXPORTS void* getoclCommandQueue(); CV_EXPORTS void* getoclCommandQueue();
//this function enables the ocl module to use a customized cl_context and cl_command_queue
//getDevice also needs to be called before this function
CV_EXPORTS void setDeviceEx(Info &oclinfo, void *ctx, void *qu, int devnum = 0);
//////////////////////////////// Error handling //////////////////////// //////////////////////////////// Error handling ////////////////////////
CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func); CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
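The header comments above spell out a call order: getDevice must run before any other cv::ocl function, setDevice and setBinpath are optional follow-ups, and getoclContext/getoclCommandQueue expose the runtime to other OpenCL code. A minimal usage sketch of that contract follows; the include path, device index and cache directory are illustrative assumptions rather than values from this patch, and error handling is omitted:

#include <vector>
#include "opencv2/ocl/ocl.hpp"   // assumed header location for the ocl module

int main()
{
    std::vector<cv::ocl::Info> oclinfo;
    // must be called before any other cv::ocl function; returns the number of devices found
    if (cv::ocl::getDevice(oclinfo, CVCL_DEVICE_TYPE_GPU) == 0)
        return -1;
    cv::ocl::setDevice(oclinfo[0], 0);      // optional: pick device 0 of the first platform
    cv::ocl::setBinpath("./cl_cache/");     // optional: cache compiled kernel binaries on disk
    void *ctx = cv::ocl::getoclContext();   // hand the runtime over to other OpenCL code
    void *q   = cv::ocl::getoclCommandQueue();
    (void)ctx; (void)q;
    return 0;
}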

View File

@@ -2205,7 +2205,6 @@ void cv::ocl::transpose(const oclMat &src, oclMat &dst)
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4 || src.type() == CV_8SC3 || src.type() == CV_8SC4 || CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4 || src.type() == CV_8SC3 || src.type() == CV_8SC4 ||
src.type() == CV_16UC2 || src.type() == CV_16SC2 || src.type() == CV_32SC1 || src.type() == CV_32FC1); src.type() == CV_16UC2 || src.type() == CV_16SC2 || src.type() == CV_32SC1 || src.type() == CV_32FC1);
stringstream idxstr;
oclMat emptyMat; oclMat emptyMat;
if( src.data == dst.data && dst.cols == dst.rows ) if( src.data == dst.data && dst.cols == dst.rows )

View File

@@ -12,11 +12,13 @@
// //
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved. // Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. // Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// @Authors // @Authors
// Guoping Long, longguoping@gmail.com // Guoping Long, longguoping@gmail.com
// Niko Li, newlife20080214@gmail.com // Niko Li, newlife20080214@gmail.com
// Yao Wang, bitwangyaoyao@gmail.com
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met: // are permitted provided that the following conditions are met:
// //
@@ -292,23 +294,12 @@ namespace cv
} }
return devcienums; return devcienums;
} }
void setDevice(Info &oclinfo, int devnum)
{
CV_Assert(devnum >= 0);
cl_int status = 0;
cl_context_properties cps[3] =
{
CL_CONTEXT_PLATFORM, (cl_context_properties)(oclinfo.impl->oclplatform), 0
};
oclinfo.impl->devnum = devnum;
oclinfo.impl->oclcontext = clCreateContext(cps, 1, &oclinfo.impl->devices[devnum], NULL, NULL, &status);
openCLVerifyCall(status);
//create the command queue using the first device of the list
oclinfo.impl->clCmdQueue = clCreateCommandQueue(oclinfo.impl->oclcontext, oclinfo.impl->devices[devnum],
CL_QUEUE_PROFILING_ENABLE, &status);
openCLVerifyCall(status);
static void fillClcontext(Info &oclinfo)
{
//get device information //get device information
size_t devnum = oclinfo.impl->devnum;
openCLSafeCall(clGetDeviceInfo(oclinfo.impl->devices[devnum], CL_DEVICE_MAX_WORK_GROUP_SIZE, openCLSafeCall(clGetDeviceInfo(oclinfo.impl->devices[devnum], CL_DEVICE_MAX_WORK_GROUP_SIZE,
sizeof(size_t), (void *)&oclinfo.impl->maxWorkGroupSize, NULL)); sizeof(size_t), (void *)&oclinfo.impl->maxWorkGroupSize, NULL));
openCLSafeCall(clGetDeviceInfo(oclinfo.impl->devices[devnum], CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, openCLSafeCall(clGetDeviceInfo(oclinfo.impl->devices[devnum], CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS,
@@ -338,7 +329,41 @@ namespace cv
oclinfo.impl -> double_support = 1; oclinfo.impl -> double_support = 1;
} }
Context::setContext(oclinfo); Context::setContext(oclinfo);
} }
void setDevice(Info &oclinfo, int devnum)
{
CV_Assert(devnum >= 0);
cl_int status = 0;
cl_context_properties cps[3] =
{
CL_CONTEXT_PLATFORM, (cl_context_properties)(oclinfo.impl->oclplatform), 0
};
oclinfo.impl->devnum = devnum;
oclinfo.impl->oclcontext = clCreateContext(cps, 1, &oclinfo.impl->devices[devnum], NULL, NULL, &status);
openCLVerifyCall(status);
//create the command queue using the first device of the list
oclinfo.impl->clCmdQueue = clCreateCommandQueue(oclinfo.impl->oclcontext, oclinfo.impl->devices[devnum],
CL_QUEUE_PROFILING_ENABLE, &status);
openCLVerifyCall(status);
fillClcontext(oclinfo);
}
void setDeviceEx(Info &oclinfo, void *ctx, void *q, int devnum)
{
CV_Assert(devnum >= 0);
oclinfo.impl->devnum = devnum;
if(ctx && q)
{
oclinfo.impl->oclcontext = (cl_context)ctx;
oclinfo.impl->clCmdQueue = (cl_command_queue)q;
clRetainContext((cl_context)ctx);
clRetainCommandQueue((cl_command_queue)q);
fillClcontext(oclinfo);
}
}
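setDeviceEx covers the opposite direction: the caller creates its own cl_context and cl_command_queue and the ocl module adopts them, retaining both handles as the implementation above shows. A hedged sketch of such a caller, using plain OpenCL 1.x calls with error checking omitted; the platform/device selection is illustrative only:

// hypothetical caller that owns its own OpenCL objects
cl_platform_id platform; cl_device_id device; cl_int err;
clGetPlatformIDs(1, &platform, NULL);
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
cl_context_properties props[] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform, 0 };
cl_context ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
cl_command_queue queue = clCreateCommandQueue(ctx, device, 0, &err);

std::vector<cv::ocl::Info> oclinfo;
cv::ocl::getDevice(oclinfo, CVCL_DEVICE_TYPE_GPU);             // still required first
cv::ocl::setDeviceEx(oclinfo[0], (void*)ctx, (void*)queue, 0); // ocl module retains ctx/queue
// the caller keeps ownership and releases ctx/queue when it is done with them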
void *getoclContext() void *getoclContext()
{ {
@@ -440,87 +465,35 @@ namespace cv
Context *clcxt = Context::getContext(); Context *clcxt = Context::getContext();
clcxt->impl->Binpath = path; clcxt->impl->Binpath = path;
} }
int savetofile(const Context *clcxt, cl_program &program, const char *fileName)
int savetofile(const Context*, cl_program &program, const char *fileName)
{ {
//cl_int status; size_t binarySize;
size_t numDevices = 1;
cl_device_id *devices = clcxt->impl->devices;
//figure out the sizes of each of the binaries.
size_t *binarySizes = (size_t *)malloc( sizeof(size_t) * numDevices );
openCLSafeCall(clGetProgramInfo(program, openCLSafeCall(clGetProgramInfo(program,
CL_PROGRAM_BINARY_SIZES, CL_PROGRAM_BINARY_SIZES,
sizeof(size_t) * numDevices, sizeof(size_t),
binarySizes, NULL)); &binarySize, NULL));
char* binary = (char*)malloc(binarySize);
size_t i = 0; if(binary == NULL)
//copy over all of the generated binaries.
char **binaries = (char **)malloc( sizeof(char *) * numDevices );
if(binaries == NULL)
{ {
CV_Error(CV_StsNoMem, "Failed to allocate host memory.(binaries)\r\n"); CV_Error(CV_StsNoMem, "Failed to allocate host memory.");
}
for(i = 0; i < numDevices; i++)
{
if(binarySizes[i] != 0)
{
binaries[i] = (char *)malloc( sizeof(char) * binarySizes[i]);
if(binaries[i] == NULL)
{
CV_Error(CV_StsNoMem, "Failed to allocate host memory.(binaries[i])\r\n");
}
}
else
{
binaries[i] = NULL;
}
} }
openCLSafeCall(clGetProgramInfo(program, openCLSafeCall(clGetProgramInfo(program,
CL_PROGRAM_BINARIES, CL_PROGRAM_BINARIES,
sizeof(char *) * numDevices, sizeof(char *),
binaries, &binary,
NULL)); NULL));
//dump out each binary into its own separate file. FILE *fp = fopen(fileName, "wb+");
for(i = 0; i < numDevices; i++) if(fp != NULL)
{ {
if(binarySizes[i] != 0) fwrite(binary, binarySize, 1, fp);
{ free(binary);
char deviceName[1024]; fclose(fp);
openCLSafeCall(clGetDeviceInfo(devices[i],
CL_DEVICE_NAME,
sizeof(deviceName),
deviceName,
NULL));
printf( "%s binary kernel: %s\n", deviceName, fileName);
FILE *fp = fopen(fileName, "wb+");
if(fp == NULL)
{
char *temp = NULL;
sprintf(temp, "Failed to load kernel file : %s\r\n", fileName);
CV_Error(CV_GpuApiCallError, temp);
}
else
{
fwrite(binaries[i], binarySizes[i], 1, fp);
free(binaries[i]);
fclose(fp);
}
}
else
{
printf("Skipping %s since there is no binary data to write!\n",
fileName);
}
} }
free(binarySizes);
free(binaries);
return 1; return 1;
} }
cl_kernel openCLGetKernelFromSource(const Context *clCxt, const char **source, string kernelName, cl_kernel openCLGetKernelFromSource(const Context *clCxt, const char **source, string kernelName,
const char *build_options) const char *build_options)
{ {
@@ -572,7 +545,7 @@ namespace cv
program = clCreateProgramWithSource( program = clCreateProgramWithSource(
clCxt->impl->clContext, 1, source, NULL, &status); clCxt->impl->clContext, 1, source, NULL, &status);
openCLVerifyCall(status); openCLVerifyCall(status);
status = clBuildProgram(program, 1, &(clCxt->impl->devices[0]), all_build_options, NULL, NULL); status = clBuildProgram(program, 1, &(clCxt->impl->devices), all_build_options, NULL, NULL);
if(status == CL_SUCCESS && clCxt->impl->Binpath.size()) if(status == CL_SUCCESS && clCxt->impl->Binpath.size())
savetofile(clCxt, program, filename.c_str()); savetofile(clCxt, program, filename.c_str());
} }
@@ -587,13 +560,14 @@ namespace cv
cl_int status = 0; cl_int status = 0;
program = clCreateProgramWithBinary(clCxt->impl->clContext, program = clCreateProgramWithBinary(clCxt->impl->clContext,
1, 1,
&(clCxt->impl->devices[0]), &(clCxt->impl->devices),
(const size_t *)&binarySize, (const size_t *)&binarySize,
(const unsigned char **)&binary, (const unsigned char **)&binary,
NULL, NULL,
&status); &status);
openCLVerifyCall(status); openCLVerifyCall(status);
status = clBuildProgram(program, 1, &(clCxt->impl->devices[0]), all_build_options, NULL, NULL); status = clBuildProgram(program, 1, &(clCxt->impl->devices), all_build_options, NULL, NULL);
delete[] binary;
} }
if(status != CL_SUCCESS) if(status != CL_SUCCESS)
@@ -604,14 +578,14 @@ namespace cv
char *buildLog = NULL; char *buildLog = NULL;
size_t buildLogSize = 0; size_t buildLogSize = 0;
logStatus = clGetProgramBuildInfo(program, logStatus = clGetProgramBuildInfo(program,
clCxt->impl->devices[0], CL_PROGRAM_BUILD_LOG, buildLogSize, clCxt->impl->devices, CL_PROGRAM_BUILD_LOG, buildLogSize,
buildLog, &buildLogSize); buildLog, &buildLogSize);
if(logStatus != CL_SUCCESS) if(logStatus != CL_SUCCESS)
cout << "Failed to build the program and get the build info." << endl; cout << "Failed to build the program and get the build info." << endl;
buildLog = new char[buildLogSize]; buildLog = new char[buildLogSize];
CV_DbgAssert(!!buildLog); CV_DbgAssert(!!buildLog);
memset(buildLog, 0, buildLogSize); memset(buildLog, 0, buildLogSize);
openCLSafeCall(clGetProgramBuildInfo(program, clCxt->impl->devices[0], openCLSafeCall(clGetProgramBuildInfo(program, clCxt->impl->devices,
CL_PROGRAM_BUILD_LOG, buildLogSize, buildLog, NULL)); CL_PROGRAM_BUILD_LOG, buildLogSize, buildLog, NULL));
cout << "\n\t\t\tBUILD LOG\n"; cout << "\n\t\t\tBUILD LOG\n";
cout << buildLog << endl; cout << buildLog << endl;
@@ -633,7 +607,7 @@ namespace cv
void openCLVerifyKernel(const Context *clCxt, cl_kernel kernel, size_t *localThreads) void openCLVerifyKernel(const Context *clCxt, cl_kernel kernel, size_t *localThreads)
{ {
size_t kernelWorkGroupSize; size_t kernelWorkGroupSize;
openCLSafeCall(clGetKernelWorkGroupInfo(kernel, clCxt->impl->devices[0], openCLSafeCall(clGetKernelWorkGroupInfo(kernel, clCxt->impl->devices,
CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &kernelWorkGroupSize, 0)); CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &kernelWorkGroupSize, 0));
CV_Assert( (localThreads[0] <= clCxt->impl->maxWorkItemSizes[0]) && CV_Assert( (localThreads[0] <= clCxt->impl->maxWorkItemSizes[0]) &&
(localThreads[1] <= clCxt->impl->maxWorkItemSizes[1]) && (localThreads[1] <= clCxt->impl->maxWorkItemSizes[1]) &&
@@ -795,15 +769,16 @@ namespace cv
Context *clcxt = getContext(); Context *clcxt = getContext();
clcxt->impl->clContext = oclinfo.impl->oclcontext; clcxt->impl->clContext = oclinfo.impl->oclcontext;
clcxt->impl->clCmdQueue = oclinfo.impl->clCmdQueue; clcxt->impl->clCmdQueue = oclinfo.impl->clCmdQueue;
clcxt->impl->devices = &oclinfo.impl->devices[oclinfo.impl->devnum]; clcxt->impl->devices = oclinfo.impl->devices[oclinfo.impl->devnum];
clcxt->impl->devName = oclinfo.impl->devName[oclinfo.impl->devnum]; clcxt->impl->devName = oclinfo.impl->devName[oclinfo.impl->devnum];
clcxt->impl->maxDimensions = oclinfo.impl->maxDimensions; clcxt->impl->maxDimensions = oclinfo.impl->maxDimensions;
clcxt->impl->maxWorkGroupSize = oclinfo.impl->maxWorkGroupSize; clcxt->impl->maxWorkGroupSize = oclinfo.impl->maxWorkGroupSize;
clcxt->impl->maxWorkItemSizes = oclinfo.impl->maxWorkItemSizes; for(size_t i=0; i<clcxt->impl->maxDimensions && i<4; i++)
clcxt->impl->maxWorkItemSizes[i] = oclinfo.impl->maxWorkItemSizes[i];
clcxt->impl->maxComputeUnits = oclinfo.impl->maxComputeUnits; clcxt->impl->maxComputeUnits = oclinfo.impl->maxComputeUnits;
clcxt->impl->double_support = oclinfo.impl->double_support; clcxt->impl->double_support = oclinfo.impl->double_support;
//extra options to recognize compiler options //extra options to recognize compiler options
clcxt->impl->extra_options = oclinfo.impl->extra_options; memcpy(clcxt->impl->extra_options, oclinfo.impl->extra_options, 512);
} }
Context::Context() Context::Context()
{ {
@@ -814,11 +789,12 @@ namespace cv
impl->devices = NULL; impl->devices = NULL;
impl->maxDimensions = 0; impl->maxDimensions = 0;
impl->maxWorkGroupSize = 0; impl->maxWorkGroupSize = 0;
impl->maxWorkItemSizes = NULL; for(int i=0; i<4; i++)
impl->maxWorkItemSizes[i] = 0;
impl->maxComputeUnits = 0; impl->maxComputeUnits = 0;
impl->double_support = 0; impl->double_support = 0;
//extra options to recognize vendor specific fp64 extensions //extra options to recognize vendor specific fp64 extensions
impl->extra_options = NULL; memset(impl->extra_options, 0, 512);
programCache = ProgramCache::getProgramCache(); programCache = ProgramCache::getProgramCache();
} }

View File

@@ -12,10 +12,12 @@
// //
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved. // Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. // Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// @Authors // @Authors
// Guoping Long, longguoping@gmail.com // Guoping Long, longguoping@gmail.com
// Yao Wang, bitwangyaoyao@gmail.com
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met: // are permitted provided that the following conditions are met:
@@ -131,15 +133,15 @@ namespace cv
//Information of the OpenCL context //Information of the OpenCL context
cl_context clContext; cl_context clContext;
cl_command_queue clCmdQueue; cl_command_queue clCmdQueue;
cl_device_id *devices; cl_device_id devices;
string devName; string devName;
cl_uint maxDimensions; cl_uint maxDimensions;
size_t maxWorkGroupSize; size_t maxWorkGroupSize;
size_t *maxWorkItemSizes; size_t maxWorkItemSizes[4];
cl_uint maxComputeUnits; cl_uint maxComputeUnits;
int double_support; int double_support;
//extra options to recognize vendor specific fp64 extensions //extra options to recognize vendor specific fp64 extensions
char *extra_options; char extra_options[512];
string Binpath; string Binpath;
}; };
} }

View File

@@ -742,7 +742,7 @@ static void lkSparse_run(oclMat &I, oclMat &J,
Context *clCxt = I.clCxt; Context *clCxt = I.clCxt;
char platform[256] = {0}; char platform[256] = {0};
cl_platform_id pid; cl_platform_id pid;
clGetDeviceInfo(*clCxt->impl->devices, CL_DEVICE_PLATFORM, sizeof(pid), &pid, NULL); clGetDeviceInfo(clCxt->impl->devices, CL_DEVICE_PLATFORM, sizeof(pid), &pid, NULL);
clGetPlatformInfo(pid, CL_PLATFORM_NAME, 256, platform, NULL); clGetPlatformInfo(pid, CL_PLATFORM_NAME, 256, platform, NULL);
std::string namestr = platform; std::string namestr = platform;
bool isImageSupported = true; bool isImageSupported = true;