renamed gpu::DeviceInfo::has to gpu::DeviceInfo::supports
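In short: every call site that queried a device feature through DeviceInfo::has now goes through DeviceInfo::supports; the semantics are unchanged. A minimal sketch of the corresponding downstream update (processOnGpu and runCpuFallback are hypothetical placeholders, not library symbols):

    // Hedged sketch of a typical caller update after this rename.
    cv::gpu::DeviceInfo info;                   // describes the currently selected device
    if (info.supports(cv::gpu::NATIVE_DOUBLE))  // was: info.has(cv::gpu::NATIVE_DOUBLE)
        processOnGpu();                         // hypothetical GPU path
    else
        runCpuFallback();                       // hypothetical CPU fallback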
@@ -531,7 +531,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs,
         }
     };

-    CV_Assert(DeviceInfo().has(ATOMICS));
+    CV_Assert(DeviceInfo().supports(ATOMICS));

     const int nQuery = queryDescs.rows;
     const int nTrain = trainDescs.rows;
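radiusMatch aborts via CV_Assert when the device lacks global atomics; a caller that wants a graceful fallback can probe the same feature first. A hedged usage sketch (the argument list is abbreviated and matchOnCpu is a hypothetical fallback, not part of this diff):

    // Assumed usage, not from this commit: test the feature up front so an
    // unsupported device takes a fallback path instead of asserting.
    if (cv::gpu::DeviceInfo().supports(cv::gpu::ATOMICS))
        matcher.radiusMatch(queryDescs, trainDescs, matches, maxDistance);
    else
        matchOnCpu(queryDescs, trainDescs, matches, maxDistance);  // hypothetical CPU path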
@@ -128,7 +128,7 @@ CV_EXPORTS void cv::gpu::setDevice(int) { throw_nogpu(); }
 CV_EXPORTS int cv::gpu::getDevice() { throw_nogpu(); return 0; }
 size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu(); return 0; }
 size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu(); return 0; }
-bool cv::gpu::DeviceInfo::has(cv::gpu::GpuFeature) const { throw_nogpu(); return false; }
+bool cv::gpu::DeviceInfo::supports(cv::gpu::GpuFeature) const { throw_nogpu(); return false; }
 bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu(); return false; }
 void cv::gpu::DeviceInfo::query() { throw_nogpu(); }
 void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu(); }
@@ -173,7 +173,7 @@ size_t cv::gpu::DeviceInfo::totalMemory() const
 }


-bool cv::gpu::DeviceInfo::has(cv::gpu::GpuFeature feature) const
+bool cv::gpu::DeviceInfo::supports(cv::gpu::GpuFeature feature) const
 {
     int version = majorVersion() * 10 + minorVersion();
     return version >= feature;
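The one-line body works because the GpuFeature enumerators encode the minimum compute capability that introduced each feature, using the same major*10 + minor scheme as the computed version. A standalone model of that encoding (the enumerator values here are illustrative assumptions, not copied from the library header, though global atomics did arrive with CC 1.1 and native doubles with CC 1.3):

    // Illustrative model of the encoding behind "version >= feature".
    enum GpuFeature
    {
        COMPUTE_11    = 11,          // compute capability 1.1
        COMPUTE_13    = 13,          // compute capability 1.3
        ATOMICS       = COMPUTE_11,  // global atomics require CC >= 1.1
        NATIVE_DOUBLE = COMPUTE_13   // native doubles require CC >= 1.3
    };

    bool supports(int majorVersion, int minorVersion, GpuFeature feature)
    {
        int version = majorVersion * 10 + minorVersion;
        return version >= feature;   // e.g. a CC 2.0 device (20) supports both
    }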
@@ -201,7 +201,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
     ensureSizeIsEnough(buf_size, CV_8U, buf);

     Caller* callers = multipass_callers;
-    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
         callers = singlepass_callers;

     Caller caller = callers[src.depth()];
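This two-sided test recurs throughout the reductions below: TargetArchs::builtWith(ATOMICS) asks whether the library binary was compiled with code for an atomics-capable architecture, while DeviceInfo().supports(ATOMICS) asks whether the device actually present can run it; the faster single-pass kernels are chosen only when both hold. A condensed sketch of the dispatch (the function name, Caller typedef, and table layout are stand-ins, not library symbols):

    // Condensed model of the dispatch pattern used by sum/absSum/sqrSum/minMax.
    typedef void (*Caller)(const cv::gpu::GpuMat&, cv::Scalar&);

    Caller pickCaller(int depth, Caller multipass[], Caller singlepass[])
    {
        Caller* callers = multipass;   // safe default: works on every device
        if (cv::gpu::TargetArchs::builtWith(cv::gpu::ATOMICS) &&
            cv::gpu::DeviceInfo().supports(cv::gpu::ATOMICS))
            callers = singlepass;      // atomics let the reduction finish in one pass
        return callers[depth];
    }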
@@ -242,7 +242,7 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
     ensureSizeIsEnough(buf_size, CV_8U, buf);

     Caller* callers = multipass_callers;
-    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
         callers = singlepass_callers;

     Caller caller = callers[src.depth()];
@@ -278,7 +278,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
         sqrSumCaller<int>, sqrSumCaller<float>, 0 };

     Caller* callers = multipass_callers;
-    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
         callers = singlepass_callers;

     Size buf_size;
@@ -359,7 +359,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
     CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));

     CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
-                                       DeviceInfo().has(NATIVE_DOUBLE)));
+                                       DeviceInfo().supports(NATIVE_DOUBLE)));

     double minVal_; if (!minVal) minVal = &minVal_;
     double maxVal_; if (!maxVal) maxVal = &maxVal_;
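The CV_64F guard follows the same double-sided pattern as the atomics check: double-precision sources are rejected unless the binary carries code for a double-capable architecture and the device can execute it. A brief sketch of performing this check before calling minMax (the downcast fallback is an assumption, not shown in this diff):

    // Assumed pre-flight check mirroring the CV_Assert in the hunk above.
    bool canReduceDoubles =
        cv::gpu::TargetArchs::builtWith(cv::gpu::NATIVE_DOUBLE) &&
        cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE);

    cv::gpu::GpuMat src32f;
    if (src.type() == CV_64F && !canReduceDoubles)
        src.convertTo(src32f, CV_32F);  // hypothetical downcast fallback before minMax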
@@ -371,7 +371,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
     if (mask.empty())
     {
         Caller* callers = multipass_callers;
-        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
             callers = singlepass_callers;

         Caller caller = callers[src.type()];
@@ -381,7 +381,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
     else
     {
         MaskedCaller* callers = masked_multipass_callers;
-        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
             callers = masked_singlepass_callers;

         MaskedCaller caller = callers[src.type()];
@@ -458,7 +458,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
     CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));

     CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
-                                       DeviceInfo().has(NATIVE_DOUBLE)));
+                                       DeviceInfo().supports(NATIVE_DOUBLE)));

     double minVal_; if (!minVal) minVal = &minVal_;
     double maxVal_; if (!maxVal) maxVal = &maxVal_;
@@ -474,7 +474,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
     if (mask.empty())
     {
         Caller* callers = multipass_callers;
-        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
             callers = singlepass_callers;

         Caller caller = callers[src.type()];
@@ -484,7 +484,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
     else
     {
         MaskedCaller* callers = masked_multipass_callers;
-        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+        if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
             callers = masked_singlepass_callers;

         MaskedCaller caller = callers[src.type()];
@@ -539,14 +539,14 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
     CV_Assert(src.channels() == 1);

     CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
-                                       DeviceInfo().has(NATIVE_DOUBLE)));
+                                       DeviceInfo().supports(NATIVE_DOUBLE)));

     Size buf_size;
     getBufSizeRequired(src.cols, src.rows, buf_size.width, buf_size.height);
     ensureSizeIsEnough(buf_size, CV_8U, buf);

     Caller* callers = multipass_callers;
-    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
+    if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
         callers = singlepass_callers;

     Caller caller = callers[src.type()];
@@ -74,7 +74,7 @@ namespace cv { namespace gpu { namespace split_merge
     CV_Assert(n > 0);

     bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
-                     DeviceInfo().has(NATIVE_DOUBLE);
+                     DeviceInfo().supports(NATIVE_DOUBLE);
     CV_Assert(src[0].depth() != CV_64F || double_ok);

     int depth = src[0].depth();
@@ -117,7 +117,7 @@ namespace cv { namespace gpu { namespace split_merge
     CV_Assert(dst);

     bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
-                     DeviceInfo().has(NATIVE_DOUBLE);
+                     DeviceInfo().supports(NATIVE_DOUBLE);
     CV_Assert(src.depth() != CV_64F || double_ok);

     int depth = src.depth();
@@ -106,7 +106,7 @@ namespace
     CV_Assert(!img.empty() && img.type() == CV_8UC1);
     CV_Assert(mask.empty() || (mask.size() == img.size() && mask.type() == CV_8UC1));
     CV_Assert(nOctaves > 0 && nIntervals > 2 && nIntervals < 22);
-    CV_Assert(DeviceInfo().has(ATOMICS));
+    CV_Assert(DeviceInfo().supports(ATOMICS));

     max_features = static_cast<int>(img.size().area() * featuresRatio);
     max_candidates = static_cast<int>(1.5 * max_features);