renamed gpu::DeviceInfo::has to gpu::DeviceInfo::supports

This commit is contained in:
Alexey Spizhevoy 2011-02-09 12:31:05 +00:00
parent 924670d32c
commit 63806c9ab9
13 changed files with 35 additions and 35 deletions

View File

@ -56,7 +56,7 @@ public:
size_t freeMemory() const;
size_t totalMemory() const;
bool has(GpuFeature feature) const;
bool supports(GpuFeature feature) const;
bool isCompatible() const;
};
\end{lstlisting}
@ -108,10 +108,10 @@ Returns the amount of total memory in bytes.
\cvdefCpp{size\_t DeviceInfo::totalMemory();}
\cvCppFunc{gpu::DeviceInfo::has}
\cvCppFunc{gpu::DeviceInfo::supports}
Returns true if the device has the given GPU feature, otherwise false.
\cvdefCpp{bool DeviceInfo::has(GpuFeature feature);}
\cvdefCpp{bool DeviceInfo::supports(GpuFeature feature);}
\begin{description}
\cvarg{feature}{Feature to be checked. See \hyperref[cpp.gpu.GpuFeature]{cv::gpu::GpuFeature}.}
\end{description}

View File

@ -107,7 +107,7 @@ namespace cv
size_t freeMemory() const;
size_t totalMemory() const;
bool has(GpuFeature feature) const;
bool supports(GpuFeature feature) const;
bool isCompatible() const;
private:

View File

@ -531,7 +531,7 @@ void cv::gpu::BruteForceMatcher_GPU_base::radiusMatch(const GpuMat& queryDescs,
}
};
CV_Assert(DeviceInfo().has(ATOMICS));
CV_Assert(DeviceInfo().supports(ATOMICS));
const int nQuery = queryDescs.rows;
const int nTrain = trainDescs.rows;

View File

@ -128,7 +128,7 @@ CV_EXPORTS void cv::gpu::setDevice(int) { throw_nogpu(); }
CV_EXPORTS int cv::gpu::getDevice() { throw_nogpu(); return 0; }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu(); return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu(); return 0; }
bool cv::gpu::DeviceInfo::has(cv::gpu::GpuFeature) const { throw_nogpu(); return false; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::GpuFeature) const { throw_nogpu(); return false; }
bool cv::gpu::DeviceInfo::isCompatible() const { throw_nogpu(); return false; }
void cv::gpu::DeviceInfo::query() { throw_nogpu(); }
void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu(); }
@ -173,7 +173,7 @@ size_t cv::gpu::DeviceInfo::totalMemory() const
}
bool cv::gpu::DeviceInfo::has(cv::gpu::GpuFeature feature) const
bool cv::gpu::DeviceInfo::supports(cv::gpu::GpuFeature feature) const
{
int version = majorVersion() * 10 + minorVersion();
return version >= feature;

View File

@ -201,7 +201,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
ensureSizeIsEnough(buf_size, CV_8U, buf);
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Caller caller = callers[src.depth()];
@ -242,7 +242,7 @@ Scalar cv::gpu::absSum(const GpuMat& src, GpuMat& buf)
ensureSizeIsEnough(buf_size, CV_8U, buf);
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Caller caller = callers[src.depth()];
@ -278,7 +278,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
sqrSumCaller<int>, sqrSumCaller<float>, 0 };
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Size buf_size;
@ -359,7 +359,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));
CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
DeviceInfo().has(NATIVE_DOUBLE)));
DeviceInfo().supports(NATIVE_DOUBLE)));
double minVal_; if (!minVal) minVal = &minVal_;
double maxVal_; if (!maxVal) maxVal = &maxVal_;
@ -371,7 +371,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
if (mask.empty())
{
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Caller caller = callers[src.type()];
@ -381,7 +381,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
else
{
MaskedCaller* callers = masked_multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = masked_singlepass_callers;
MaskedCaller caller = callers[src.type()];
@ -458,7 +458,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));
CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
DeviceInfo().has(NATIVE_DOUBLE)));
DeviceInfo().supports(NATIVE_DOUBLE)));
double minVal_; if (!minVal) minVal = &minVal_;
double maxVal_; if (!maxVal) maxVal = &maxVal_;
@ -474,7 +474,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
if (mask.empty())
{
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Caller caller = callers[src.type()];
@ -484,7 +484,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
else
{
MaskedCaller* callers = masked_multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = masked_singlepass_callers;
MaskedCaller caller = callers[src.type()];
@ -539,14 +539,14 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
CV_Assert(src.channels() == 1);
CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
DeviceInfo().has(NATIVE_DOUBLE)));
DeviceInfo().supports(NATIVE_DOUBLE)));
Size buf_size;
getBufSizeRequired(src.cols, src.rows, buf_size.width, buf_size.height);
ensureSizeIsEnough(buf_size, CV_8U, buf);
Caller* callers = multipass_callers;
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().has(ATOMICS))
if (TargetArchs::builtWith(ATOMICS) && DeviceInfo().supports(ATOMICS))
callers = singlepass_callers;
Caller caller = callers[src.type()];

View File

@ -74,7 +74,7 @@ namespace cv { namespace gpu { namespace split_merge
CV_Assert(n > 0);
bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
DeviceInfo().has(NATIVE_DOUBLE);
DeviceInfo().supports(NATIVE_DOUBLE);
CV_Assert(src[0].depth() != CV_64F || double_ok);
int depth = src[0].depth();
@ -117,7 +117,7 @@ namespace cv { namespace gpu { namespace split_merge
CV_Assert(dst);
bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
DeviceInfo().has(NATIVE_DOUBLE);
DeviceInfo().supports(NATIVE_DOUBLE);
CV_Assert(src.depth() != CV_64F || double_ok);
int depth = src.depth();

View File

@ -106,7 +106,7 @@ namespace
CV_Assert(!img.empty() && img.type() == CV_8UC1);
CV_Assert(mask.empty() || (mask.size() == img.size() && mask.type() == CV_8UC1));
CV_Assert(nOctaves > 0 && nIntervals > 2 && nIntervals < 22);
CV_Assert(DeviceInfo().has(ATOMICS));
CV_Assert(DeviceInfo().supports(ATOMICS));
max_features = static_cast<int>(img.size().area() * featuresRatio);
max_candidates = static_cast<int>(1.5 * max_features);

View File

@ -673,7 +673,7 @@ struct CV_GpuMinMaxTest: public CvTest
try
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
int depth_end = double_ok ? CV_64F : CV_32F;
for (int depth = CV_8U; depth <= depth_end; ++depth)
@ -798,7 +798,7 @@ struct CV_GpuMinMaxLocTest: public CvTest
try
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
int depth_end = double_ok ? CV_64F : CV_32F;
for (int depth = CV_8U; depth <= depth_end; ++depth)
@ -875,7 +875,7 @@ struct CV_GpuCountNonZeroTest: CvTest
try
{
int depth_end;
if (cv::gpu::DeviceInfo().has(cv::gpu::NATIVE_DOUBLE))
if (cv::gpu::DeviceInfo().supports(cv::gpu::NATIVE_DOUBLE))
depth_end = CV_64F;
else
depth_end = CV_32F;

View File

@ -60,7 +60,7 @@ struct CV_GpuBitwiseTest: public CvTest
int rows, cols;
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
int depth_end = double_ok ? CV_64F : CV_32F;
for (int depth = CV_8U; depth <= depth_end; ++depth)

View File

@ -65,7 +65,7 @@ struct CV_GpuMatchTemplateTest: CvTest
try
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
if (!double_ok)
{
// For sqrIntegral
@ -245,7 +245,7 @@ struct CV_GpuMatchTemplateFindPatternInBlackTest: CvTest
try
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
if (!double_ok)
{
// For sqrIntegral

View File

@ -57,7 +57,7 @@ struct CV_GpuMeanShiftTest : public CvTest
cv::Mat img_template;
if (cv::gpu::TargetArchs::builtWith(cv::gpu::COMPUTE_20) &&
cv::gpu::DeviceInfo().has(cv::gpu::COMPUTE_20))
cv::gpu::DeviceInfo().supports(cv::gpu::COMPUTE_20))
img_template = cv::imread(std::string(ts->get_data_path()) + "meanshift/con_result.png");
else
img_template = cv::imread(std::string(ts->get_data_path()) + "meanshift/con_result_CC1X.png");
@ -201,7 +201,7 @@ struct CV_GpuMeanShiftProcTest : public CvTest
cv::FileStorage fs;
if (cv::gpu::TargetArchs::builtWith(cv::gpu::COMPUTE_20) &&
cv::gpu::DeviceInfo().has(cv::gpu::COMPUTE_20))
cv::gpu::DeviceInfo().supports(cv::gpu::COMPUTE_20))
fs.open(std::string(ts->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);
else
fs.open(std::string(ts->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);

View File

@ -69,7 +69,7 @@ struct CV_GpuMeanShiftSegmentationTest : public CvTest {
{
stringstream path;
path << ts->get_data_path() << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
if (TargetArchs::builtWith(COMPUTE_20) && DeviceInfo().has(COMPUTE_20))
if (TargetArchs::builtWith(COMPUTE_20) && DeviceInfo().supports(COMPUTE_20))
path << ".png";
else
path << "_CC1X.png";

View File

@ -64,7 +64,7 @@ struct CV_MergeTest : public CvTest
void CV_MergeTest::can_merge(size_t rows, size_t cols)
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
size_t depth_end = double_ok ? CV_64F : CV_32F;
for (size_t num_channels = 1; num_channels <= 4; ++num_channels)
@ -106,7 +106,7 @@ void CV_MergeTest::can_merge(size_t rows, size_t cols)
void CV_MergeTest::can_merge_submatrixes(size_t rows, size_t cols)
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
size_t depth_end = double_ok ? CV_64F : CV_32F;
for (size_t num_channels = 1; num_channels <= 4; ++num_channels)
@ -180,7 +180,7 @@ struct CV_SplitTest : public CvTest
void CV_SplitTest::can_split(size_t rows, size_t cols)
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
size_t depth_end = double_ok ? CV_64F : CV_32F;
for (size_t num_channels = 1; num_channels <= 4; ++num_channels)
@ -222,7 +222,7 @@ void CV_SplitTest::can_split(size_t rows, size_t cols)
void CV_SplitTest::can_split_submatrix(size_t rows, size_t cols)
{
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
size_t depth_end = double_ok ? CV_64F : CV_32F;
for (size_t num_channels = 1; num_channels <= 4; ++num_channels)
@ -293,7 +293,7 @@ struct CV_SplitMergeTest : public CvTest
void CV_SplitMergeTest::can_split_merge(size_t rows, size_t cols) {
bool double_ok = gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE) &&
gpu::DeviceInfo().has(gpu::NATIVE_DOUBLE);
gpu::DeviceInfo().supports(gpu::NATIVE_DOUBLE);
size_t depth_end = double_ok ? CV_64F : CV_32F;
for (size_t num_channels = 1; num_channels <= 4; ++num_channels)