replaced has* methods in the GPU module with the TargetArchs monostate
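For context, a minimal before/after usage sketch of the renamed checks (assuming the OpenCV 2.x gpu module header opencv2/gpu/gpu.hpp; illustrative only, not an excerpt from the diff):

    #include <opencv2/gpu/gpu.hpp>
    using namespace cv::gpu;

    void reportBuildTargets()
    {
        // Formerly free functions: hasGreaterOrEqualVersion(1, 3),
        // hasGreaterOrEqualVersion(1, 1), hasVersion(2, 0), ...
        // Now static members of the TargetArchs monostate:
        bool doubles = TargetArchs::builtWith(NATIVE_DOUBLE);  // some build target >= 1.3
        bool atomics = TargetArchs::builtWith(ATOMICS);        // some build target >= 1.1
        bool exact20 = TargetArchs::has(2, 0);                  // PTX or CUBIN for exactly 2.0
        bool ge13    = TargetArchs::hasEqualOrGreater(1, 3);    // PTX or CUBIN for >= 1.3
        (void)doubles; (void)atomics; (void)exact20; (void)ge13;
    }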
@@ -162,52 +162,62 @@ namespace
 }
 
 
-CV_EXPORTS bool cv::gpu::hasPtxVersion(int major, int minor)
+CV_EXPORTS bool cv::gpu::TargetArchs::builtWith(cv::gpu::GpuFeature feature)
+{
+    if (feature == NATIVE_DOUBLE)
+        return hasEqualOrGreater(1, 3);
+    if (feature == ATOMICS)
+        return hasEqualOrGreater(1, 1);
+    return true;
+}
+
+
+CV_EXPORTS bool cv::gpu::TargetArchs::has(int major, int minor)
+{
+    return hasPtx(major, minor) || hasBin(major, minor);
+}
+
+
+CV_EXPORTS bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
 {
     return ::compare(CUDA_ARCH_PTX, major * 10 + minor, std::equal_to<int>());
 }
 
 
-CV_EXPORTS bool cv::gpu::hasLessOrEqualPtxVersion(int major, int minor)
+CV_EXPORTS bool cv::gpu::TargetArchs::hasBin(int major, int minor)
 {
+    return ::compare(CUDA_ARCH_BIN, major * 10 + minor, std::equal_to<int>());
+}
+
+
+CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
+{
     return ::compare(CUDA_ARCH_PTX, major * 10 + minor,
                      std::less_equal<int>());
 }
 
 
-CV_EXPORTS bool cv::gpu::hasGreaterOrEqualPtxVersion(int major, int minor)
+CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
 {
+    return hasEqualOrGreaterPtx(major, minor) ||
+           hasEqualOrGreaterBin(major, minor);
+}
+
+
+CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
+{
     return ::compare(CUDA_ARCH_PTX, major * 10 + minor,
                      std::greater_equal<int>());
 }
 
 
-CV_EXPORTS bool cv::gpu::hasCubinVersion(int major, int minor)
-{
-    return ::compare(CUDA_ARCH_BIN, major * 10 + minor, std::equal_to<int>());
-}
-
-
-CV_EXPORTS bool cv::gpu::hasGreaterOrEqualCubinVersion(int major, int minor)
+CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
 {
     return ::compare(CUDA_ARCH_BIN, major * 10 + minor,
                      std::greater_equal<int>());
 }
 
 
-CV_EXPORTS bool cv::gpu::hasVersion(int major, int minor)
-{
-    return hasPtxVersion(major, minor) || hasCubinVersion(major, minor);
-}
-
-
-CV_EXPORTS bool cv::gpu::hasGreaterOrEqualVersion(int major, int minor)
-{
-    return hasGreaterOrEqualPtxVersion(major, minor) ||
-           hasGreaterOrEqualCubinVersion(major, minor);
-}
-
-
 CV_EXPORTS bool cv::gpu::isCompatibleWith(int device)
 {
     // According to the CUDA C Programming Guide Version 3.2: "PTX code
@@ -218,12 +228,12 @@ CV_EXPORTS bool cv::gpu::isCompatibleWith(int device)
     getComputeCapability(device, major, minor);
 
     // Check PTX compatibility
-    if (hasLessOrEqualPtxVersion(major, minor))
+    if (TargetArchs::hasEqualOrLessPtx(major, minor))
        return true;
 
     // Check CUBIN compatibility
     for (int i = minor; i >= 0; --i)
-        if (hasCubinVersion(major, i))
+        if (TargetArchs::hasBin(major, i))
            return true;
 
     return false;
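The isCompatibleWith() hunk above is the main consumer of the new queries: per the CUDA C Programming Guide rule it quotes, embedded PTX for an architecture no newer than the device can still be JIT-compiled, whereas a prebuilt CUBIN only runs on the same major compute version with an equal or lower minor version. A self-contained sketch of that rule follows; the helper name and the arch vectors are hypothetical stand-ins, since the real code compares the device against the CUDA_ARCH_PTX and CUDA_ARCH_BIN strings baked in at build time:

    #include <cstddef>
    #include <vector>

    // Architectures encoded as major * 10 + minor, e.g. 13 for compute 1.3.
    bool deviceIsCompatible(const std::vector<int>& builtPtx,
                            const std::vector<int>& builtBin,
                            int devMajor, int devMinor)
    {
        const int dev = devMajor * 10 + devMinor;

        // PTX compatibility: any embedded PTX target <= the device's capability.
        for (std::size_t i = 0; i < builtPtx.size(); ++i)
            if (builtPtx[i] <= dev)
                return true;

        // CUBIN compatibility: same major version, minor version not exceeded.
        for (std::size_t i = 0; i < builtBin.size(); ++i)
            if (builtBin[i] / 10 == devMajor && builtBin[i] % 10 <= devMinor)
                return true;

        return false;
    }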
@@ -170,7 +170,7 @@ Scalar cv::gpu::sum(const GpuMat& src, GpuMat& buf)
     ensureSizeIsEnough(buf_size, CV_8U, buf);
 
     Caller* callers = multipass_callers;
-    if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+    if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
         callers = singlepass_callers;
 
     Caller caller = callers[src.depth()];
@@ -206,7 +206,7 @@ Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
         sqrSumCaller<int>, sqrSumCaller<float>, 0 };
 
     Caller* callers = multipass_callers;
-    if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+    if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
         callers = singlepass_callers;
 
     Size buf_size;
@@ -283,7 +283,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
 
     CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));
 
-    CV_Assert(src.type() != CV_64F || (hasGreaterOrEqualVersion(1, 3) &&
+    CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
                                        hasNativeDoubleSupport(getDevice())));
 
     double minVal_; if (!minVal) minVal = &minVal_;
@@ -296,7 +296,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
     if (mask.empty())
     {
         Caller* callers = multipass_callers;
-        if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+        if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
             callers = singlepass_callers;
 
         Caller caller = callers[src.type()];
@@ -306,7 +306,7 @@ void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal, const Gp
     else
     {
         MaskedCaller* callers = masked_multipass_callers;
-        if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+        if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
             callers = masked_singlepass_callers;
 
         MaskedCaller caller = callers[src.type()];
@@ -382,7 +382,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
 
     CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));
 
-    CV_Assert(src.type() != CV_64F || (hasGreaterOrEqualVersion(1, 3) &&
+    CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
                                        hasNativeDoubleSupport(getDevice())));
 
     double minVal_; if (!minVal) minVal = &minVal_;
@@ -399,7 +399,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
     if (mask.empty())
     {
         Caller* callers = multipass_callers;
-        if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+        if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
             callers = singlepass_callers;
 
         Caller caller = callers[src.type()];
@@ -409,7 +409,7 @@ void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point
     else
     {
         MaskedCaller* callers = masked_multipass_callers;
-        if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+        if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
             callers = masked_singlepass_callers;
 
         MaskedCaller caller = callers[src.type()];
@@ -463,7 +463,7 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
 
     CV_Assert(src.channels() == 1);
 
-    CV_Assert(src.type() != CV_64F || (hasGreaterOrEqualVersion(1, 3) &&
+    CV_Assert(src.type() != CV_64F || (TargetArchs::builtWith(NATIVE_DOUBLE) &&
                                        hasNativeDoubleSupport(getDevice())));
 
     Size buf_size;
@@ -471,7 +471,7 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
     ensureSizeIsEnough(buf_size, CV_8U, buf);
 
     Caller* callers = multipass_callers;
-    if (hasGreaterOrEqualVersion(1, 1) && hasAtomicsSupport(getDevice()))
+    if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
         callers = singlepass_callers;
 
     Caller caller = callers[src.type()];
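The sum/sqrSum/minMax/minMaxLoc/countNonZero hunks above all make the same substitution: the build-time check hasGreaterOrEqualVersion(1, 1) becomes TargetArchs::builtWith(ATOMICS), and it stays paired with the runtime hasAtomicsSupport(getDevice()) query before the single-pass kernels are selected. Condensed, the dispatch pattern is (Caller and the caller tables come from the surrounding, unchanged code and are only sketched here):

    // Single-pass reductions rely on global-memory atomics, so they are used
    // only when the library was compiled for an arch that has them (>= 1.1)
    // AND the currently selected device actually supports them.
    Caller* callers = multipass_callers;
    if (TargetArchs::builtWith(ATOMICS) && hasAtomicsSupport(getDevice()))
        callers = singlepass_callers;
    Caller caller = callers[src.type()];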
@@ -72,8 +72,8 @@ namespace cv { namespace gpu { namespace split_merge
     {
         CV_Assert(src);
         CV_Assert(n > 0);
 
-        bool double_ok = hasGreaterOrEqualVersion(1, 3) &&
+        bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
                          hasNativeDoubleSupport(getDevice());
         CV_Assert(src[0].depth() != CV_64F || double_ok);
 
@@ -116,7 +116,7 @@ namespace cv { namespace gpu { namespace split_merge
     {
         CV_Assert(dst);
 
-        bool double_ok = hasGreaterOrEqualVersion(1, 3) &&
+        bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
                          hasNativeDoubleSupport(getDevice());
         CV_Assert(src.depth() != CV_64F || double_ok);
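The split_merge hunks apply the same two-level gate to double precision: TargetArchs::builtWith(NATIVE_DOUBLE) says whether any compiled target (>= 1.3) supports doubles at all, and hasNativeDoubleSupport(getDevice()) says whether the active device does; CV_64F input is rejected unless both hold. The guard, as it appears throughout the commit:

    // Compile-time capability of the build targets AND runtime capability of
    // the current device must both allow native doubles for CV_64F input.
    bool double_ok = TargetArchs::builtWith(NATIVE_DOUBLE) &&
                     hasNativeDoubleSupport(getDevice());
    CV_Assert(src.depth() != CV_64F || double_ok);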