Merge remote-tracking branch 'upstream/2.4' into merge-2.4

* #1538 from StevenPuttemans:bugfix_3283
* #1545 from alalek:ocl_test_fix_rng
* #1551 from alalek:cmake_install_win
* #1570 from ilya-lavrenov:ipp_warn_fix
* #1573 from alalek:perf_simple_strategy
* #1574 from alalek:svm_workaround
* #1576 from alalek:ocl_fix_cl_double
* #1577 from ilya-lavrenov:ocl_setto_opencl12
* #1578 from asmorkalov:android_fd_cp_fix
* #1579 from ilya-lavrenov:ocl_norm
* #1582 from sperrholz:ocl-arithm-additions
* #1586 from ilya-lavrenov:ocl_setto_win_fix
* #1589 from ilya-lavrenov:pr1582_fix
* #1591 from alalek:ocl_remove_cl_hpp_h
* #1592 from alalek:ocl_program_cache_update
* #1593 from ilya-lavrenov:ocl_war_on_double
* #1594 from ilya-lavrenov:ocl_perf
* #1595 from alalek:cl_code_cleanup
* #1596 from alalek:test_fix_run_py
* #1597 from alalek:ocl_fix_cleanup
* #1598 from alalek:ocl_fix_build_mac
* #1599 from ilya-lavrenov:ocl_mac_kernel_warnings
* #1601 from ilya-lavrenov:ocl_fix_tvl1_and_sparse
* #1602 from alalek:ocl_test_dump_info
* #1603 from ilya-lavrenov:ocl_disable_svm_noblas
* #1605 from alalek:ocl_fixes
* #1606 from ilya-lavrenov:ocl_imgproc
* #1607 from ilya-lavrenov:ocl_fft_cleanup
* #1608 from alalek:fix_warn_upd_haar
* #1609 from ilya-lavrenov:ocl_some_optimization
* #1610 from alalek:ocl_fix_perf_kalman
* #1612 from alalek:ocl_fix_string_info
* #1614 from ilya-lavrenov:ocl_svm_misprint
* #1616 from ilya-lavrenov:ocl_cvtColor
* #1617 from ilya-lavrenov:ocl_info
* #1622 from a0byte:2.4
* #1625 from ilya-lavrenov:to_string

Conflicts:
	cmake/OpenCVConfig.cmake
	cmake/OpenCVDetectPython.cmake
	cmake/OpenCVGenConfig.cmake
	modules/core/CMakeLists.txt
	modules/nonfree/src/surf.ocl.cpp
	modules/ocl/include/opencv2/ocl/ocl.hpp
	modules/ocl/include/opencv2/ocl/private/util.hpp
	modules/ocl/perf/main.cpp
	modules/ocl/src/arithm.cpp
	modules/ocl/src/cl_operations.cpp
	modules/ocl/src/cl_programcache.cpp
	modules/ocl/src/color.cpp
	modules/ocl/src/fft.cpp
	modules/ocl/src/filtering.cpp
	modules/ocl/src/gemm.cpp
	modules/ocl/src/haar.cpp
	modules/ocl/src/imgproc.cpp
	modules/ocl/src/matrix_operations.cpp
	modules/ocl/src/pyrlk.cpp
	modules/ocl/src/split_merge.cpp
	modules/ocl/src/svm.cpp
	modules/ocl/test/main.cpp
	modules/ocl/test/test_fft.cpp
	modules/ocl/test/test_moments.cpp
	modules/ocl/test/test_objdetect.cpp
	modules/ocl/test/test_optflow.cpp
	modules/ocl/test/utility.hpp
	modules/python/CMakeLists.txt
	modules/ts/include/opencv2/ts.hpp
	modules/ts/src/ts_perf.cpp
	samples/android/face-detection/jni/DetectionBasedTracker_jni.cpp
Author: Alexander Alekhin
Date:   2013-10-15 18:43:37 +04:00

124 changed files with 3144 additions and 3195 deletions


@@ -538,12 +538,23 @@ CV_EXPORTS void smoothBorder(Mat& img, const Scalar& color, int delta = 3);
 CV_EXPORTS void printVersionInfo(bool useStdOut = true);
 } //namespace cvtest
 
-#define CV_TEST_MAIN(resourcesubdir) \
+#ifndef __CV_TEST_EXEC_ARGS
+#if defined(_MSC_VER) && (_MSC_VER <= 1400)
+#define __CV_TEST_EXEC_ARGS(...) \
+    while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
+#else
+#define __CV_TEST_EXEC_ARGS(...) \
+    __VA_ARGS__;
+#endif
+#endif
+
+#define CV_TEST_MAIN(resourcesubdir, ...) \
 int main(int argc, char **argv) \
 { \
     cvtest::TS::ptr()->init(resourcesubdir); \
     ::testing::InitGoogleTest(&argc, argv); \
-    cvtest::printVersionInfo();\
+    cvtest::printVersionInfo(); \
+    __CV_TEST_EXEC_ARGS(__VA_ARGS__) \
     return RUN_ALL_TESTS(); \
 }
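
The variadic CV_TEST_MAIN above lets a test module append extra setup statements that run after InitGoogleTest() and before RUN_ALL_TESTS(). A minimal usage sketch, not taken from this diff; "test_precomp.hpp" and initCustomState() are illustrative placeholders only:

// Hypothetical example only -- not part of this commit.
#include "test_precomp.hpp"  // assumed module test header that pulls in the ts framework

static void initCustomState()
{
    // module-specific setup would go here (e.g. selecting a device or a data path)
}

CV_TEST_MAIN("cv", initCustomState())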


@@ -241,9 +241,20 @@ typedef struct CV_EXPORTS performance_metrics
     };
 
     performance_metrics();
+    void clear();
 } performance_metrics;
 
+/*****************************************************************************************\
+*                           Strategy for performance measuring                            *
+\*****************************************************************************************/
+
+enum PERF_STRATEGY
+{
+    PERF_STRATEGY_BASE = 0,
+    PERF_STRATEGY_SIMPLE = 1,
+};
+
+
 /*****************************************************************************************\
 *                            Base fixture for performance tests                           *
 \*****************************************************************************************/
@@ -259,6 +270,9 @@ public:
     static std::string getDataPath(const std::string& relativePath);
     static std::string getSelectedImpl();
 
+    static enum PERF_STRATEGY getPerformanceStrategy();
+    static enum PERF_STRATEGY setPerformanceStrategy(enum PERF_STRATEGY strategy);
+
 protected:
     virtual void PerfTestBody() = 0;
@@ -471,23 +485,25 @@ CV_EXPORTS void PrintTo(const Size& sz, ::std::ostream* os);
     INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
     void fixture##_##name::PerfTestBody()
 
+#ifndef __CV_TEST_EXEC_ARGS
 #if defined(_MSC_VER) && (_MSC_VER <= 1400)
-#define CV_PERF_TEST_MAIN_INTERNALS_ARGS(...) \
+#define __CV_TEST_EXEC_ARGS(...) \
     while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
 #else
-#define CV_PERF_TEST_MAIN_INTERNALS_ARGS(...) \
+#define __CV_TEST_EXEC_ARGS(...) \
     __VA_ARGS__;
 #endif
+#endif
 
 #define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...) \
-    CV_PERF_TEST_MAIN_INTERNALS_ARGS(__VA_ARGS__) \
-    ::perf::Regression::Init(#modulename);\
-    ::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls),\
-                           argc, argv);\
-    ::testing::InitGoogleTest(&argc, argv);\
-    cvtest::printVersionInfo();\
-    ::testing::Test::RecordProperty("cv_module_name", #modulename);\
-    ::perf::TestBase::RecordRunParameters();\
+    ::perf::Regression::Init(#modulename); \
+    ::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls), \
+                           argc, argv); \
+    ::testing::InitGoogleTest(&argc, argv); \
+    cvtest::printVersionInfo(); \
+    ::testing::Test::RecordProperty("cv_module_name", #modulename); \
+    ::perf::TestBase::RecordRunParameters(); \
+    __CV_TEST_EXEC_ARGS(__VA_ARGS__) \
     return RUN_ALL_TESTS();
 
 // impls must be an array, not a pointer; "plain" should always be one of the implementations
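
With the PERF_STRATEGY enum and TestBase::setPerformanceStrategy() declared above, a module can opt into the simple measuring strategy from its perf main. A hypothetical sketch, not taken from this diff, assuming the public CV_PERF_TEST_MAIN macro forwards its trailing arguments into CV_PERF_TEST_MAIN_INTERNALS and that "perf_precomp.hpp" is the module's usual precompiled header:

// Hypothetical example only -- not part of this commit.
#include "perf_precomp.hpp"  // assumed to include the ts perf framework header

CV_PERF_TEST_MAIN(imgproc,
    ::perf::TestBase::setPerformanceStrategy(::perf::PERF_STRATEGY_SIMPLE))

The same behavior can also be requested at run time through the new --perf_strategy=simple option parsed in TestBase::Init() further below.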


@@ -7,6 +7,8 @@ from subprocess import Popen, PIPE
 hostos = os.name # 'nt', 'posix'
 hostmachine = platform.machine() # 'x86', 'AMD64', 'x86_64'
 
+errorCode = 0
+
 SIMD_DETECTION_PROGRAM="""
 #if __SSE5__
 # error SSE5
@@ -641,6 +643,8 @@ class TestSuite(object):
         return True
 
     def runTest(self, path, workingDir, _stdout, _stderr, args = []):
+        global errorCode
+
         if self.error:
             return
         args = args[:]
@@ -759,9 +763,9 @@ class TestSuite(object):
            print >> _stderr, "Run command:", " ".join(cmd)
            try:
-               Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = self.java_test_binary_dir + "/.build").wait()
-           except OSError:
-               pass
+               errorCode = Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = self.java_test_binary_dir + "/.build").wait()
+           except:
+               print "Unexpected error:", sys.exc_info()[0]
 
            return None
        else:
@@ -777,9 +781,9 @@ class TestSuite(object):
            print >> _stderr, "Run command:", " ".join(cmd)
            try:
-               Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = workingDir).wait()
-           except OSError:
-               pass
+               errorCode = Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = workingDir).wait()
+           except:
+               print "Unexpected error:", sys.exc_info()[0]
 
            # clean temporary files
            if orig_temp_path:
@@ -891,3 +895,7 @@ if __name__ == "__main__":
     if logs:
         print >> sys.stderr, "Collected: ", " ".join(logs)
+
+    if errorCode != 0:
+        print "Error code: ", errorCode, (" (0x%x)" % (errorCode & 0xffffffff))
+        exit(errorCode)


@@ -18,6 +18,9 @@ int64 TestBase::_timeadjustment = 0;
 static std::vector<std::string> available_impls;
 
 static std::string param_impl;
+
+static enum PERF_STRATEGY param_strategy = PERF_STRATEGY_BASE;
+
 static double param_max_outliers;
 static double param_max_deviation;
 static unsigned int param_min_samples;
@@ -152,7 +155,7 @@ void Regression::init(const std::string& testSuitName, const std::string& ext)
 {
     if (!storageInPath.empty())
     {
-        LOGE("Subsequent initialisation of Regression utility is not allowed.");
+        LOGE("Subsequent initialization of Regression utility is not allowed.");
         return;
     }
@@ -598,6 +601,11 @@ Regression& Regression::operator() (const std::string& name, cv::InputArray arra
 *                                ::perf::performance_metrics
 \*****************************************************************************************/
 performance_metrics::performance_metrics()
 {
+    clear();
+}
+
+void performance_metrics::clear()
+{
     bytesIn = 0;
     bytesOut = 0;
@@ -643,6 +651,7 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
         "|the implementation variant of functions under test}"
         "{ perf_list_impls |false |list available implementation variants and exit}"
         "{ perf_run_cpu |false |deprecated, equivalent to --perf_impl=plain}"
+        "{ perf_strategy |default |specifies performance measuring strategy: default, base or simple (weak restrictions)}"
 #ifdef ANDROID
         "{ perf_time_limit |6.0 |default time limit for a single test (in seconds)}"
         "{ perf_affinity_mask |0 |set affinity mask for the main thread}"
@@ -668,6 +677,24 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
     ::testing::AddGlobalTestEnvironment(new PerfEnvironment);
 
     param_impl = args.has("perf_run_cpu") ? "plain" : args.get<std::string>("perf_impl");
+    std::string perf_strategy = args.get<std::string>("perf_strategy");
+    if (perf_strategy == "default")
+    {
+        // nothing
+    }
+    else if (perf_strategy == "base")
+    {
+        param_strategy = PERF_STRATEGY_BASE;
+    }
+    else if (perf_strategy == "simple")
+    {
+        param_strategy = PERF_STRATEGY_SIMPLE;
+    }
+    else
+    {
+        printf("No such strategy: %s\n", perf_strategy.c_str());
+        exit(1);
+    }
     param_max_outliers = std::min(100., std::max(0., args.get<double>("perf_max_outliers")));
     param_min_samples = std::max(1u, args.get<unsigned int>("perf_min_samples"));
     param_max_deviation = std::max(0., args.get<double>("perf_max_deviation"));
@@ -761,6 +788,18 @@ std::string TestBase::getSelectedImpl()
     return param_impl;
 }
 
+enum PERF_STRATEGY TestBase::getPerformanceStrategy()
+{
+    return param_strategy;
+}
+
+enum PERF_STRATEGY TestBase::setPerformanceStrategy(enum PERF_STRATEGY strategy)
+{
+    enum PERF_STRATEGY ret = param_strategy;
+    param_strategy = strategy;
+    return ret;
+}
+
 int64 TestBase::_calibrate()
 {
@@ -791,6 +830,11 @@ int64 TestBase::_calibrate()
     _helper h;
     h.PerfTestBody();
     double compensation = h.getMetrics().min;
+    if (param_strategy == PERF_STRATEGY_SIMPLE)
+    {
+        CV_Assert(compensation < 0.01 * cv::getTickFrequency());
+        compensation = 0.0f; // simple strategy doesn't require any compensation
+    }
     LOGD("Time compensation is %.0f", compensation);
     return (int64)compensation;
 }
@@ -854,8 +898,64 @@ cv::Size TestBase::getSize(cv::InputArray a)
 bool TestBase::next()
 {
-    bool has_next = ++currentIter < nIters && totalTime < timeLimit;
+    static int64 lastActivityPrintTime = 0;
+
+    if (currentIter != (unsigned int)-1)
+    {
+        if (currentIter + 1 != times.size())
+            ADD_FAILURE() << " next() is called before stopTimer()";
+    }
+    else
+    {
+        lastActivityPrintTime = 0;
+        metrics.clear();
+    }
+
     cv::theRNG().state = param_seed; //this rng should generate same numbers for each run
+    ++currentIter;
+
+    bool has_next = false;
+
+    do {
+        assert(currentIter == times.size());
+        if (currentIter == 0)
+        {
+            has_next = true;
+            break;
+        }
+
+        if (param_strategy == PERF_STRATEGY_BASE)
+        {
+            has_next = currentIter < nIters && totalTime < timeLimit;
+        }
+        else
+        {
+            assert(param_strategy == PERF_STRATEGY_SIMPLE);
+
+            if (totalTime - lastActivityPrintTime >= cv::getTickFrequency() * 10)
+            {
+                std::cout << '.' << std::endl;
+                lastActivityPrintTime = totalTime;
+            }
+
+            if (currentIter >= nIters)
+            {
+                has_next = false;
+                break;
+            }
+
+            if (currentIter < param_min_samples)
+            {
+                has_next = true;
+                break;
+            }
+
+            calcMetrics();
+
+            double criteria = 0.03; // 3%
+            if (fabs(metrics.mean) > 1e-6)
+                has_next = metrics.stddev > criteria * fabs(metrics.mean);
+            else
+                has_next = true;
+        }
+    } while (false);
 
 #ifdef ANDROID
     if (log_power_checkpoints)
@@ -868,6 +968,9 @@ bool TestBase::next()
         if (!has_next) RecordProperty("test_complete", cv::format("%llu",t1).c_str());
     }
 #endif
+
+    if (has_next)
+        startTimer(); // really we should measure activity from this moment, so reset start time
     return has_next;
 }
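
For reference, the stopping rule introduced in next() above for PERF_STRATEGY_SIMPLE keeps collecting samples until the standard deviation drops to about 3% of the mean (once perf_min_samples measurements exist), instead of exhausting a fixed iteration and time budget. A standalone sketch of that criterion, an illustration only and not code from this commit:

// Hypothetical illustration of the simple-strategy stopping rule shown above.
#include <cmath>
#include <cstddef>

static bool needMoreSamples(double mean, double stddev, std::size_t samples,
                            std::size_t minSamples, std::size_t maxSamples)
{
    if (samples == 0)          return true;   // always take the first sample
    if (samples >= maxSamples) return false;  // hard iteration limit (nIters)
    if (samples < minSamples)  return true;   // honor perf_min_samples first
    const double criteria = 0.03;             // 3%, the same constant used in next()
    if (std::fabs(mean) > 1e-6)
        return stddev > criteria * std::fabs(mean);
    return true;                              // degenerate mean: keep sampling
}
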
@@ -914,7 +1017,7 @@ void TestBase::stopTimer()
 {
     int64 time = cv::getTickCount();
     if (lastTime == 0)
-        ADD_FAILURE() << " stopTimer() is called before startTimer()";
+        ADD_FAILURE() << " stopTimer() is called before startTimer()/next()";
     lastTime = time - lastTime;
     totalTime += lastTime;
     lastTime -= _timeadjustment;
@@ -925,6 +1028,7 @@ void TestBase::stopTimer()
 performance_metrics& TestBase::calcMetrics()
 {
+    CV_Assert(metrics.samples <= (unsigned int)currentIter);
     if ((metrics.samples == (unsigned int)currentIter) || times.size() == 0)
         return metrics;
@@ -946,47 +1050,61 @@ performance_metrics& TestBase::calcMetrics()
     std::sort(times.begin(), times.end());
 
-    //estimate mean and stddev for log(time)
-    double gmean = 0;
-    double gstddev = 0;
-    int n = 0;
-    for(TimeVector::const_iterator i = times.begin(); i != times.end(); ++i)
-    {
-        double x = static_cast<double>(*i)/runsPerIteration;
-        if (x < DBL_EPSILON) continue;
-        double lx = log(x);
-
-        ++n;
-        double delta = lx - gmean;
-        gmean += delta / n;
-        gstddev += delta * (lx - gmean);
-    }
-
-    gstddev = n > 1 ? sqrt(gstddev / (n - 1)) : 0;
-
     TimeVector::const_iterator start = times.begin();
     TimeVector::const_iterator end = times.end();
 
-    //filter outliers assuming log-normal distribution
-    //http://stackoverflow.com/questions/1867426/modeling-distribution-of-performance-measurements
-    int offset = 0;
-    if (gstddev > DBL_EPSILON)
+    if (param_strategy == PERF_STRATEGY_BASE)
     {
-        double minout = exp(gmean - 3 * gstddev) * runsPerIteration;
-        double maxout = exp(gmean + 3 * gstddev) * runsPerIteration;
-        while(*start < minout) ++start, ++metrics.outliers, ++offset;
-        do --end, ++metrics.outliers; while(*end > maxout);
-        ++end, --metrics.outliers;
+        //estimate mean and stddev for log(time)
+        double gmean = 0;
+        double gstddev = 0;
+        int n = 0;
+        for(TimeVector::const_iterator i = times.begin(); i != times.end(); ++i)
+        {
+            double x = static_cast<double>(*i)/runsPerIteration;
+            if (x < DBL_EPSILON) continue;
+            double lx = log(x);
+
+            ++n;
+            double delta = lx - gmean;
+            gmean += delta / n;
+            gstddev += delta * (lx - gmean);
+        }
+
+        gstddev = n > 1 ? sqrt(gstddev / (n - 1)) : 0;
+
+        //filter outliers assuming log-normal distribution
+        //http://stackoverflow.com/questions/1867426/modeling-distribution-of-performance-measurements
+        if (gstddev > DBL_EPSILON)
+        {
+            double minout = exp(gmean - 3 * gstddev) * runsPerIteration;
+            double maxout = exp(gmean + 3 * gstddev) * runsPerIteration;
+            while(*start < minout) ++start, ++metrics.outliers;
+            do --end, ++metrics.outliers; while(*end > maxout);
+            ++end, --metrics.outliers;
+        }
     }
+    else if (param_strategy == PERF_STRATEGY_SIMPLE)
+    {
+        metrics.outliers = static_cast<int>(times.size() * param_max_outliers / 100);
+        for (unsigned int i = 0; i < metrics.outliers; i++)
+            --end;
+    }
+    else
+    {
+        assert(false);
+    }
+
+    int offset = static_cast<int>(start - times.begin());
 
     metrics.min = static_cast<double>(*start)/runsPerIteration;
     //calc final metrics
-    n = 0;
-    gmean = 0;
-    gstddev = 0;
+    unsigned int n = 0;
+    double gmean = 0;
+    double gstddev = 0;
     double mean = 0;
     double stddev = 0;
-    int m = 0;
+    unsigned int m = 0;
     for(; start != end; ++start)
     {
         double x = static_cast<double>(*start)/runsPerIteration;
@@ -1008,11 +1126,10 @@ performance_metrics& TestBase::calcMetrics()
     metrics.gmean = exp(gmean);
     metrics.gstddev = m > 1 ? sqrt(gstddev / (m - 1)) : 0;
     metrics.stddev = n > 1 ? sqrt(stddev / (n - 1)) : 0;
-    metrics.median = n % 2
+    metrics.median = (n % 2
             ? (double)times[offset + n / 2]
-            : 0.5 * (times[offset + n / 2] + times[offset + n / 2 - 1]);
-    metrics.median /= runsPerIteration;
+            : 0.5 * (times[offset + n / 2] + times[offset + n / 2 - 1])
+        ) / runsPerIteration;
 
     return metrics;
 }
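
The outlier handling above is now split by strategy: the base strategy keeps the log-normal 3-sigma filter, while the simple strategy merely drops the slowest param_max_outliers percent of the sorted samples. A minimal standalone sketch of that trimming, an illustration only and not code from this commit:

// Hypothetical illustration of the simple-strategy outlier trimming shown above.
#include <algorithm>
#include <cstddef>
#include <vector>

static std::vector<double> trimSlowest(std::vector<double> times, double maxOutliersPercent)
{
    std::sort(times.begin(), times.end());
    std::size_t drop = static_cast<std::size_t>(times.size() * maxOutliersPercent / 100.0);
    times.resize(times.size() - drop);   // trim only from the slow (upper) end
    return times;
}
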
@@ -1026,17 +1143,31 @@ void TestBase::validateMetrics()
     ASSERT_GE(m.samples, 1u)
       << " No time measurements was performed.\nstartTimer() and stopTimer() commands are required for performance tests.";
 
-    EXPECT_GE(m.samples, param_min_samples)
-      << " Only a few samples are collected.\nPlease increase number of iterations or/and time limit to get reliable performance measurements.";
-
-    if (m.gstddev > DBL_EPSILON)
+    if (param_strategy == PERF_STRATEGY_BASE)
     {
-        EXPECT_GT(/*m.gmean * */1., /*m.gmean * */ 2 * sinh(m.gstddev * param_max_deviation))
-          << " Test results are not reliable ((mean-sigma,mean+sigma) deviation interval is greater than measured time interval).";
-    }
+        EXPECT_GE(m.samples, param_min_samples)
+          << " Only a few samples are collected.\nPlease increase number of iterations or/and time limit to get reliable performance measurements.";
 
-    EXPECT_LE(m.outliers, std::max((unsigned int)cvCeil(m.samples * param_max_outliers / 100.), 1u))
-      << " Test results are not reliable (too many outliers).";
+        if (m.gstddev > DBL_EPSILON)
+        {
+            EXPECT_GT(/*m.gmean * */1., /*m.gmean * */ 2 * sinh(m.gstddev * param_max_deviation))
+              << " Test results are not reliable ((mean-sigma,mean+sigma) deviation interval is greater than measured time interval).";
+        }
+
+        EXPECT_LE(m.outliers, std::max((unsigned int)cvCeil(m.samples * param_max_outliers / 100.), 1u))
+          << " Test results are not reliable (too many outliers).";
+    }
+    else if (param_strategy == PERF_STRATEGY_SIMPLE)
+    {
+        double mean = metrics.mean * 1000.0f / metrics.frequency;
+        double stddev = metrics.stddev * 1000.0f / metrics.frequency;
+        double percents = stddev / mean * 100.f;
+        printf(" samples = %d, mean = %.2f, stddev = %.2f (%.1f%%)\n", (int)metrics.samples, mean, stddev, percents);
+    }
+    else
+    {
+        assert(false);
+    }
 }
 
 void TestBase::reportMetrics(bool toJUnitXML)
@@ -1199,12 +1330,12 @@ void TestBase::RunPerfTestBody()
     {
         this->PerfTestBody();
     }
-    catch(PerfEarlyExitException)
+    catch(PerfEarlyExitException&)
    {
        metrics.terminationReason = performance_metrics::TERM_INTERRUPT;
        return;//no additional failure logging
    }
-    catch(cv::Exception e)
+    catch(cv::Exception& e)
     {
         metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
 #ifdef HAVE_CUDA
@@ -1213,7 +1344,7 @@ void TestBase::RunPerfTestBody()
 #endif
         FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n Actual: it throws cv::Exception:\n " << e.what();
     }
-    catch(std::exception e)
+    catch(std::exception& e)
     {
         metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
         FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n Actual: it throws std::exception:\n " << e.what();
@@ -1234,6 +1365,7 @@ TestBase::_declareHelper& TestBase::_declareHelper::iterations(unsigned int n)
     test->times.reserve(n);
     test->nIters = std::min(n, TestBase::iterationsLimitDefault);
     test->currentIter = (unsigned int)-1;
+    test->metrics.clear();
     return *this;
 }
@@ -1242,6 +1374,7 @@ TestBase::_declareHelper& TestBase::_declareHelper::time(double timeLimitSecs)
     test->times.clear();
     test->currentIter = (unsigned int)-1;
     test->timeLimit = (int64)(timeLimitSecs * cv::getTickFrequency());
+    test->metrics.clear();
     return *this;
 }