Merge remote-tracking branch 'origin/2.4' into merge-2.4
Conflicts:
	doc/tutorials/definitions/noContent.rst
	doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.rst
	doc/tutorials/introduction/android_binary_package/dev_with_OCV_on_Android.rst
	doc/tutorials/introduction/how_to_write_a_tutorial/how_to_write_a_tutorial.rst
	modules/core/include/opencv2/core/core.hpp
	modules/core/include/opencv2/core/internal.hpp
	modules/core/include/opencv2/core/version.hpp
	modules/gpu/CMakeLists.txt
	modules/highgui/perf/perf_output.cpp
	modules/highgui/test/test_video_io.cpp
	modules/ocl/include/opencv2/ocl/ocl.hpp
	modules/ocl/perf/main.cpp
	modules/ocl/src/hog.cpp
	modules/ocl/src/initialization.cpp
	modules/ocl/src/moments.cpp
	modules/ocl/src/opencl/moments.cl
	modules/ocl/test/main.cpp
	modules/ocl/test/test_moments.cpp
	modules/python/test/test.py
	modules/ts/include/opencv2/ts/ts_perf.hpp
	modules/ts/src/precomp.hpp
	modules/ts/src/ts_perf.cpp
@@ -17566,6 +17566,9 @@ GTEST_DECLARE_string_(color);
 // the tests to run. If the filter is not given all tests are executed.
 GTEST_DECLARE_string_(filter);
 
+// OpenCV extension: same as filter, but for the parameters string.
+GTEST_DECLARE_string_(param_filter);
+
 // This flag causes the Google Test to list tests. None of the tests listed
 // are actually run if the flag is provided.
 GTEST_DECLARE_bool_(list_tests);
@@ -218,8 +218,7 @@ public:
     static bool targetDevice();
 };
 
-# define PERF_RUN_GPU()  ::perf::GpuPerf::targetDevice()
-
+#define PERF_RUN_GPU()  ::perf::GpuPerf::targetDevice()
 
 /*****************************************************************************************\
 *                  Container for performance metrics                                     *
@@ -261,7 +260,11 @@ public:
     TestBase();
 
     static void Init(int argc, const char* const argv[]);
+    static void Init(const std::vector<std::string> & availableImpls,
+                     int argc, const char* const argv[]);
+    static void RecordRunParameters();
     static std::string getDataPath(const std::string& relativePath);
+    static std::string getSelectedImpl();
 
 protected:
     virtual void PerfTestBody() = 0;
@@ -475,15 +478,29 @@ CV_EXPORTS void PrintTo(const Size& sz, ::std::ostream* os);
     void fixture##_##name::PerfTestBody()
 
 
-#define CV_PERF_TEST_MAIN(testsuitname, ...) \
-int main(int argc, char **argv)\
-{\
+#define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...) \
     while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/\
-    ::perf::Regression::Init(#testsuitname);\
-    ::perf::TestBase::Init(argc, argv);\
+    ::perf::Regression::Init(#modulename);\
+    ::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls),\
+                           argc, argv);\
     ::testing::InitGoogleTest(&argc, argv);\
     cvtest::printVersionInfo();\
-    return RUN_ALL_TESTS();\
-}
+    ::testing::Test::RecordProperty("cv_module_name", #modulename);\
+    ::perf::TestBase::RecordRunParameters();\
+    return RUN_ALL_TESTS();
+
+// impls must be an array, not a pointer; "plain" should always be one of the implementations
+#define CV_PERF_TEST_MAIN_WITH_IMPLS(modulename, impls, ...) \
+int main(int argc, char **argv)\
+{\
+    CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, __VA_ARGS__)\
+}
+
+#define CV_PERF_TEST_MAIN(modulename, ...) \
+int main(int argc, char **argv)\
+{\
+    const char * plain_only[] = { "plain" };\
+    CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only, __VA_ARGS__)\
+}
 
 #define TEST_CYCLE_N(n) for(declare.iterations(n); startTimer(), next(); stopTimer())
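For context, a minimal sketch of a perf module's main file using the new macro; the module name and implementation list below are illustrative, not taken from this diff:

#include "opencv2/ts/ts_perf.hpp"

// impls must be a real array (CV_PERF_TEST_MAIN_INTERNALS derives its
// length via sizeof), and "plain" should be one of the implementations.
static const char* impls[] = { "cuda", "plain" };

CV_PERF_TEST_MAIN_WITH_IMPLS(gpu, impls)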
@@ -1,6 +1,9 @@
 #!/usr/bin/env python
 
-import sys, re, os.path
+import collections
+import re
+import os.path
+import sys
 from xml.dom.minidom import parse
 
 class TestInfo(object):
@@ -159,12 +162,31 @@ class TestInfo(object):
             return 1
         return 0
 
+# This is a Sequence for compatibility with old scripts,
+# which treat parseLogFile's return value as a list.
+class TestRunInfo(collections.Sequence):
+    def __init__(self, properties, tests):
+        self.properties = properties
+        self.tests = tests
+
+    def __len__(self):
+        return len(self.tests)
+
+    def __getitem__(self, key):
+        return self.tests[key]
+
 def parseLogFile(filename):
-    tests = []
     log = parse(filename)
-    for case in log.getElementsByTagName("testcase"):
-        tests.append(TestInfo(case))
-    return tests
+
+    properties = {
+        attr_name[3:]: attr_value
+        for (attr_name, attr_value) in log.documentElement.attributes.items()
+        if attr_name.startswith('cv_')
+    }
+
+    tests = map(TestInfo, log.getElementsByTagName("testcase"))
+
+    return TestRunInfo(properties, tests)
 
 
 if __name__ == "__main__":
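A quick sketch of what this buys callers (the file name below is made up): old scripts keep iterating the result like a list, while new code reads the run-level properties:

run = parseLogFile("core_perf.xml")  # hypothetical report file

for test in run:  # old style: behaves as a sequence of TestInfo objects
    test.dump()

# new style: root-element 'cv_*' attributes with the prefix stripped
print run.properties.get("implementation")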
@@ -173,8 +195,18 @@ if __name__ == "__main__":
         exit(0)
 
     for arg in sys.argv[1:]:
-        print "Tests found in", arg
-        tests = parseLogFile(arg)
-        for t in sorted(tests):
+        print "Processing {}...".format(arg)
+
+        run = parseLogFile(arg)
+
+        print "Properties:"
+
+        for (prop_name, prop_value) in run.properties.items():
+            print "\t{} = {}".format(prop_name, prop_value)
+
+        print "Tests:"
+
+        for t in sorted(run.tests):
             t.dump()
+
         print
@@ -1,8 +1,73 @@
 #!/usr/bin/env python
 
+"""
+This script can generate XLS reports from OpenCV tests' XML output files.
+
+To use it, first, create a directory for each machine you ran tests on.
+Each such directory will become a sheet in the report. Put each XML file
+into the corresponding directory.
+
+Then, create your configuration file(s). You can have a global configuration
+file (specified with the -c option), and per-sheet configuration files, which
+must be called sheet.conf and placed in the directory corresponding to the sheet.
+The settings in the per-sheet configuration file will override those in the
+global configuration file, if both are present.
+
+A configuration file must consist of a Python dictionary. The following keys
+will be recognized:
+
+* 'comparisons': [{'from': string, 'to': string}]
+    List of configurations to compare performance between. For each item,
+    the sheet will have a column showing speedup from configuration named
+    'from' to configuration named 'to'.
+
+* 'configuration_matchers': [{'properties': {string: object}, 'name': string}]
+    Instructions for matching test run property sets to configuration names.
+
+    For each found XML file:
+
+    1) All attributes of the root element starting with the prefix 'cv_' are
+       placed in a dictionary, with the cv_ prefix stripped and the cv_module_name
+       element deleted.
+
+    2) The first matcher for which the XML file's property set contains the same
+       keys with equal values as its 'properties' dictionary is searched for.
+       A missing property can be matched by using None as the value.
+
+       Corollary 1: you should place more specific matchers before less specific
+       ones.
+
+       Corollary 2: an empty 'properties' dictionary matches every property set.
+
+    3) If a matching matcher is found, its 'name' string is presumed to be the name
+       of the configuration the XML file corresponds to. Otherwise, a warning is
+       printed. A warning is also printed if two different property sets match to the
+       same configuration name.
+
+* 'configurations': [string]
+    List of names for compile-time and runtime configurations of OpenCV.
+    Each item will correspond to a column of the sheet.
+
+* 'module_colors': {string: string}
+    Mapping from module name to color name. In the sheet, cells containing module
+    names from this mapping will be colored with the corresponding color. You can
+    find the list of available colors here:
+    <http://www.simplistix.co.uk/presentations/python-excel.pdf>.
+
+* 'sheet_name': string
+    Name for the sheet. If this parameter is missing, the name of the sheet's directory
+    will be used.
+
+Note that all keys are optional, although to get useful results, you'll want to
+specify at least 'configurations' and 'configuration_matchers'.
+
+Finally, run the script. Use the --help option for usage information.
+"""
+
 from __future__ import division
 
 import ast
+import fnmatch
 import logging
 import numbers
 import os, os.path
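To make the schema described in the docstring concrete, a hypothetical sheet.conf (every name below is invented for illustration) might read:

# sheet.conf: a single Python dictionary literal.
{
    'sheet_name': 'laptop-x86',

    # One column per configuration, in this order.
    'configurations': ['plain', 'cuda'],

    # More specific matchers first; {} would match any property set.
    'configuration_matchers': [
        {'properties': {'implementation': 'cuda'},  'name': 'cuda'},
        {'properties': {'implementation': 'plain'}, 'name': 'plain'},
    ],

    # Extra column showing the plain -> cuda speedup.
    'comparisons': [{'from': 'plain', 'to': 'cuda'}],

    'module_colors': {'core': 'light_green'},
}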
@@ -17,21 +82,6 @@ import xlwt
 
 from testlog_parser import parseLogFile
 
-# To build XLS report you neet to put your xmls (OpenCV tests output) in the
-# following way:
-#
-# "root" --- folder, representing the whole XLS document. It contains several
-# subfolders --- sheet-paths of the XLS document. Each sheet-path contains it's
-# subfolders --- config-paths. Config-paths are columns of the sheet and
-# they contains xmls files --- output of OpenCV modules testing.
-# Config-path means OpenCV build configuration, including different
-# options such as NEON, TBB, GPU enabling/disabling.
-#
-# root
-# root\sheet_path
-# root\sheet_path\configuration1 (column 1)
-# root\sheet_path\configuration2 (column 2)
-
 re_image_size = re.compile(r'^ \d+ x \d+$', re.VERBOSE)
 re_data_type = re.compile(r'^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE)
 
@@ -45,15 +95,64 @@ no_speedup_style = no_time_style
 error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
 header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True')
 
-def collect_xml(collection, configuration, xml_fullname):
-    xml_fname = os.path.split(xml_fullname)[1]
-    module = xml_fname[:xml_fname.index('_')]
+class Collector(object):
+    def __init__(self, config_match_func):
+        self.__config_cache = {}
+        self.config_match_func = config_match_func
+        self.tests = {}
 
-    module_tests = collection.setdefault(module, OrderedDict())
+    # Format a sorted sequence of pairs as if it was a dictionary.
+    # We can't just use a dictionary instead, since we want to preserve the sorted order of the keys.
+    @staticmethod
+    def __format_config_cache_key(pairs):
+        return '{' + ', '.join(repr(k) + ': ' + repr(v) for (k, v) in pairs) + '}'
 
-    for test in sorted(parseLogFile(xml_fullname)):
-        test_results = module_tests.setdefault((test.shortName(), test.param()), {})
-        test_results[configuration] = test.get("gmean") if test.status == 'run' else test.status
+    def collect_from(self, xml_path):
+        run = parseLogFile(xml_path)
+
+        module = run.properties['module_name']
+
+        properties = run.properties.copy()
+        del properties['module_name']
+
+        props_key = tuple(sorted(properties.iteritems())) # dicts can't be keys
+
+        if props_key in self.__config_cache:
+            configuration = self.__config_cache[props_key]
+        else:
+            configuration = self.config_match_func(properties)
+
+            if configuration is None:
+                logging.warning('failed to match properties to a configuration: %s',
+                    Collector.__format_config_cache_key(props_key))
+            else:
+                same_config_props = [it[0] for it in self.__config_cache.iteritems() if it[1] == configuration]
+                if len(same_config_props) > 0:
+                    logging.warning('property set %s matches the same configuration %r as property set %s',
+                        Collector.__format_config_cache_key(props_key),
+                        configuration,
+                        Collector.__format_config_cache_key(same_config_props[0]))
+
+            self.__config_cache[props_key] = configuration
+
+        if configuration is None: return
+
+        module_tests = self.tests.setdefault(module, OrderedDict())
+
+        for test in run.tests:
+            test_results = module_tests.setdefault((test.shortName(), test.param()), {})
+            test_results[configuration] = test.get("gmean") if test.status == 'run' else test.status
+
+def make_match_func(matchers):
+    def match_func(properties):
+        for matcher in matchers:
+            if all(properties.get(name) == value
+                   for (name, value) in matcher['properties'].iteritems()):
+                return matcher['name']
+
+        return None
+
+    return match_func
 
 def main():
     arg_parser = ArgumentParser(description='Build an XLS performance report.')
@@ -83,23 +182,15 @@ def main():
 
         sheet_conf = dict(global_conf.items() + sheet_conf.items())
 
-        if 'configurations' in sheet_conf:
-            config_names = sheet_conf['configurations']
-        else:
-            try:
-                config_names = [p for p in os.listdir(sheet_path)
-                                if os.path.isdir(os.path.join(sheet_path, p))]
-            except Exception as e:
-                logging.warning('error while determining configuration names for %s: %s', sheet_path, e)
-                continue
+        config_names = sheet_conf.get('configurations', [])
+        config_matchers = sheet_conf.get('configuration_matchers', [])
 
-        collection = {}
+        collector = Collector(make_match_func(config_matchers))
 
-        for configuration, configuration_path in \
-                [(c, os.path.join(sheet_path, c)) for c in config_names]:
-            logging.info('processing %s', configuration_path)
-            for xml_fullname in glob(os.path.join(configuration_path, '*.xml')):
-                collect_xml(collection, configuration, xml_fullname)
+        for root, _, filenames in os.walk(sheet_path):
+            logging.info('looking in %s', root)
+            for filename in fnmatch.filter(filenames, '*.xml'):
+                collector.collect_from(os.path.join(root, filename))
 
         sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path))))
 
@@ -126,7 +217,7 @@ def main():
         module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color))
                          for module, color in module_colors.iteritems()}
 
-        for module, tests in sorted(collection.iteritems()):
+        for module, tests in sorted(collector.tests.iteritems()):
             for ((test, param), configs) in tests.iteritems():
                 sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style))
                 sheet.write(row, 1, test)
@@ -1,6 +1,6 @@
-#include "opencv2/ts.hpp"
 #include "opencv2/core/utility.hpp"
 #include "opencv2/core/private.hpp"
+#include "opencv2/ts.hpp"
 
 #ifdef GTEST_LINKED_AS_SHARED_LIBRARY
 #error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined
@@ -2968,13 +2968,12 @@ void printVersionInfo(bool useStdOut)
     if(useStdOut) std::cout << "Inner VCS version: " << ver << std::endl;
 }
 
-#ifdef CV_PARALLEL_FRAMEWORK
-    ::testing::Test::RecordProperty("cv_parallel_framework", CV_PARALLEL_FRAMEWORK);
-    if (useStdOut)
-    {
-        std::cout << "Parallel framework: " << CV_PARALLEL_FRAMEWORK << std::endl;
-    }
-#endif
+    const char* parallel_framework = currentParallelFramework();
+
+    if (parallel_framework) {
+        ::testing::Test::RecordProperty("cv_parallel_framework", parallel_framework);
+        if (useStdOut) std::cout << "Parallel framework: " << parallel_framework << std::endl;
+    }
 
     std::string cpu_features;
 
@@ -497,6 +497,7 @@ const char kBreakOnFailureFlag[] = "break_on_failure";
 const char kCatchExceptionsFlag[] = "catch_exceptions";
 const char kColorFlag[] = "color";
 const char kFilterFlag[] = "filter";
+const char kParamFilterFlag[] = "param_filter";
 const char kListTestsFlag[] = "list_tests";
 const char kOutputFlag[] = "output";
 const char kPrintTimeFlag[] = "print_time";
@@ -575,6 +576,7 @@ class GTestFlagSaver {
     death_test_style_ = GTEST_FLAG(death_test_style);
     death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
     filter_ = GTEST_FLAG(filter);
+    param_filter_ = GTEST_FLAG(param_filter);
     internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
     list_tests_ = GTEST_FLAG(list_tests);
     output_ = GTEST_FLAG(output);
@@ -596,6 +598,7 @@ class GTestFlagSaver {
     GTEST_FLAG(death_test_style) = death_test_style_;
     GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
     GTEST_FLAG(filter) = filter_;
+    GTEST_FLAG(param_filter) = param_filter_;
     GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
     GTEST_FLAG(list_tests) = list_tests_;
     GTEST_FLAG(output) = output_;
@@ -617,6 +620,7 @@ class GTestFlagSaver {
   std::string death_test_style_;
   bool death_test_use_fork_;
   std::string filter_;
+  std::string param_filter_;
   std::string internal_run_death_test_;
   bool list_tests_;
   std::string output_;
@@ -1699,6 +1703,12 @@ GTEST_DEFINE_string_(
     "exclude). A test is run if it matches one of the positive "
     "patterns and does not match any of the negative patterns.");
 
+GTEST_DEFINE_string_(
+    param_filter,
+    internal::StringFromGTestEnv("param_filter", kUniversalFilter),
+    "Same syntax and semantics as for param, but these patterns "
+    "have to match the test's parameters.");
+
 GTEST_DEFINE_bool_(list_tests, false,
                    "List all tests without running them.");
 
@@ -4188,6 +4198,14 @@ void PrettyUnitTestResultPrinter::OnTestIterationStart(
                   "Note: %s filter = %s\n", GTEST_NAME_, filter);
   }
 
+  const char* const param_filter = GTEST_FLAG(param_filter).c_str();
+
+  // Ditto.
+  if (!String::CStringEquals(param_filter, kUniversalFilter)) {
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: %s parameter filter = %s\n", GTEST_NAME_, param_filter);
+  }
+
   if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
     const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
     ColoredPrintf(COLOR_YELLOW,
@@ -5873,9 +5891,15 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
                                                    kDisableTestFilter);
       test_info->is_disabled_ = is_disabled;
 
+      const std::string value_param(test_info->value_param() == NULL ?
+                                    "" : test_info->value_param());
+
       const bool matches_filter =
           internal::UnitTestOptions::FilterMatchesTest(test_case_name,
-                                                       test_name);
+                                                       test_name) &&
+          internal::UnitTestOptions::MatchesFilter(value_param,
+                                                   GTEST_FLAG(param_filter).c_str());
+
       test_info->matches_filter_ = matches_filter;
 
       const bool is_runnable =
@@ -6223,6 +6247,12 @@ static const char kColorEncodedHelpMessage[] =
 "      Run only the tests whose name matches one of the positive patterns but\n"
 "      none of the negative patterns. '?' matches any single character; '*'\n"
 "      matches any substring; ':' separates two patterns.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "param_filter=@YPOSITIVE_PATTERNS"
+    "[@G-@YNEGATIVE_PATTERNS]@D\n"
+"      Like @G--" GTEST_FLAG_PREFIX_
+    "filter@D, but applies to the test's parameter. If a\n"
+"      test is not parameterized, its parameter is considered to be the\n"
+"      empty string.\n"
 "  @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
 "      Run all disabled tests too.\n"
 "\n"
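Since GTEST_FLAG_PREFIX_ expands to "gtest_", an invocation such as --gtest_param_filter=640x480* (the pattern here is illustrative) runs only the tests whose parameter string matches the pattern, on top of the usual --gtest_filter name matching.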
@@ -6300,6 +6330,7 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
       ParseBoolFlag(arg, kDeathTestUseFork,
                     &GTEST_FLAG(death_test_use_fork)) ||
       ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+      ParseStringFlag(arg, kParamFilterFlag, &GTEST_FLAG(param_filter)) ||
       ParseStringFlag(arg, kInternalRunDeathTestFlag,
                       &GTEST_FLAG(internal_run_death_test)) ||
       ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
@@ -14,30 +14,10 @@ int64 TestBase::timeLimitDefault = 0;
 unsigned int TestBase::iterationsLimitDefault = (unsigned int)(-1);
 int64 TestBase::_timeadjustment = 0;
 
-const std::string command_line_keys =
-    "{   perf_max_outliers   |8        |percent of allowed outliers}"
-    "{   perf_min_samples    |10       |minimal required numer of samples}"
-    "{   perf_force_samples  |100      |force set maximum number of samples for all tests}"
-    "{   perf_seed           |809564   |seed for random numbers generator}"
-    "{   perf_threads        |-1       |the number of worker threads, if parallel execution is enabled}"
-    "{   perf_write_sanity   |         |create new records for sanity checks}"
-    "{   perf_verify_sanity  |         |fail tests having no regression data for sanity checks}"
-#ifdef ANDROID
-    "{   perf_time_limit     |6.0      |default time limit for a single test (in seconds)}"
-    "{   perf_affinity_mask  |0        |set affinity mask for the main thread}"
-    "{   perf_log_power_checkpoints  | |additional xml logging for power measurement}"
-#else
-    "{   perf_time_limit     |3.0      |default time limit for a single test (in seconds)}"
-#endif
-    "{   perf_max_deviation  |1.0      |}"
-    "{   help   h            |         |print help info}"
-#ifdef HAVE_CUDA
-    "{   perf_run_cpu        |false    |run GPU performance tests for analogical CPU functions}"
-    "{   perf_cuda_device    |0        |run GPU test suite onto specific CUDA capable device}"
-    "{   perf_cuda_info_only |false    |print an information about system and an available CUDA devices and then exit.}"
-#endif
-    ;
+// Item [0] will be considered the default implementation.
+static std::vector<std::string> available_impls;
+
 static std::string param_impl;
 static double param_max_outliers;
 static double param_max_deviation;
 static unsigned int param_min_samples;
@@ -48,7 +28,6 @@ static int param_threads;
 static bool param_write_sanity;
 static bool param_verify_sanity;
 #ifdef HAVE_CUDA
-static bool param_run_cpu;
 static int param_cuda_device;
 #endif
 
@@ -573,11 +552,12 @@ Regression& Regression::operator() (const std::string& name, cv::InputArray arra
     std::string nodename = getCurrentTestNodeName();
 
 #ifdef HAVE_CUDA
-    static const std::string prefix = (param_run_cpu)? "CPU_" : "GPU_";
-
+    // This is a hack for compatibility and it should eventually get removed.
+    // gpu's tests don't even have CPU sanity data anymore.
     if(suiteName == "gpu")
-        nodename = prefix + nodename;
+    {
+        nodename = (PERF_RUN_GPU() ? "GPU_" : "CPU_") + nodename;
+    }
 #endif
 
     cv::FileNode n = rootIn[nodename];
     if(n.isNone())
@@ -642,6 +622,43 @@ performance_metrics::performance_metrics()
 
 void TestBase::Init(int argc, const char* const argv[])
 {
+    std::vector<std::string> plain_only;
+    plain_only.push_back("plain");
+    TestBase::Init(plain_only, argc, argv);
+}
+
+void TestBase::Init(const std::vector<std::string> & availableImpls,
+                 int argc, const char* const argv[])
+{
+    available_impls = availableImpls;
+
+    const std::string command_line_keys =
+        "{   perf_max_outliers   |8        |percent of allowed outliers}"
+        "{   perf_min_samples    |10       |minimal required numer of samples}"
+        "{   perf_force_samples  |100      |force set maximum number of samples for all tests}"
+        "{   perf_seed           |809564   |seed for random numbers generator}"
+        "{   perf_threads        |-1       |the number of worker threads, if parallel execution is enabled}"
+        "{   perf_write_sanity   |false    |create new records for sanity checks}"
+        "{   perf_verify_sanity  |false    |fail tests having no regression data for sanity checks}"
+        "{   perf_impl           |" + available_impls[0] +
+                                          "|the implementation variant of functions under test}"
+        "{   perf_list_impls     |false    |list available implementation variants and exit}"
+        "{   perf_run_cpu        |false    |deprecated, equivalent to --perf_impl=plain}"
+#ifdef ANDROID
+        "{   perf_time_limit     |6.0      |default time limit for a single test (in seconds)}"
+        "{   perf_affinity_mask  |0        |set affinity mask for the main thread}"
+        "{   perf_log_power_checkpoints  | |additional xml logging for power measurement}"
+#else
+        "{   perf_time_limit     |3.0      |default time limit for a single test (in seconds)}"
+#endif
+        "{   perf_max_deviation  |1.0      |}"
+        "{   help   h            |false    |print help info}"
+#ifdef HAVE_CUDA
+        "{   perf_cuda_device    |0        |run GPU test suite onto specific CUDA capable device}"
+        "{   perf_cuda_info_only |false    |print an information about system and an available CUDA devices and then exit.}"
+#endif
+        ;
+
     cv::CommandLineParser args(argc, argv, command_line_keys);
     if (args.has("help"))
     {
@@ -651,6 +668,7 @@ void TestBase::Init(int argc, const char* const argv[])
 
     ::testing::AddGlobalTestEnvironment(new PerfEnvironment);
 
+    param_impl          = args.has("perf_run_cpu") ? "plain" : args.get<std::string>("perf_impl");
     param_max_outliers  = std::min(100., std::max(0., args.get<double>("perf_max_outliers")));
     param_min_samples   = std::max(1u, args.get<unsigned int>("perf_min_samples"));
     param_max_deviation = std::max(0., args.get<double>("perf_max_deviation"));
@@ -665,19 +683,41 @@ void TestBase::Init(int argc, const char* const argv[])
     log_power_checkpoints = args.has("perf_log_power_checkpoints");
 #endif
 
+    bool param_list_impls = args.has("perf_list_impls");
+
+    if (param_list_impls)
+    {
+        fputs("Available implementation variants:", stdout);
+        for (size_t i = 0; i < available_impls.size(); ++i) {
+            putchar(' ');
+            fputs(available_impls[i].c_str(), stdout);
+        }
+        putchar('\n');
+        exit(0);
+    }
+
+    if (std::find(available_impls.begin(), available_impls.end(), param_impl) == available_impls.end())
+    {
+        printf("No such implementation: %s\n", param_impl.c_str());
+        exit(1);
+    }
+
+#ifdef HAVE_CUDA
+
+    bool printOnly = args.has("perf_cuda_info_only");
+
+    if (printOnly)
+        exit(0);
+#endif
+
+    if (available_impls.size() > 1)
+        printf("[----------]\n[ INFO ] \tImplementation variant: %s.\n[----------]\n", param_impl.c_str()), fflush(stdout);
+
 #ifdef HAVE_CUDA
 
-    param_run_cpu = args.has("perf_run_cpu");
     param_cuda_device = std::max(0, std::min(cv::gpu::getCudaEnabledDeviceCount(), args.get<int>("perf_cuda_device")));
 
-    if (param_run_cpu)
-        printf("[----------]\n[ GPU INFO ] \tRun test suite on CPU.\n[----------]\n"), fflush(stdout);
-    else
+    if (param_impl == "cuda")
     {
         cv::gpu::DeviceInfo info(param_cuda_device);
         if (!info.isCompatible())
@@ -703,6 +743,18 @@ void TestBase::Init(int argc, const char* const argv[])
     _timeadjustment = _calibrate();
 }
 
+void TestBase::RecordRunParameters()
+{
+    ::testing::Test::RecordProperty("cv_implementation", param_impl);
+    ::testing::Test::RecordProperty("cv_num_threads", param_threads);
+}
+
+std::string TestBase::getSelectedImpl()
+{
+    return param_impl;
+}
+
+
 int64 TestBase::_calibrate()
 {
     class _helper : public ::perf::TestBase
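Taken together, these hooks let a multi-variant perf binary be driven with --perf_list_impls (print the available variants and exit) or --perf_impl=<name> (select one); the selection is validated against available_impls and recorded into the XML report by RecordRunParameters() as the cv_implementation property, which testlog_parser.py and report.py above then match on.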
@@ -1322,11 +1374,7 @@ void perf::sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors
 \*****************************************************************************************/
 bool perf::GpuPerf::targetDevice()
 {
-#ifdef HAVE_CUDA
-    return !param_run_cpu;
-#else
-    return false;
-#endif
+    return param_impl == "cuda";
 }
 
 /*****************************************************************************************\
@@ -1365,4 +1413,3 @@ void PrintTo(const Size& sz, ::std::ostream* os)
 }
 
 } // namespace cv
-