path: root/poky/meta/lib/oeqa/core/runner.py
author     Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-12-05 01:18:15 +0300
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-12-17 16:07:13 +0300
commit     f86d0556ed6040140f3d635b0bb2eeb1c507f818 (patch)
tree       bbf8537d5de28d729402c6458d4f67360e3f531b /poky/meta/lib/oeqa/core/runner.py
parent     5a60ea963172683f7e8dab81bc77d92f12fbd18b (diff)
download   openbmc-f86d0556ed6040140f3d635b0bb2eeb1c507f818.tar.xz
poky: sumo refresh a4c7d28688..78020fb639
Update poky to sumo HEAD.

Michael Halstead (1):
      scripts/runqemu: Replace subprocess.run() for compatibility

Richard Purdie (35):
      scripts/runqemu: Tidy up lock handling code
      scripts/runqemu: Improve lockfile handling for python with close_fd=True
      oeqa/selftest/signing: Skip tests if gpg isn't found
      oeqa/selftest/signing: Allow tests not to need gpg on the host
      oeqa/selftest/signing: Use do_populate_lic target instead of do_package
      oeqa/selftest/case: Use bb.utils.remove() instead of shutil.remove()
      oeqa/selftest/buildoptions: Improve ccache test failure output
      oeqa/utils/commands: Add extra qemu failure logging
      oeqa/utils/qemurunner: Fix python ResourceWarning for unclosed file
      oeqa/utils/commands: Avoid log message duplication
      oeqa/qemurunner: Remove resource python warnings
      oeqa/selftest/buildoptions: Improve ccache test
      oeqa/selftest/buildoptions: Ensure diskmon tests run consistently
      oeqa/selftest/runqemu: Improve testcase failure handling
      oeqa/utils/qemurunner: Avoid tracebacks on closed files
      oeqa/loader: Fix deprecation warning
      oeqa/utils/commands: Avoid unclosed file warnings
      oeqa/selftest/context: Replace deprecated imp module usage
      oeqa/utils/qemurunner.py: Fix python regex warnings
      oeqa/selftest/context: Improve log file handling
      oeqa/core/runner: Improve test case comparison
      oeqa/runner: Ensure we don't print misleading results output
      oeqa/core/threaded: Remove in favour of using concurrenttests
      oeqa/runner: Simplify code
      oeqa: Remove xmlrunner
      oeqa/runtime/ptest: Inject results+logs into stored json results file
      oeqa/runner: Always show a summary of success/fail/error/skip counts
      oeqa/runner: Sort the test result output by result class
      oeqa/selftest: Improvements to the json logging
      oeqa/utils/metadata: Allow to function without the git module
      image-buildinfo,oeqa/selftest/containerimage: Ensure image-buildinfo doesn't break tests
      oeqa/selftest/esdk: Ensure parent directory exists
      oeqa/selftest/esdk: Fix typo causing test failure
      testimage: Improvements to the json logging
      testsdk: Improvements to the json logging

Ross Burton (3):
      oeqa/oelib/path: don't leak temporary directories
      oeqa: don't litter /tmp with temporary directories
      oeqa/selftest/esdk: run selftest inside workdir not /tmp

Scott Rifenbark (1):
      documentation: Prepared for 2.5.2 document release

Stefan Lendl (1):
      default-versions.inc: Make PREFERRED_VERSION_openssl* overwritable

Yeoh Ee Peng (6):
      oeqa/core/runner: refactor for OEQA to write json testresult
      oeqa/core/runner: write testresult to json files
      oeqa/selftest/context: write testresult to json files
      testimage.bbclass: write testresult to json files
      testsdk.bbclass: write testresult to json files
      oeqa/selftest: Standardize json logging output directory

Change-Id: I3c8123930c8c3441126c1e81b3bbbde9f2b126f2
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
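To illustrate the json testresult logging these patches introduce, below is a minimal sketch of the testresults.json layout written by the new OETestResultJSONHelper class (added at the end of this diff). The result_id and configuration values here are hypothetical examples; the 'status'/'log' keys come from logDetails() in the diff below.

    import json

    # One top-level entry per result_id; each maps a test case id to its
    # status plus an optional captured log (see dump_testresult_file below).
    test_results = {
        'oeselftest_example_run': {                        # hypothetical result_id
            'configuration': {'TEST_TYPE': 'oeselftest'},  # hypothetical metadata
            'result': {
                'tests.example.ExampleTest.test_pass': {'status': 'PASSED'},
                'tests.example.ExampleTest.test_skip': {'status': 'SKIPPED',
                                                        'log': 'skipped: example'},
            },
        },
    }
    print(json.dumps(test_results, sort_keys=True, indent=4))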
Diffstat (limited to 'poky/meta/lib/oeqa/core/runner.py')
-rw-r--r--  poky/meta/lib/oeqa/core/runner.py  186
1 file changed, 96 insertions(+), 90 deletions(-)
diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py
index 13cdf5ba5..f8bb23f34 100644
--- a/poky/meta/lib/oeqa/core/runner.py
+++ b/poky/meta/lib/oeqa/core/runner.py
@@ -6,17 +6,10 @@ import time
import unittest
import logging
import re
+import json
-xmlEnabled = False
-try:
- import xmlrunner
- from xmlrunner.result import _XMLTestResult as _TestResult
- from xmlrunner.runner import XMLTestRunner as _TestRunner
- xmlEnabled = True
-except ImportError:
- # use the base runner instead
- from unittest import TextTestResult as _TestResult
- from unittest import TextTestRunner as _TestRunner
+from unittest import TextTestResult as _TestResult
+from unittest import TextTestRunner as _TestRunner
class OEStreamLogger(object):
def __init__(self, logger):
@@ -42,8 +35,12 @@ class OETestResult(_TestResult):
def __init__(self, tc, *args, **kwargs):
super(OETestResult, self).__init__(*args, **kwargs)
+ self.successes = []
+
+ # Inject into tc so that TestDepends decorator can see results
+ tc.results = self
+
self.tc = tc
- self._tc_map_results()
def startTest(self, test):
# Allow us to trigger the testcase buffer mode on a per test basis
@@ -53,12 +50,6 @@ class OETestResult(_TestResult):
self.buffer = test.buffer
super(OETestResult, self).startTest(test)
- def _tc_map_results(self):
- self.tc._results['failures'] = self.failures
- self.tc._results['errors'] = self.errors
- self.tc._results['skipped'] = self.skipped
- self.tc._results['expectedFailures'] = self.expectedFailures
-
def logSummary(self, component, context_msg=''):
elapsed_time = self.tc._run_end_time - self.tc._run_start_time
self.tc.logger.info("SUMMARY:")
@@ -70,67 +61,60 @@ class OETestResult(_TestResult):
msg = "%s - OK - All required tests passed" % component
else:
msg = "%s - FAIL - Required tests failed" % component
- skipped = len(self.tc._results['skipped'])
- if skipped:
- msg += " (skipped=%d)" % skipped
+ msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
self.tc.logger.info(msg)
- def _getDetailsNotPassed(self, case, type, desc):
- found = False
+ def _getTestResultDetails(self, case):
+ result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
+ 'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED'}
- for (scase, msg) in self.tc._results[type]:
- # XXX: When XML reporting is enabled scase is
- # xmlrunner.result._TestInfo instance instead of
- # string.
- if xmlEnabled:
- if case.id() == scase.test_id:
- found = True
- break
- scase_str = scase.test_id
- else:
- if case == scase:
+ for rtype in result_types:
+ found = False
+ for (scase, msg) in getattr(self, rtype):
+ if case.id() == scase.id():
found = True
break
- scase_str = str(scase)
+ scase_str = str(scase.id())
- # When fails at module or class level the class name is passed as string
- # so figure out to see if match
- m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
- if m:
- if case.__class__.__module__ == m.group('module_name'):
- found = True
- break
+ # When a failure occurs at module or class level, the class name is passed
+ # as a string, so check whether it matches this case
+ m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
+ if m:
+ if case.__class__.__module__ == m.group('module_name'):
+ found = True
+ break
- m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
- if m:
- class_name = "%s.%s" % (case.__class__.__module__,
- case.__class__.__name__)
+ m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
+ if m:
+ class_name = "%s.%s" % (case.__class__.__module__,
+ case.__class__.__name__)
- if class_name == m.group('class_name'):
- found = True
- break
+ if class_name == m.group('class_name'):
+ found = True
+ break
+
+ if found:
+ return result_types[rtype], msg
- if found:
- return (found, msg)
+ return 'UNKNOWN', None
- return (found, None)
+ def addSuccess(self, test):
+ # Added so we can keep track of successes too
+ self.successes.append((test, None))
+ super(OETestResult, self).addSuccess(test)
- def logDetails(self):
+ def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
self.tc.logger.info("RESULTS:")
+
+ result = {}
+ logs = {}
+ if hasattr(self.tc, "extraresults"):
+ result = self.tc.extraresults
+
for case_name in self.tc._registry['cases']:
case = self.tc._registry['cases'][case_name]
- result_types = ['failures', 'errors', 'skipped', 'expectedFailures']
- result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL']
-
- fail = False
- desc = None
- for idx, name in enumerate(result_types):
- (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],
- result_desc[idx])
- if fail:
- desc = result_desc[idx]
- break
+ (status, log) = self._getTestResultDetails(case)
oeid = -1
if hasattr(case, 'decorators'):
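The hunk above replaces the per-type _getDetailsNotPassed() lookup with a single _getTestResultDetails() and starts recording passing tests via addSuccess(). The success-tracking pattern can be reproduced outside OEQA; a minimal sketch with plain unittest, using a hypothetical RecordingResult class:

    import unittest

    class RecordingResult(unittest.TextTestResult):
        # unittest only keeps failures/errors/skips; record passes too,
        # as (case, log) tuples to match the shape of the other lists
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.successes = []

        def addSuccess(self, test):
            self.successes.append((test, None))
            super().addSuccess(test)

    # TextTestRunner accepts a result class directly
    runner = unittest.TextTestRunner(resultclass=RecordingResult)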
@@ -138,12 +122,27 @@ class OETestResult(_TestResult):
if hasattr(d, 'oeid'):
oeid = d.oeid
- if fail:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, desc))
+ if status not in logs:
+ logs[status] = []
+ logs[status].append("RESULTS - %s - Testcase %s: %s" % (case.id(), oeid, status))
+ if log:
+ result[case.id()] = {'status': status, 'log': log}
else:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, 'PASSED'))
+ result[case.id()] = {'status': status}
+
+ for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
+ if i not in logs:
+ continue
+ for l in logs[i]:
+ self.tc.logger.info(l)
+
+ if json_file_dir:
+ tresultjsonhelper = OETestResultJSONHelper()
+ tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
+
+ def wasSuccessful(self):
+ # Override as unexpected successes aren't failures for us
+ return (len(self.failures) == len(self.errors) == 0)
class OEListTestsResult(object):
def wasSuccessful(self):
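The wasSuccessful() override above changes one behaviour relative to stock unittest (Python 3.4+), which fails a run containing an unexpected success, i.e. a test marked expectedFailure that passes; here only failures and errors count. A small illustration with plain unittest:

    import unittest

    class Example(unittest.TestCase):
        @unittest.expectedFailure
        def test_surprise(self):
            pass  # passes despite the marker -> an "unexpected success"

    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Example)
    result = unittest.TextTestRunner().run(suite)
    # Stock unittest: result.wasSuccessful() is False because of the
    # unexpected success. With the override above it would be True, since
    # len(result.failures) == len(result.errors) == 0.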
@@ -153,33 +152,14 @@ class OETestRunner(_TestRunner):
streamLoggerClass = OEStreamLogger
def __init__(self, tc, *args, **kwargs):
- if xmlEnabled:
- if not kwargs.get('output'):
- kwargs['output'] = os.path.join(os.getcwd(),
- 'TestResults_%s_%s' % (time.strftime("%Y%m%d%H%M%S"), os.getpid()))
-
kwargs['stream'] = self.streamLoggerClass(tc.logger)
super(OETestRunner, self).__init__(*args, **kwargs)
self.tc = tc
self.resultclass = OETestResult
- # XXX: The unittest-xml-reporting package defines _make_result method instead
- # of _makeResult standard on unittest.
- if xmlEnabled:
- def _make_result(self):
- """
- Creates a TestResult object which will be used to store
- information about the executed tests.
- """
- # override in subclasses if necessary.
- return self.resultclass(self.tc,
- self.stream, self.descriptions, self.verbosity, self.elapsed_times
- )
- else:
- def _makeResult(self):
- return self.resultclass(self.tc, self.stream, self.descriptions,
- self.verbosity)
-
+ def _makeResult(self):
+ return self.resultclass(self.tc, self.stream, self.descriptions,
+ self.verbosity)
def _walk_suite(self, suite, func):
for obj in suite:
@@ -275,3 +255,29 @@ class OETestRunner(_TestRunner):
self._list_tests_module(suite)
return OEListTestsResult()
+
+class OETestResultJSONHelper(object):
+
+ testresult_filename = 'testresults.json'
+
+ def _get_existing_testresults_if_available(self, write_dir):
+ testresults = {}
+ file = os.path.join(write_dir, self.testresult_filename)
+ if os.path.exists(file):
+ with open(file, "r") as f:
+ testresults = json.load(f)
+ return testresults
+
+ def _write_file(self, write_dir, file_name, file_content):
+ file_path = os.path.join(write_dir, file_name)
+ with open(file_path, 'w') as the_file:
+ the_file.write(file_content)
+
+ def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
+ bb.utils.mkdirhier(write_dir)
+ lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+ test_results = self._get_existing_testresults_if_available(write_dir)
+ test_results[result_id] = {'configuration': configuration, 'result': test_result}
+ json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
+ self._write_file(write_dir, self.testresult_filename, json_testresults)
+ bb.utils.unlockfile(lf)
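
A hypothetical usage sketch of the new helper. Note that bb.utils comes from BitBake and is not imported at the top of this file, so dump_testresult_file() only works where the bb module is already available, e.g. under oe-selftest, testimage or testsdk:

    helper = OETestResultJSONHelper()
    helper.dump_testresult_file(
        write_dir='/tmp/oeqa-results',              # hypothetical output directory
        configuration={'TEST_TYPE': 'oeselftest'},  # free-form metadata, stored as-is
        result_id='oeselftest_example_run',         # key within testresults.json
        test_result={'tests.example.ExampleTest.test_pass': {'status': 'PASSED'}})
    # Repeated calls with different result_id values merge into the same
    # testresults.json, serialized under the jsontestresult.lock file lock.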