Diffstat (limited to 'poky/scripts/lib/resulttool')
-rw-r--r--  poky/scripts/lib/resulttool/log.py                              |  71
-rwxr-xr-x  poky/scripts/lib/resulttool/manualexecution.py                  | 107
-rw-r--r--  poky/scripts/lib/resulttool/merge.py                            |  18
-rw-r--r--  poky/scripts/lib/resulttool/regression.py                       |  16
-rw-r--r--  poky/scripts/lib/resulttool/report.py                           |  95
-rw-r--r--  poky/scripts/lib/resulttool/resultutils.py                      |  49
-rw-r--r--  poky/scripts/lib/resulttool/store.py                            |  14
-rw-r--r--  poky/scripts/lib/resulttool/template/test_report_full_text.txt  |  34
8 files changed, 294 insertions, 110 deletions
diff --git a/poky/scripts/lib/resulttool/log.py b/poky/scripts/lib/resulttool/log.py
new file mode 100644
index 000000000..25c339671
--- /dev/null
+++ b/poky/scripts/lib/resulttool/log.py
@@ -0,0 +1,71 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+    if 'ptestresult.sections' in result:
+        if ptest in result['ptestresult.sections'] and 'log' in result['ptestresult.sections'][ptest]:
+            print(result['ptestresult.sections'][ptest]['log'])
+            return 0
+
+    print("ptest '%s' not found" % ptest)
+    return 1
+
+def log(args, logger):
+    results = resultutils.load_resultsdata(args.source)
+
+    ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
+    if ptest_count > 1 and not args.prepend_run:
+        print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
+        return 1
+
+    for _, run_name, _, r in resultutils.test_run_results(results):
+        if args.dump_ptest:
+            if 'ptestresult.sections' in r:
+                for name, ptest in r['ptestresult.sections'].items():
+                    if 'log' in ptest:
+                        dest_dir = args.dump_ptest
+                        if args.prepend_run:
+                            dest_dir = os.path.join(dest_dir, run_name)
+
+                        os.makedirs(dest_dir, exist_ok=True)
+
+                        dest = os.path.join(dest_dir, '%s.log' % name)
+                        print(dest)
+                        with open(dest, 'w') as f:
+                            f.write(ptest['log'])
+
+        if args.raw:
+            if 'ptestresult.rawlogs' in r:
+                print(r['ptestresult.rawlogs']['log'])
+            else:
+                print('Raw logs not found')
+                return 1
+
+        for ptest in args.ptest:
+            if not show_ptest(r, ptest, logger):
+                return 1
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser = subparsers.add_parser('log', help='show logs',
+                                   description='show the logs from test results',
+                                   group='analysis')
+    parser.set_defaults(func=log)
+    parser.add_argument('source',
+            help='the results file/directory/URL to import')
+    parser.add_argument('--ptest', action='append', default=[],
+            help='show logs for a ptest')
+    parser.add_argument('--dump-ptest', metavar='DIR',
+            help='Dump all ptest log files to the specified directory.')
+    parser.add_argument('--prepend-run', action='store_true',
+            help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+            Required if more than one test run is present in the result file''')
+    parser.add_argument('--raw', action='store_true',
+            help='show raw logs')
+
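
For context, the options registered above correspond to invocations along these lines (a sketch; the result file and ptest names are hypothetical):

    resulttool log results.json --raw
    resulttool log results.json --ptest glibc
    resulttool log results/ --dump-ptest ptest-logs --prepend-run

With --dump-ptest, each ptest section's log is written to DIR/<section>.log; --prepend-run adds a per-run subdirectory so that logs from multiple test runs do not overwrite one another.
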
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
index 12ef90d6a..df28e1d21 100755
--- a/poky/scripts/lib/resulttool/manualexecution.py
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -2,27 +2,22 @@
#
# Copyright (c) 2018, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import os
import sys
import datetime
import re
+import copy
from oeqa.core.runner import OETestResultJSONHelper
-def load_json_file(file):
-    with open(file, "r") as f:
-        return json.load(f)
+def load_json_file(f):
+    with open(f, "r") as filedata:
+        return json.load(filedata)
 
 def write_json_file(f, json_data):
     os.makedirs(os.path.dirname(f), exist_ok=True)
@@ -31,9 +26,8 @@ def write_json_file(f, json_data):
 class ManualTestRunner(object):
 
-    def _get_testcases(self, file):
-        self.jdata = load_json_file(file)
-        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
+    def _get_test_module(self, case_file):
+        return os.path.basename(case_file).split('.')[0]
 
     def _get_input(self, config):
         while True:
@@ -57,23 +51,21 @@ class ManualTestRunner(object):
             print('Only integer index inputs from above available configuration options are allowed. Please try again.')
         return options[output]
 
-    def _create_config(self, config_options):
+    def _get_config(self, config_options, test_module):
         from oeqa.utils.metadata import get_layers
         from oeqa.utils.commands import get_bb_var
         from resulttool.resultutils import store_map
 
         layers = get_layers(get_bb_var('BBLAYERS'))
-        self.configuration = {}
-        self.configuration['LAYERS'] = layers
-        current_datetime = datetime.datetime.now()
-        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
-        self.configuration['STARTTIME'] = self.starttime
-        self.configuration['TEST_TYPE'] = 'manual'
-        self.configuration['TEST_MODULE'] = self.test_module
-
-        extra_config = set(store_map['manual']) - set(self.configuration)
+        configurations = {}
+        configurations['LAYERS'] = layers
+        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+        configurations['TEST_TYPE'] = 'manual'
+        configurations['TEST_MODULE'] = test_module
+
+        extra_config = set(store_map['manual']) - set(configurations)
         for config in sorted(extra_config):
-            avail_config_options = self._get_available_config_options(config_options, self.test_module, config)
+            avail_config_options = self._get_available_config_options(config_options, test_module, config)
             if avail_config_options:
                 print('---------------------------------------------')
                 print('These are available configuration #%s options:' % config)
@@ -89,21 +81,19 @@ class ManualTestRunner(object):
                 print('---------------------------------------------')
                 value_conf = self._get_input('Configuration Value')
                 print('---------------------------------------------\n')
-            self.configuration[config] = value_conf
-
-    def _create_result_id(self):
-        self.result_id = 'manual_%s_%s' % (self.test_module, self.starttime)
+            configurations[config] = value_conf
+        return configurations
 
-    def _execute_test_steps(self, test):
+    def _execute_test_steps(self, case):
         test_result = {}
         print('------------------------------------------------------------------------')
-        print('Executing test case: %s' % test['test']['@alias'])
+        print('Executing test case: %s' % case['test']['@alias'])
         print('------------------------------------------------------------------------')
-        print('You have total %s test steps to be executed.' % len(test['test']['execution']))
+        print('You have total %s test steps to be executed.' % len(case['test']['execution']))
         print('------------------------------------------------------------------------\n')
-        for step, _ in sorted(test['test']['execution'].items(), key=lambda x: int(x[0])):
-            print('Step %s: %s' % (step, test['test']['execution'][step]['action']))
-            expected_output = test['test']['execution'][step]['expected_results']
+        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+            expected_output = case['test']['execution'][step]['expected_results']
             if expected_output:
                 print('Expected output: %s' % expected_output)
             while True:
@@ -118,31 +108,37 @@ class ManualTestRunner(object):
                     res = result_types[r]
                     if res == 'FAILED':
                         log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                        test_result.update({test['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
+                        test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                     else:
-                        test_result.update({test['test']['@alias']: {'status': '%s' % res}})
+                        test_result.update({case['test']['@alias']: {'status': '%s' % res}})
                     break
                 print('Invalid input!')
         return test_result
 
-    def _create_write_dir(self):
-        basepath = os.environ['BUILDDIR']
-        self.write_dir = basepath + '/tmp/log/manual/'
+    def _get_write_dir(self):
+        return os.environ['BUILDDIR'] + '/tmp/log/manual/'
 
-    def run_test(self, file, config_options_file):
-        self._get_testcases(file)
+    def run_test(self, case_file, config_options_file, testcase_config_file):
+        test_module = self._get_test_module(case_file)
+        cases = load_json_file(case_file)
         config_options = {}
         if config_options_file:
             config_options = load_json_file(config_options_file)
-        self._create_config(config_options)
-        self._create_result_id()
-        self._create_write_dir()
+        configurations = self._get_config(config_options, test_module)
+        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
         test_results = {}
-        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
-        for t in self.jdata:
-            test_result = self._execute_test_steps(t)
+        if testcase_config_file:
+            test_case_config = load_json_file(testcase_config_file)
+            test_case_to_execute = test_case_config['testcases']
+            for case in copy.deepcopy(cases) :
+                if case['test']['@alias'] not in test_case_to_execute:
+                    cases.remove(case)
+
+        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+        for c in cases:
+            test_result = self._execute_test_steps(c)
             test_results.update(test_result)
-        return self.configuration, self.result_id, self.write_dir, test_results
+        return configurations, result_id, self._get_write_dir(), test_results
 
     def _get_true_false_input(self, input_message):
         yes_list = ['Y', 'YES']
@@ -156,11 +152,11 @@ class ManualTestRunner(object):
             return False
         return True
 
-    def make_config_option_file(self, logger, manual_case_file, config_options_file):
+    def make_config_option_file(self, logger, case_file, config_options_file):
         config_options = {}
         if config_options_file:
             config_options = load_json_file(config_options_file)
-        new_test_module = os.path.basename(manual_case_file).split('.')[0]
+        new_test_module = self._get_test_module(case_file)
         print('Creating configuration options file for test module: %s' % new_test_module)
         new_config_options = {}
@@ -181,8 +177,7 @@ class ManualTestRunner(object):
         if new_config_options:
             config_options[new_test_module] = new_config_options
         if not config_options_file:
-            self._create_write_dir()
-            config_options_file = os.path.join(self.write_dir, 'manual_config_options.json')
+            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
         write_json_file(config_options_file, config_options)
         logger.info('Configuration option file created at %s' % config_options_file)
@@ -191,9 +186,9 @@ def manualexecution(args, logger):
     if args.make_config_options_file:
         testrunner.make_config_option_file(logger, args.file, args.config_options_file)
         return 0
-    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file, args.config_options_file)
+    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
     resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id, get_test_results)
+    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
     return 0
 
 def register_commands(subparsers):
@@ -207,3 +202,5 @@ def register_commands(subparsers):
                               help='the config options file to import and used as available configuration option selection or make config option file')
     parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
                               help='make the configuration options file based on provided inputs')
+    parser_build.add_argument('-t', '--testcase-config-file', default='',
+                              help='the testcase configuration file to enable user to run a selected set of test case')
\ No newline at end of file
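
For reference, run_test() above consumes only the top-level 'testcases' key of the file passed via --testcase-config-file, matching each entry against a case's ['test']['@alias'] and dropping any case not listed. A minimal sketch of such a file (the alias values are hypothetical):

    {
        "testcases": [
            "manual.compliance-test.bitbake-online",
            "manual.compliance-test.toolchain"
        ]
    }
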
diff --git a/poky/scripts/lib/resulttool/merge.py b/poky/scripts/lib/resulttool/merge.py
index 3e4b7a38a..70d23a48f 100644
--- a/poky/scripts/lib/resulttool/merge.py
+++ b/poky/scripts/lib/resulttool/merge.py
@@ -3,21 +3,15 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
 import os
 import json
 import resulttool.resultutils as resultutils
 
 def merge(args, logger):
-    if os.path.isdir(args.target_results):
+    if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
         results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
         resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
         resultutils.save_resultsdata(results, args.target_results)
@@ -31,12 +25,12 @@ def merge(args, logger):
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('merge', help='merge test result files/directories',
-                                         description='merge the results from multiple files/directories into the target file or directory',
+    parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+                                         description='merge the results from multiple files/directories/URLs into the target file or directory',
                                          group='setup')
     parser_build.set_defaults(func=merge)
     parser_build.add_argument('base_results',
-            help='the results file/directory to import')
+            help='the results file/directory/URL to import')
     parser_build.add_argument('target_results',
             help='the target file or directory to merge the base_results with')
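
Since resultutils now accepts URLs (see resultutils.py below), the base results can be fetched remotely before merging, e.g. 'resulttool merge https://example.com/path/testresults.json stored-results/' (URL and path are illustrative).
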
diff --git a/poky/scripts/lib/resulttool/regression.py b/poky/scripts/lib/resulttool/regression.py
index bdf531ded..9f952951b 100644
--- a/poky/scripts/lib/resulttool/regression.py
+++ b/poky/scripts/lib/resulttool/regression.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import resulttool.resultutils as resultutils
import json
@@ -64,7 +58,7 @@ def regression_common(args, logger, base_results, target_results):
         if a in target_results:
             base = list(base_results[a].keys())
             target = list(target_results[a].keys())
-            # We may have multiple base/targets which are for different configurations.  Start by
+            # We may have multiple base/targets which are for different configurations. Start by
             # removing any pairs which match
             for c in base.copy():
                 for b in target.copy():
@@ -161,9 +155,9 @@ def register_commands(subparsers):
                                          group='analysis')
     parser_build.set_defaults(func=regression)
     parser_build.add_argument('base_result',
-            help='base result file/directory for the comparison')
+            help='base result file/directory/URL for the comparison')
     parser_build.add_argument('target_result',
-            help='target result file/directory to compare with')
+            help='target result file/directory/URL to compare with')
     parser_build.add_argument('-b', '--base-result-id', default='',
             help='(optional) filter the base results to this result ID')
     parser_build.add_argument('-t', '--target-result-id', default='',
diff --git a/poky/scripts/lib/resulttool/report.py b/poky/scripts/lib/resulttool/report.py
index 90086209e..cb6b1cf94 100644
--- a/poky/scripts/lib/resulttool/report.py
+++ b/poky/scripts/lib/resulttool/report.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import glob
import json
@@ -23,6 +17,8 @@ import oeqa.utils.gitarchive as gitarchive
 class ResultsTextReport(object):
     def __init__(self):
         self.ptests = {}
+        self.ltptests = {}
+        self.ltpposixtests = {}
         self.result_types = {'passed': ['PASSED', 'passed'],
                              'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                              'skipped': ['SKIPPED', 'skipped']}
@@ -57,6 +53,69 @@ class ResultsTextReport(object):
             if status in self.result_types[tk]:
                 self.ptests[suite][tk] += 1
 
+    def handle_ltptest_result(self, k, status, result):
+        if k == 'ltpresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ltpresult.sections']:
+                if suite not in self.ltptests:
+                    self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+                if 'duration' in result['ltpresult.sections'][suite]:
+                    self.ltptests[suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+                if 'timeout' in result['ltpresult.sections'][suite]:
+                    self.ltptests[suite]['duration'] += " T"
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle 'glib-2.0'
+        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                print("split2: %s %s %s" % (suite, suite1, test))
+                if suite + "." + suite1 in result['ltpresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ltptests:
+            self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ltptests[suite][tk] += 1
+
+    def handle_ltpposixtest_result(self, k, status, result):
+        if k == 'ltpposixresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ltpposixresult.sections']:
+                if suite not in self.ltpposixtests:
+                    self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+                if 'duration' in result['ltpposixresult.sections'][suite]:
+                    self.ltpposixtests[suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle 'glib-2.0'
+        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ltpposixresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ltpposixtests:
+            self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ltpposixtests[suite][tk] += 1
+
     def get_aggregated_test_result(self, logger, testresult):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
         result = testresult.get('result', [])
@@ -69,6 +128,10 @@ class ResultsTextReport(object):
                 test_count_report['failed_testcases'].append(k)
             if k.startswith("ptestresult."):
                 self.handle_ptest_result(k, test_status, result)
+            if k.startswith("ltpresult."):
+                self.handle_ltptest_result(k, test_status, result)
+            if k.startswith("ltpposixresult."):
+                self.handle_ltpposixtest_result(k, test_status, result)
         return test_count_report
 
     def print_test_report(self, template_file_name, test_count_reports):
@@ -79,9 +142,11 @@ class ResultsTextReport(object):
         template = env.get_template(template_file_name)
         havefailed = False
         haveptest = bool(self.ptests)
+        haveltp = bool(self.ltptests)
+        haveltpposix = bool(self.ltpposixtests)
         reportvalues = []
         cols = ['passed', 'failed', 'skipped']
-        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
         for line in test_count_reports:
             total_tested = line['passed'] + line['failed'] + line['skipped']
             vals = {}
@@ -100,10 +165,20 @@ class ResultsTextReport(object):
         for ptest in self.ptests:
             if len(ptest) > maxlen['ptest']:
                 maxlen['ptest'] = len(ptest)
+        for ltptest in self.ltptests:
+            if len(ltptest) > maxlen['ltptest']:
+                maxlen['ltptest'] = len(ltptest)
+        for ltpposixtest in self.ltpposixtests:
+            if len(ltpposixtest) > maxlen['ltpposixtest']:
+                maxlen['ltpposixtest'] = len(ltpposixtest)
         output = template.render(reportvalues=reportvalues,
                                  havefailed=havefailed,
                                  haveptest=haveptest,
                                  ptests=self.ptests,
+                                 haveltp=haveltp,
+                                 haveltpposix=haveltpposix,
+                                 ltptests=self.ltptests,
+                                 ltpposixtests=self.ltpposixtests,
                                  maxlen=maxlen)
         print(output)
@@ -143,7 +218,7 @@ def register_commands(subparsers):
                                          group='analysis')
     parser_build.set_defaults(func=report)
     parser_build.add_argument('source_dir',
-            help='source file/directory that contain the test result files to summarise')
+            help='source file/directory/URL that contain the test result files to summarise')
     parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
     parser_build.add_argument('--commit', help="Revision to report")
     parser_build.add_argument('-t', '--tag', default='',
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
index ad40ac849..ea4ab42d9 100644
--- a/poky/scripts/lib/resulttool/resultutils.py
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -3,19 +3,15 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import json
import scriptpath
import copy
+import urllib.request
+import posixpath
scriptpath.add_oe_lib_path()
flatten_map = {
@@ -40,20 +36,33 @@ store_map = {
"manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
}
+def is_url(p):
+ """
+ Helper for determining if the given path is a URL
+ """
+ return p.startswith('http://') or p.startswith('https://')
+
#
# Load the json file and append the results data into the provided results dict
#
def append_resultsdata(results, f, configmap=store_map):
if type(f) is str:
- with open(f, "r") as filedata:
- data = json.load(filedata)
+ if is_url(f):
+ with urllib.request.urlopen(f) as response:
+ data = json.loads(response.read().decode('utf-8'))
+ url = urllib.parse.urlparse(f)
+ testseries = posixpath.basename(posixpath.dirname(url.path))
+ else:
+ with open(f, "r") as filedata:
+ data = json.load(filedata)
+ testseries = os.path.basename(os.path.dirname(f))
else:
data = f
for res in data:
if "configuration" not in data[res] or "result" not in data[res]:
raise ValueError("Test results data without configuration or result section?")
if "TESTSERIES" not in data[res]["configuration"]:
- data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
+ data[res]["configuration"]["TESTSERIES"] = testseries
testtype = data[res]["configuration"].get("TEST_TYPE")
if testtype not in configmap:
raise ValueError("Unknown test type %s" % testtype)
@@ -69,7 +78,7 @@ def append_resultsdata(results, f, configmap=store_map):
#
 def load_resultsdata(source, configmap=store_map):
     results = {}
-    if os.path.isfile(source):
+    if is_url(source) or os.path.isfile(source):
         append_resultsdata(results, source, configmap)
         return results
     for root, dirs, files in os.walk(source):
@@ -152,3 +161,19 @@ def git_get_result(repo, tags):
             append_resultsdata(results, obj)
 
     return results
+
+def test_run_results(results):
+    """
+    Convenient generator function that iterates over all test runs that have a
+    result section.
+
+    Generates a tuple of:
+        (result json file path, test run name, test run (dict), test run "results" (dict))
+    for each test run that has a "result" section
+    """
+    for path in results:
+        for run_name, test_run in results[path].items():
+            if not 'result' in test_run:
+                continue
+            yield path, run_name, test_run, test_run['result']
+
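
A minimal consumer sketch for the new helpers, assuming a local testresults.json in the layout expected by store_map (the file name and status handling are illustrative):

    import resulttool.resultutils as resultutils

    results = resultutils.load_resultsdata('testresults.json')
    for path, run_name, test_run, run_results in resultutils.test_run_results(results):
        # run_results maps test case names to dicts such as {'status': 'PASSED'}
        failed = [name for name, data in run_results.items()
                  if data.get('status') in ('FAILED', 'ERROR')]
        print('%s: %d failed test cases' % (run_name, len(failed)))
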
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
index e4a080752..06505aecc 100644
--- a/poky/scripts/lib/resulttool/store.py
+++ b/poky/scripts/lib/resulttool/store.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import tempfile
import os
import subprocess
@@ -29,7 +23,7 @@ def store(args, logger):
     try:
         results = {}
         logger.info('Reading files from %s' % args.source)
-        if os.path.isfile(args.source):
+        if resultutils.is_url(args.source) or os.path.isfile(args.source):
             resultutils.append_resultsdata(results, args.source)
         else:
             for root, dirs, files in os.walk(args.source):
@@ -92,7 +86,7 @@ def register_commands(subparsers):
                                          group='setup')
     parser_build.set_defaults(func=store)
     parser_build.add_argument('source',
-            help='source file or directory that contain the test result files to be stored')
+            help='source file/directory/URL that contain the test result files to be stored')
     parser_build.add_argument('git_dir',
             help='the location of the git repository to store the results in')
     parser_build.add_argument('-a', '--all', action='store_true',
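
As with merge, the source may now be a single results file, a directory to walk, or an http(s) URL, e.g. 'resulttool store https://example.com/path/testresults.json /path/to/results-repo' (both arguments illustrative).
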
diff --git a/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
index 590f35c7d..d2725b8d0 100644
--- a/poky/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -25,6 +25,40 @@ PTest Result Summary
There was no ptest data
{% endif %}
+{% if haveltp %}
+==============================================================================================================
+Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[ltptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no LTP Test data
+{% endif %}
+
+{% if haveltpposix %}
+==============================================================================================================
+Ltp Posix Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no LTP Posix Test data
+{% endif %}
+
+
+
==============================================================================================================
Failed test cases (sorted by testseries, ID)
==============================================================================================================
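
For orientation, the two new template blocks above render tables of the following shape, one row per LTP suite collected by report.py (all values illustrative); the " T" suffix on a duration marks a suite that report.py flagged as timed out:

    Recipe     | Passed | Failed | Skipped | Time(s)
    --------------------------------------------------
    math       | 280    | 2      | 10      | 312
    syscalls   | 1339   | 0      | 54      | 1024 T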