Diffstat (limited to 'poky/scripts/lib/resulttool')
-rwxr-xr-x  poky/scripts/lib/resulttool/manualexecution.py                   31
-rw-r--r--  poky/scripts/lib/resulttool/merge.py                             20
-rw-r--r--  poky/scripts/lib/resulttool/report.py                           104
-rw-r--r--  poky/scripts/lib/resulttool/resultutils.py                       19
-rw-r--r--  poky/scripts/lib/resulttool/store.py                              9
-rw-r--r--  poky/scripts/lib/resulttool/template/test_report_full_text.txt   38
6 files changed, 138 insertions, 83 deletions
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
index df28e1d21..ecb27c593 100755
--- a/poky/scripts/lib/resulttool/manualexecution.py
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -181,11 +181,38 @@ class ManualTestRunner(object):
write_json_file(config_options_file, config_options)
logger.info('Configuration option file created at %s' % config_options_file)
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
def manualexecution(args, logger):
testrunner = ManualTestRunner()
if args.make_config_options_file:
testrunner.make_config_option_file(logger, args.file, args.config_options_file)
return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
resultjsonhelper = OETestResultJSONHelper()
resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
@@ -203,4 +230,6 @@ def register_commands(subparsers):
parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
help='make the configuration options file based on provided inputs')
parser_build.add_argument('-t', '--testcase-config-file', default='',
- help='the testcase configuration file to enable user to run a selected set of test case')
\ No newline at end of file
+ help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
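The new --make-testcase-config-file flow above prompts once per test case alias and stores the accepted ones under a 'testcases' key. A minimal sketch of the file it writes, assuming two hypothetical aliases were answered with (Y)es and the default testconfig_new.json output name:

    import json
    import os

    # Same structure the patch passes to write_json_file(); the aliases are
    # hypothetical examples, while the 'testcases' key and the default file
    # name come from the patch itself.
    new_testcase_config = {
        'testcases': [
            'manual.bsp-hw.usb_device_mount',     # hypothetical alias
            'manual.bsp-hw.ethernet_static_ip',   # hypothetical alias
        ]
    }
    testcase_config_file = os.path.join(os.getcwd(), 'testconfig_new.json')
    with open(testcase_config_file, 'w') as f:
        json.dump(new_testcase_config, f, sort_keys=True, indent=4)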
diff --git a/poky/scripts/lib/resulttool/merge.py b/poky/scripts/lib/resulttool/merge.py
index 70d23a48f..18b4825a1 100644
--- a/poky/scripts/lib/resulttool/merge.py
+++ b/poky/scripts/lib/resulttool/merge.py
@@ -11,16 +11,23 @@ import json
import resulttool.resultutils as resultutils
def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
- results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
- resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
resultutils.save_resultsdata(results, args.target_results)
else:
- results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
if os.path.exists(args.target_results):
- resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
return 0
def register_commands(subparsers):
@@ -33,4 +40,7 @@ def register_commands(subparsers):
help='the results file/directory/URL to import')
parser_build.add_argument('target_results',
help='the target file or directory to merge the base_results with')
-
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
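Note the order of operations in merge() above: the configvars dict is assembled first (TESTSERIES by default, dropped with -t; EXECUTED_BY added with -x) and only then passed to every load/append call. A self-contained sketch of just that selection logic; the wrapper function is illustrative, not part of the patch:

    # Mirrors the default extra_configvars added to resultutils.py below.
    extra_configvars = {'TESTSERIES': ''}

    def build_configvars(not_add_testseries=False, executed_by=''):
        configvars = {}
        if not not_add_testseries:
            configvars = extra_configvars.copy()
        if executed_by:
            configvars['EXECUTED_BY'] = executed_by
        return configvars

    print(build_configvars(executed_by='autobuilder'))
    # {'TESTSERIES': '', 'EXECUTED_BY': 'autobuilder'}
    print(build_configvars(not_add_testseries=True))
    # {}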
diff --git a/poky/scripts/lib/resulttool/report.py b/poky/scripts/lib/resulttool/report.py
index cb6b1cf94..a48c59f63 100644
--- a/poky/scripts/lib/resulttool/report.py
+++ b/poky/scripts/lib/resulttool/report.py
@@ -24,16 +24,19 @@ class ResultsTextReport(object):
'skipped': ['SKIPPED', 'skipped']}
- def handle_ptest_result(self, k, status, result):
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
+
if k == 'ptestresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ptestresult.sections']:
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
if 'timeout' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] += " T"
+ self.ptests[machine][suite]['duration'] += " T"
return
try:
_, suite, test = k.split(".", 2)
@@ -47,22 +50,25 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ptests[suite][tk] += 1
+ self.ptests[machine][suite][tk] += 1
- def handle_ltptest_result(self, k, status, result):
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
+
if k == 'ltpresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ltpresult.sections']:
- if suite not in self.ltptests:
- self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ltpresult.sections'][suite]:
- self.ltptests[suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
if 'timeout' in result['ltpresult.sections'][suite]:
- self.ltptests[suite]['duration'] += " T"
+ self.ltptests[machine][suite]['duration'] += " T"
return
try:
_, suite, test = k.split(".", 2)
@@ -77,20 +83,23 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ltptests:
- self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ltptests[suite][tk] += 1
+ self.ltptests[machine][suite][tk] += 1
- def handle_ltpposixtest_result(self, k, status, result):
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
if k == 'ltpposixresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ltpposixresult.sections']:
- if suite not in self.ltpposixtests:
- self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ltpposixresult.sections'][suite]:
- self.ltpposixtests[suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
return
try:
_, suite, test = k.split(".", 2)
@@ -104,19 +113,13 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ltpposixtests:
- self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ltpposixtests[suite][tk] += 1
+ self.ltpposixtests[machine][suite][tk] += 1
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
+ def get_aggregated_test_result(self, logger, testresult, machine):
test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
result = testresult.get('result', [])
for k in result:
@@ -127,11 +130,11 @@ class ResultsTextReport(object):
if test_status in self.result_types['failed']:
test_count_report['failed_testcases'].append(k)
if k.startswith("ptestresult."):
- self.handle_ptest_result(k, test_status, result)
+ self.handle_ptest_result(k, test_status, result, machine)
if k.startswith("ltpresult."):
- self.handle_ltptest_result(k, test_status, result)
+ self.handle_ltptest_result(k, test_status, result, machine)
if k.startswith("ltpposixresult."):
- self.handle_ltpposixtest_result(k, test_status, result)
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
return test_count_report
def print_test_report(self, template_file_name, test_count_reports):
@@ -141,10 +144,8 @@ class ResultsTextReport(object):
env = Environment(loader=file_loader, trim_blocks=True)
template = env.get_template(template_file_name)
havefailed = False
- haveptest = bool(self.ptests)
- haveltp = bool(self.ltptests)
- haveltpposix = bool(self.ltpposixtests)
reportvalues = []
+ machines = []
cols = ['passed', 'failed', 'skipped']
maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
for line in test_count_reports:
@@ -162,21 +163,24 @@ class ResultsTextReport(object):
reportvalues.append(vals)
if line['failed_testcases']:
havefailed = True
- for ptest in self.ptests:
- if len(ptest) > maxlen['ptest']:
- maxlen['ptest'] = len(ptest)
- for ltptest in self.ltptests:
- if len(ltptest) > maxlen['ltptest']:
- maxlen['ltptest'] = len(ltptest)
- for ltpposixtest in self.ltpposixtests:
- if len(ltpposixtest) > maxlen['ltpposixtest']:
- maxlen['ltpposixtest'] = len(ltpposixtest)
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
output = template.render(reportvalues=reportvalues,
havefailed=havefailed,
- haveptest=haveptest,
+ machines=machines,
ptests=self.ptests,
- haveltp=haveltp,
- haveltpposix=haveltpposix,
ltptests=self.ltptests,
ltpposixtests=self.ltpposixtests,
maxlen=maxlen)
@@ -200,7 +204,9 @@ class ResultsTextReport(object):
for testsuite in testresults:
for resultid in testresults[testsuite]:
result = testresults[testsuite][resultid]
- test_count_report = self.get_aggregated_test_result(logger, result)
+ machine = result['configuration']['MACHINE']
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
test_count_report['testseries'] = result['configuration']['TESTSERIES']
test_count_report['result_id'] = resultid
test_count_reports.append(test_count_report)
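All three handlers above follow the same pattern: the counters move from self.ptests[suite] to self.ptests[machine][suite], so one report can aggregate results for several machines without their suite counts colliding. A stripped-down sketch of the new nesting; the machine and suite names are hypothetical, and only the 'skipped' status list is visible in this hunk, so the other two lists are assumed:

    result_types = {'passed': ['PASSED', 'passed'],      # assumed
                    'failed': ['FAILED', 'failed'],      # assumed
                    'skipped': ['SKIPPED', 'skipped']}   # shown in the hunk

    ptests = {}
    machine, suite, status = 'qemux86-64', 'glibc-tests', 'PASSED'

    if machine not in ptests:
        ptests[machine] = {}
    if suite not in ptests[machine]:
        ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0,
                                  'duration': '-', 'failed_testcases': []}
    for tk in result_types:
        if status in result_types[tk]:
            ptests[machine][suite][tk] += 1

    print(ptests['qemux86-64']['glibc-tests']['passed'])   # 1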
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
index ea4ab42d9..e595c185d 100644
--- a/poky/scripts/lib/resulttool/resultutils.py
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -42,10 +42,12 @@ def is_url(p):
"""
return p.startswith('http://') or p.startswith('https://')
+extra_configvars = {'TESTSERIES': ''}
+
#
# Load the json file and append the results data into the provided results dict
#
-def append_resultsdata(results, f, configmap=store_map):
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
if type(f) is str:
if is_url(f):
with urllib.request.urlopen(f) as response:
@@ -61,12 +63,15 @@ def append_resultsdata(results, f, configmap=store_map):
for res in data:
if "configuration" not in data[res] or "result" not in data[res]:
raise ValueError("Test results data without configuration or result section?")
- if "TESTSERIES" not in data[res]["configuration"]:
- data[res]["configuration"]["TESTSERIES"] = testseries
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
testtype = data[res]["configuration"].get("TEST_TYPE")
if testtype not in configmap:
raise ValueError("Unknown test type %s" % testtype)
- configvars = configmap[testtype]
testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
if testpath not in results:
results[testpath] = {}
@@ -76,16 +81,16 @@ def append_resultsdata(results, f, configmap=store_map):
# Walk a directory and find/load results data
# or load directly from a file
#
-def load_resultsdata(source, configmap=store_map):
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
results = {}
if is_url(source) or os.path.isfile(source):
- append_resultsdata(results, source, configmap)
+ append_resultsdata(results, source, configmap, configvars)
return results
for root, dirs, files in os.walk(source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- append_resultsdata(results, f, configmap)
+ append_resultsdata(results, f, configmap, configvars)
return results
def filter_resultsdata(results, resultid):
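The loop added to append_resultsdata() above only back-fills configuration keys that are missing, and TESTSERIES keeps its old behaviour of defaulting to the value derived from the results directory name. A minimal sketch of that loop with a hypothetical configuration dict:

    configvars = {'TESTSERIES': '', 'EXECUTED_BY': 'autobuilder'}  # example values
    configuration = {'TEST_TYPE': 'runtime'}  # hypothetical result configuration
    testseries = 'master-next'  # derived from the directory name in the real code

    for config in configvars:
        if config == 'TESTSERIES' and 'TESTSERIES' not in configuration:
            configuration['TESTSERIES'] = testseries
            continue
        if config not in configuration:
            configuration[config] = configvars[config]

    print(configuration)
    # {'TEST_TYPE': 'runtime', 'TESTSERIES': 'master-next',
    #  'EXECUTED_BY': 'autobuilder'}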
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
index 06505aecc..79c83dd8b 100644
--- a/poky/scripts/lib/resulttool/store.py
+++ b/poky/scripts/lib/resulttool/store.py
@@ -21,16 +21,19 @@ import oeqa.utils.gitarchive as gitarchive
def store(args, logger):
tempdir = tempfile.mkdtemp(prefix='testresults.')
try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
results = {}
logger.info('Reading files from %s' % args.source)
if resultutils.is_url(args.source) or os.path.isfile(args.source):
- resultutils.append_resultsdata(results, args.source)
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
else:
for root, dirs, files in os.walk(args.source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- resultutils.append_resultsdata(results, f)
+ resultutils.append_resultsdata(results, f, configvars=configvars)
elif args.all:
dst = f.replace(args.source, tempdir + "/")
os.makedirs(os.path.dirname(dst), exist_ok=True)
@@ -93,4 +96,6 @@ def register_commands(subparsers):
help='include all files, not just testresults.json files')
parser_build.add_argument('-e', '--allow-empty', action='store_true',
help='don\'t error if no results to store are found')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
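With this change, store and merge share the same plumbing: both copy resultutils.extra_configvars and hand the result to every append_resultsdata() call. A hedged usage sketch; the path and executed-by value are made up:

    import resulttool.resultutils as resultutils

    configvars = resultutils.extra_configvars.copy()   # {'TESTSERIES': ''}
    configvars['EXECUTED_BY'] = 'release-engineer'     # hypothetical value

    results = {}
    resultutils.append_resultsdata(results, 'path/to/testresults.json',
                                   configvars=configvars)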
diff --git a/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
index d2725b8d0..17c99cb4e 100644
--- a/poky/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -9,54 +9,54 @@ Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% if haveptest %}
+
+{% for machine in machines %}
+{% if ptests[machine] %}
==============================================================================================================
-PTest Result Summary
+{{ machine }} PTest Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ptest in ptests |sort %}
-{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no ptest data
{% endif %}
+{% endfor %}
-{% if haveltp %}
+{% for machine in machines %}
+{% if ltptests[machine] %}
==============================================================================================================
-Ltp Test Result Summary
+{{ machine }} Ltp Test Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ltptest in ltptests |sort %}
-{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[ltptest]['duration']|string) }}
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no LTP Test data
{% endif %}
+{% endfor %}
-{% if haveltpposix %}
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
==============================================================================================================
-Ltp Posix Result Summary
+{{ machine }} Ltp Posix Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ltpposixtest in ltpposixtests |sort %}
-{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[ltpposixtest]['duration']|string) }}
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no LTP Posix Test data
{% endif %}
-
+{% endfor %}
==============================================================================================================
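The template changes replace the global haveptest/haveltp/haveltpposix flags with one loop per machine: each machine with data gets its own headed summary table, and the old "There was no ... data" fallbacks disappear because machines without data are simply skipped. A minimal Jinja2 sketch of that pattern, with hypothetical data:

    from jinja2 import Environment

    template = Environment(trim_blocks=True).from_string(
        "{% for machine in machines %}\n"
        "{% if ptests[machine] %}\n"
        "{{ machine }} PTest Result Summary\n"
        "{% endif %}\n"
        "{% endfor %}\n")

    print(template.render(machines=['qemux86-64', 'qemuarm'],
                          ptests={'qemux86-64': {'glibc-tests': {}},
                                  'qemuarm': {}}))
    # Prints a header only for qemux86-64; qemuarm has no ptest data, so its
    # section is omitted entirely.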