Diffstat (limited to 'poky/scripts/lib')
-rw-r--r--  poky/scripts/lib/recipetool/create_buildsys_python.py | 22
-rwxr-xr-x  poky/scripts/lib/resulttool/manualexecution.py | 31
-rw-r--r--  poky/scripts/lib/resulttool/merge.py | 20
-rw-r--r--  poky/scripts/lib/resulttool/report.py | 104
-rw-r--r--  poky/scripts/lib/resulttool/resultutils.py | 19
-rw-r--r--  poky/scripts/lib/resulttool/store.py | 9
-rw-r--r--  poky/scripts/lib/resulttool/template/test_report_full_text.txt | 38
-rw-r--r--  poky/scripts/lib/wic/engine.py | 7
-rw-r--r--  poky/scripts/lib/wic/filemap.py | 5
-rw-r--r--  poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 38
-rw-r--r--  poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py | 8
-rw-r--r--  poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 21
12 files changed, 206 insertions, 116 deletions
diff --git a/poky/scripts/lib/recipetool/create_buildsys_python.py b/poky/scripts/lib/recipetool/create_buildsys_python.py
index 1a38bd0b1..ac9bc9237 100644
--- a/poky/scripts/lib/recipetool/create_buildsys_python.py
+++ b/poky/scripts/lib/recipetool/create_buildsys_python.py
@@ -31,11 +31,11 @@ def tinfoil_init(instance):
class PythonRecipeHandler(RecipeHandler):
- base_pkgdeps = ['python-core']
- excluded_pkgdeps = ['python-dbg']
- # os.path is provided by python-core
+ base_pkgdeps = ['python3-core']
+ excluded_pkgdeps = ['python3-dbg']
+ # os.path is provided by python3-core
assume_provided = ['builtins', 'os.path']
- # Assumes that the host python builtin_module_names is sane for target too
+ # Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
bbvar_map = {
@@ -215,9 +215,9 @@ class PythonRecipeHandler(RecipeHandler):
self.apply_info_replacements(info)
if uses_setuptools:
- classes.append('setuptools')
+ classes.append('setuptools3')
else:
- classes.append('distutils')
+ classes.append('distutils3')
if license_str:
for i, line in enumerate(lines_before):
@@ -282,7 +282,7 @@ class PythonRecipeHandler(RecipeHandler):
for feature, feature_reqs in extras_req.items():
unmapped_deps.difference_update(feature_reqs)
- feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ feature_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
inst_reqs = set()
@@ -293,7 +293,7 @@ class PythonRecipeHandler(RecipeHandler):
if inst_reqs:
unmapped_deps.difference_update(inst_reqs)
- inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
@@ -356,7 +356,7 @@ class PythonRecipeHandler(RecipeHandler):
return info, 'setuptools' in imported_modules, non_literals, extensions
def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
info = {}
keys = set(self.bbvar_map.keys())
keys |= set(self.setuparg_list_fields)
@@ -390,7 +390,7 @@ class PythonRecipeHandler(RecipeHandler):
def get_setup_byline(self, fields, setupscript='./setup.py'):
info = {}
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
try:
info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
@@ -527,7 +527,7 @@ class PythonRecipeHandler(RecipeHandler):
pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python-dir.bbclass', ldata, True)
+ bb.parse.handle('classes/python3-dir.bbclass', ldata, True)
python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
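The recipetool changes above consistently rewrite upstream Python requirement names into python3- package names. A minimal sketch of that mapping, with illustrative requirement names:

    # Sketch only: mirrors the 'python3-' + r.replace('.', '-').lower() expression used above.
    inst_reqs = {'Jinja2', 'ruamel.yaml'}    # illustrative install_requires entries
    inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
    print('RDEPENDS_${PN} += "%s"' % ' '.join(inst_req_deps))
    # -> RDEPENDS_${PN} += "python3-jinja2 python3-ruamel-yaml"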
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
index df28e1d21..ecb27c593 100755
--- a/poky/scripts/lib/resulttool/manualexecution.py
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -181,11 +181,38 @@ class ManualTestRunner(object):
write_json_file(config_options_file, config_options)
logger.info('Configuration option file created at %s' % config_options_file)
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
def manualexecution(args, logger):
testrunner = ManualTestRunner()
if args.make_config_options_file:
testrunner.make_config_option_file(logger, args.file, args.config_options_file)
return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
resultjsonhelper = OETestResultJSONHelper()
resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
@@ -203,4 +230,6 @@ def register_commands(subparsers):
parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
help='make the configuration options file based on provided inputs')
parser_build.add_argument('-t', '--testcase-config-file', default='',
- help='the testcase configuration file to enable user to run a selected set of test case')
\ No newline at end of file
+ help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
diff --git a/poky/scripts/lib/resulttool/merge.py b/poky/scripts/lib/resulttool/merge.py
index 70d23a48f..18b4825a1 100644
--- a/poky/scripts/lib/resulttool/merge.py
+++ b/poky/scripts/lib/resulttool/merge.py
@@ -11,16 +11,23 @@ import json
import resulttool.resultutils as resultutils
def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
- results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
- resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
resultutils.save_resultsdata(results, args.target_results)
else:
- results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
if os.path.exists(args.target_results):
- resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
return 0
def register_commands(subparsers):
@@ -33,4 +40,7 @@ def register_commands(subparsers):
help='the results file/directory/URL to import')
parser_build.add_argument('target_results',
help='the target file or directory to merge the base_results with')
-
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
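merge() now assembles the extra configuration values from the new -t and -x options before loading either results set. A condensed, self-contained sketch of that setup (option values illustrative):

    extra_configvars = {'TESTSERIES': ''}                      # resultutils.extra_configvars
    not_add_testseries, executed_by = False, 'ci-worker-01'    # illustrative values of -t / -x
    configvars = {}
    if not not_add_testseries:
        configvars = extra_configvars.copy()
    if executed_by:
        configvars['EXECUTED_BY'] = executed_by
    # configvars -> {'TESTSERIES': '', 'EXECUTED_BY': 'ci-worker-01'}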
diff --git a/poky/scripts/lib/resulttool/report.py b/poky/scripts/lib/resulttool/report.py
index cb6b1cf94..a48c59f63 100644
--- a/poky/scripts/lib/resulttool/report.py
+++ b/poky/scripts/lib/resulttool/report.py
@@ -24,16 +24,19 @@ class ResultsTextReport(object):
'skipped': ['SKIPPED', 'skipped']}
- def handle_ptest_result(self, k, status, result):
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
+
if k == 'ptestresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ptestresult.sections']:
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
if 'timeout' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] += " T"
+ self.ptests[machine][suite]['duration'] += " T"
return
try:
_, suite, test = k.split(".", 2)
@@ -47,22 +50,25 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ptests[suite][tk] += 1
+ self.ptests[machine][suite][tk] += 1
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
- def handle_ltptest_result(self, k, status, result):
if k == 'ltpresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ltpresult.sections']:
- if suite not in self.ltptests:
- self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ltpresult.sections'][suite]:
- self.ltptests[suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
if 'timeout' in result['ltpresult.sections'][suite]:
- self.ltptests[suite]['duration'] += " T"
+ self.ltptests[machine][suite]['duration'] += " T"
return
try:
_, suite, test = k.split(".", 2)
@@ -77,20 +83,23 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ltptests:
- self.ltptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ltptests[suite][tk] += 1
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
- def handle_ltpposixtest_result(self, k, status, result):
if k == 'ltpposixresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ltpposixresult.sections']:
- if suite not in self.ltpposixtests:
- self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
if 'duration' in result['ltpposixresult.sections'][suite]:
- self.ltpposixtests[suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
return
try:
_, suite, test = k.split(".", 2)
@@ -104,19 +113,13 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ltpposixtests:
- self.ltpposixtests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ltpposixtests[suite][tk] += 1
+ self.ltpposixtests[machine][suite][tk] += 1
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
- test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
- def get_aggregated_test_result(self, logger, testresult):
+ def get_aggregated_test_result(self, logger, testresult, machine):
test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
result = testresult.get('result', [])
for k in result:
@@ -127,11 +130,11 @@ class ResultsTextReport(object):
if test_status in self.result_types['failed']:
test_count_report['failed_testcases'].append(k)
if k.startswith("ptestresult."):
- self.handle_ptest_result(k, test_status, result)
+ self.handle_ptest_result(k, test_status, result, machine)
if k.startswith("ltpresult."):
- self.handle_ltptest_result(k, test_status, result)
+ self.handle_ltptest_result(k, test_status, result, machine)
if k.startswith("ltpposixresult."):
- self.handle_ltpposixtest_result(k, test_status, result)
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
return test_count_report
def print_test_report(self, template_file_name, test_count_reports):
@@ -141,10 +144,8 @@ class ResultsTextReport(object):
env = Environment(loader=file_loader, trim_blocks=True)
template = env.get_template(template_file_name)
havefailed = False
- haveptest = bool(self.ptests)
- haveltp = bool(self.ltptests)
- haveltpposix = bool(self.ltpposixtests)
reportvalues = []
+ machines = []
cols = ['passed', 'failed', 'skipped']
maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
for line in test_count_reports:
@@ -162,21 +163,24 @@ class ResultsTextReport(object):
reportvalues.append(vals)
if line['failed_testcases']:
havefailed = True
- for ptest in self.ptests:
- if len(ptest) > maxlen['ptest']:
- maxlen['ptest'] = len(ptest)
- for ltptest in self.ltptests:
- if len(ltptest) > maxlen['ltptest']:
- maxlen['ltptest'] = len(ltptest)
- for ltpposixtest in self.ltpposixtests:
- if len(ltpposixtest) > maxlen['ltpposixtest']:
- maxlen['ltpposixtest'] = len(ltpposixtest)
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
output = template.render(reportvalues=reportvalues,
havefailed=havefailed,
- haveptest=haveptest,
+ machines=machines,
ptests=self.ptests,
- haveltp=haveltp,
- haveltpposix=haveltpposix,
ltptests=self.ltptests,
ltpposixtests=self.ltpposixtests,
maxlen=maxlen)
@@ -200,7 +204,9 @@ class ResultsTextReport(object):
for testsuite in testresults:
for resultid in testresults[testsuite]:
result = testresults[testsuite][resultid]
- test_count_report = self.get_aggregated_test_result(logger, result)
+ machine = result['configuration']['MACHINE']
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
test_count_report['testseries'] = result['configuration']['TESTSERIES']
test_count_report['result_id'] = resultid
test_count_reports.append(test_count_report)
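report.py now keys the ptest/ltp/ltpposix counters by machine before suite, so one report can roll up results from several machines. A minimal sketch of the nesting that handle_ptest_result() builds (machine and suite names illustrative):

    # Sketch of the per-machine structure; one counter bump shown.
    ptests = {}
    machine, suite = 'qemux86-64', 'busybox'
    if machine not in ptests:
        ptests[machine] = {}
    if suite not in ptests[machine]:
        ptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
    ptests[machine][suite]['passed'] += 1
    # ptests -> {'qemux86-64': {'busybox': {'passed': 1, 'failed': 0, ...}}}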
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
index ea4ab42d9..e595c185d 100644
--- a/poky/scripts/lib/resulttool/resultutils.py
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -42,10 +42,12 @@ def is_url(p):
"""
return p.startswith('http://') or p.startswith('https://')
+extra_configvars = {'TESTSERIES': ''}
+
#
# Load the json file and append the results data into the provided results dict
#
-def append_resultsdata(results, f, configmap=store_map):
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
if type(f) is str:
if is_url(f):
with urllib.request.urlopen(f) as response:
@@ -61,12 +63,15 @@ def append_resultsdata(results, f, configmap=store_map):
for res in data:
if "configuration" not in data[res] or "result" not in data[res]:
raise ValueError("Test results data without configuration or result section?")
- if "TESTSERIES" not in data[res]["configuration"]:
- data[res]["configuration"]["TESTSERIES"] = testseries
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
testtype = data[res]["configuration"].get("TEST_TYPE")
if testtype not in configmap:
raise ValueError("Unknown test type %s" % testtype)
- configvars = configmap[testtype]
testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
if testpath not in results:
results[testpath] = {}
@@ -76,16 +81,16 @@ def append_resultsdata(results, f, configmap=store_map):
# Walk a directory and find/load results data
# or load directly from a file
#
-def load_resultsdata(source, configmap=store_map):
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
results = {}
if is_url(source) or os.path.isfile(source):
- append_resultsdata(results, source, configmap)
+ append_resultsdata(results, source, configmap, configvars)
return results
for root, dirs, files in os.walk(source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- append_resultsdata(results, f, configmap)
+ append_resultsdata(results, f, configmap, configvars)
return results
def filter_resultsdata(results, resultid):
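append_resultsdata() now folds the caller-supplied configvars into each result's configuration section, keeping values already present and deriving TESTSERIES from the results path. A condensed sketch of that loop (values illustrative):

    # Sketch of the configvars handling added above.
    configvars = {'TESTSERIES': '', 'EXECUTED_BY': 'ci-worker-01'}
    configuration = {'TEST_TYPE': 'runtime'}          # one result's "configuration" section
    testseries = 'master'                             # normally taken from the results path
    for config in configvars:
        if config == "TESTSERIES" and "TESTSERIES" not in configuration:
            configuration["TESTSERIES"] = testseries
            continue
        if config not in configuration:
            configuration[config] = configvars[config]
    # configuration -> {'TEST_TYPE': 'runtime', 'TESTSERIES': 'master', 'EXECUTED_BY': 'ci-worker-01'}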
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
index 06505aecc..79c83dd8b 100644
--- a/poky/scripts/lib/resulttool/store.py
+++ b/poky/scripts/lib/resulttool/store.py
@@ -21,16 +21,19 @@ import oeqa.utils.gitarchive as gitarchive
def store(args, logger):
tempdir = tempfile.mkdtemp(prefix='testresults.')
try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
results = {}
logger.info('Reading files from %s' % args.source)
if resultutils.is_url(args.source) or os.path.isfile(args.source):
- resultutils.append_resultsdata(results, args.source)
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
else:
for root, dirs, files in os.walk(args.source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- resultutils.append_resultsdata(results, f)
+ resultutils.append_resultsdata(results, f, configvars=configvars)
elif args.all:
dst = f.replace(args.source, tempdir + "/")
os.makedirs(os.path.dirname(dst), exist_ok=True)
@@ -93,4 +96,6 @@ def register_commands(subparsers):
help='include all files, not just testresults.json files')
parser_build.add_argument('-e', '--allow-empty', action='store_true',
help='don\'t error if no results to store are found')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
diff --git a/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
index d2725b8d0..17c99cb4e 100644
--- a/poky/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -9,54 +9,54 @@ Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% if haveptest %}
+
+{% for machine in machines %}
+{% if ptests[machine] %}
==============================================================================================================
-PTest Result Summary
+{{ machine }} PTest Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ptest in ptests |sort %}
-{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no ptest data
{% endif %}
+{% endfor %}
-{% if haveltp %}
+{% for machine in machines %}
+{% if ltptests[machine] %}
==============================================================================================================
-Ltp Test Result Summary
+{{ machine }} Ltp Test Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ltptest in ltptests |sort %}
-{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[ltptest]['duration']|string) }}
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no LTP Test data
{% endif %}
+{% endfor %}
-{% if haveltpposix %}
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
==============================================================================================================
-Ltp Posix Result Summary
+{{ machine }} Ltp Posix Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ltpposixtest in ltpposixtests |sort %}
-{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[ltpposixtest]['duration']|string) }}
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no LTP Posix Test data
{% endif %}
-
+{% endfor %}
==============================================================================================================
diff --git a/poky/scripts/lib/wic/engine.py b/poky/scripts/lib/wic/engine.py
index 42e93c3eb..61939ad19 100644
--- a/poky/scripts/lib/wic/engine.py
+++ b/poky/scripts/lib/wic/engine.py
@@ -75,7 +75,8 @@ def find_canned_image(scripts_path, wks_file):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ if ((fname.endswith(".wks") and wks_file + ".wks" == fname) or \
+ (fname.endswith(".wks.in") and wks_file + ".wks.in" == fname)):
fullpath = os.path.join(canned_wks_dir, fname)
return fullpath
return None
@@ -92,7 +93,7 @@ def list_canned_images(scripts_path):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks"):
+ if fname.endswith(".wks") or fname.endswith(".wks.in"):
fullpath = os.path.join(canned_wks_dir, fname)
with open(fullpath) as wks:
for line in wks:
@@ -101,7 +102,7 @@ def list_canned_images(scripts_path):
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
- basename = os.path.splitext(fname)[0]
+ basename = fname.split('.')[0]
print(" %s\t\t%s" % (basename.ljust(30), desc))
diff --git a/poky/scripts/lib/wic/filemap.py b/poky/scripts/lib/wic/filemap.py
index 244c07a71..a3919fbca 100644
--- a/poky/scripts/lib/wic/filemap.py
+++ b/poky/scripts/lib/wic/filemap.py
@@ -32,7 +32,10 @@ def get_block_size(file_obj):
"""
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
- binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ try:
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ except OSError:
+ raise IOError("Unable to determine block size")
bsize = struct.unpack('I', binary_data)[0]
if not bsize:
import os
diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
index 652323fa7..d87db1f1b 100644
--- a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -71,10 +71,17 @@ class BootimgEFIPlugin(SourcePlugin):
grubefi_conf += "timeout=%s\n" % bootloader.timeout
grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
- grubefi_conf += "linux %s root=%s rootwait %s\n" \
- % (kernel, creator.rootdev, bootloader.append)
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ grubefi_conf += "linux /%s %s rootwait %s\n" \
+ % (kernel, label_conf, bootloader.append)
if initrd:
grubefi_conf += "initrd /%s\n" % initrd
@@ -138,14 +145,23 @@ class BootimgEFIPlugin(SourcePlugin):
if not custom_cfg:
# Create systemd-boot configuration using parameters from wks file
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
+
title = source_params.get('title')
boot_conf = ""
boot_conf += "title %s\n" % (title if title else "boot")
- boot_conf += "linux %s\n" % kernel
- boot_conf += "options LABEL=Boot root=%s %s\n" % \
- (creator.rootdev, bootloader.append)
+ boot_conf += "linux /%s\n" % kernel
+
+ label = source_params.get('label')
+ label_conf = "LABEL=Boot root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ boot_conf += "options %s %s\n" % \
+ (label_conf, bootloader.append)
if initrd:
boot_conf += "initrd /%s\n" % initrd
@@ -198,8 +214,12 @@ class BootimgEFIPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (staging_kernel_dir, hdddir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
exec_cmd(install_cmd)
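bootimg-efi.py now derives the kernel file name from KERNEL_IMAGETYPE (falling back to bzImage) and honours an optional label source parameter when building the grub entry. A minimal sketch of the resulting linux line (values illustrative):

    # Sketch of the grub entry built above.
    kernel = 'bzImage'                                 # fallback when KERNEL_IMAGETYPE is unset
    label, rootdev, append = 'platform', '/dev/sda2', 'console=ttyS0'   # illustrative
    label_conf = "root=%s" % rootdev
    if label:
        label_conf = "LABEL=%s" % label
    print("linux /%s %s rootwait %s" % (kernel, label_conf, append))
    # -> linux /bzImage LABEL=platform rootwait console=ttyS0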
diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index 6c9f54a89..670d34774 100644
--- a/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -149,8 +149,12 @@ class BootimgPcbiosPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
- (staging_kernel_dir, hdddir),
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
+
+ cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
+ (staging_kernel_dir, kernel, hdddir),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
diff --git a/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 96d07ff62..74d6f1451 100644
--- a/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -70,8 +70,10 @@ class IsoImagePlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/bzImage"
- syslinux_conf += "KERNEL " + kernel + "\n"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
+ syslinux_conf += "KERNEL /" + kernel + "\n"
syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
% bootloader.append
@@ -114,9 +116,11 @@ class IsoImagePlugin(SourcePlugin):
grubefi_conf += "\n"
grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
- grubefi_conf += "linux %s rootwait %s\n" \
+ grubefi_conf += "linux /%s rootwait %s\n" \
% (kernel, bootloader.append)
grubefi_conf += "initrd /initrd \n"
grubefi_conf += "}\n"
@@ -268,9 +272,12 @@ class IsoImagePlugin(SourcePlugin):
if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
os.remove("%s/initrd.cpio.gz" % cr_workdir)
- # Install bzImage
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (kernel_dir, isodir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ kernel = "bzImage"
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (kernel_dir, kernel, isodir, kernel)
exec_cmd(install_cmd)
#Create bootloader for efi boot