author     Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-06-25 19:45:53 +0300
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-06-27 21:38:15 +0300
commit     316dfdd917bec6a218f431211d28bf8df6b6fb0f (patch)
tree       5541073f9851f44c2bd67b4959dc776ee3c3810f /import-layers/yocto-poky/meta/lib
parent     36acd3e888044dea2ac0b2946f15616f968388c9 (diff)
download   openbmc-316dfdd917bec6a218f431211d28bf8df6b6fb0f.tar.xz
Yocto 2.5
Move OpenBMC to Yocto 2.5(sumo)

Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Change-Id: I5c5ad6904a16e14c1c397f0baf10c9d465594a78
Diffstat (limited to 'import-layers/yocto-poky/meta/lib')
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py          |  61
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py               |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/gpg_sign.py                        |   8
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/manifest.py                        |   4
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/package.py                         |   3
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/package_manager.py                 | 424
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/patch.py                           |  20
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/path.py                            |  22
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/recipeutils.py                     | 107
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/rootfs.py                          |  68
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/sdk.py                             |  31
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/sstatesig.py                       |  44
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/utils.py                           |  62
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/buildperf/base.py                |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/buildperf/test_basic.py          |   4
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/core/loader.py                   |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/core/target/qemu.py              |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/apt.py             |  47
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/gi.py              |  15
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/kernelmodule.py    |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/opkg.py            |  47
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/ptest.py           |  13
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/stap.py            |  33
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/runtime/files/hello.stp          |   1
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/archiver.py       |  13
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/bbtests.py        |  29
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/buildoptions.py   |  14
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/devtool.py        |  25
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/distrodata.py     |  67
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/efibootpartition.py |  45
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/gotoolchain.py    |  67
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/imagefeatures.py  |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/meta_ide.py       |  49
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runqemu.py        |  72
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runtime_test.py   | 119
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/signing.py        |   2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/sstatetests.py    |  42
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/wic.py            |   9
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/targetcontrol.py                 |   4
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/utils/commands.py                |   4
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/utils/package_manager.py         |   3
-rw-r--r--  import-layers/yocto-poky/meta/lib/oeqa/utils/qemurunner.py              |  24
42 files changed, 1204 insertions, 410 deletions
diff --git a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
index 3e86a46a3..b0365abce 100644
--- a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
+++ b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
@@ -36,10 +36,29 @@ related_fields = {}
related_fields['RDEPENDS'] = ['DEPENDS']
related_fields['RRECOMMENDS'] = ['DEPENDS']
related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
class ChangeRecord:
def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -79,7 +98,17 @@ class ChangeRecord:
for name in adirs - bdirs]
files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
for name in bdirs - adirs]
- renamed_dirs = [(dir1, dir2) for dir1, files1 in files_ab for dir2, files2 in files_ba if files1 == files2]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
# remove files that belong to renamed dirs from aitems and bitems
for dir1, dir2 in renamed_dirs:
aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
@@ -88,6 +117,7 @@ class ChangeRecord:
if self.fieldname in list_fields or self.fieldname in list_order_fields:
renamed_dirs = []
+ changed_order = False
if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
(depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
aitems = pkglist_combine(depvera)
@@ -101,22 +131,33 @@ class ChangeRecord:
removed = list(set(aitems) - set(bitems))
added = list(set(bitems) - set(aitems))
+ if not removed and not added:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
lines = []
if renamed_dirs:
for dfrom, dto in renamed_dirs:
- lines.append('directory renamed %s -> %s' % (dfrom, dto))
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
if removed or added:
if removed and not bitems:
- lines.append('removed all items "%s"' % ' '.join(removed))
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
else:
if removed:
- lines.append('removed "%s"' % ' '.join(removed))
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
if added:
- lines.append('added "%s"' % ' '.join(added))
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
else:
lines.append('changed order')
- out = '%s: %s' % (self.fieldname, ', '.join(lines))
+ if not (removed or added or changed_order):
+ out = ''
+ else:
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
elif self.fieldname in numeric_fields:
aval = int(self.oldvalue or 0)
@@ -125,9 +166,9 @@ class ChangeRecord:
percentchg = ((bval - aval) / float(aval)) * 100
else:
percentchg = 100
- out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
elif self.fieldname in defaultval_map:
- out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.fieldname == 'PKG' and '[default]' in self.newvalue:
out += ' - may indicate debian renaming failure'
elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -163,7 +204,7 @@ class ChangeRecord:
else:
out = ''
else:
- out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
if self.related:
for chg in self.related:
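For reference, the colour handling introduced in this file's diff reduces to keyword substitution through str.format(): a shared 'colours' dict is unpacked into each message, so the same template works with or without ANSI colouring. A minimal standalone sketch (the escape codes are taken from the hunk above; the 'libfoo libbar' payload is made up):

    # Illustrative sketch of the colour-placeholder pattern, not part of the patch.
    colours = {
        'colour_default': '\033[0m',
        'colour_add': '\033[1;32m',
        'colour_remove': '\033[1;31m',
    }

    # Unused keys (e.g. colour_add) are simply ignored by str.format().
    msg = 'removed "{colour_remove}{value}{colour_default}"'.format(
        value='libfoo libbar', **colours)
    print(msg)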
diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
index ac2fae1ed..4b94806c7 100644
--- a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
+++ b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
@@ -95,7 +95,7 @@ class BuildSystem(object):
destname = os.path.join(layerdestpath, f_basename)
_smart_copy(f, destname)
else:
- if os.path.exists(layerdestpath):
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
bb.note("Skipping layer %s, already handled" % layer)
else:
_smart_copy(layer, layerdestpath)
diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
index 9cc88f020..b17272928 100644
--- a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
+++ b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
@@ -12,6 +12,7 @@ class LocalSigner(object):
self.gpg_path = d.getVar('GPG_PATH')
self.gpg_version = self.get_gpg_version()
self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
@@ -31,7 +32,7 @@ class LocalSigner(object):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- gpg_args = '--no-permission-warning --batch --passphrase=%s' % passphrase
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
if self.gpg_version > (2,1,):
gpg_args += ' --pinentry-mode=loopback'
cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
@@ -71,6 +72,9 @@ class LocalSigner(object):
if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
+ if self.gpg_agent_bin:
+ cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
+
cmd += [input_file]
try:
@@ -99,7 +103,7 @@ class LocalSigner(object):
import subprocess
try:
ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
- return tuple([int(i) for i in ver_str.split('.')])
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
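The get_gpg_version() change above guards against gpg version strings that carry a suffix after a dash, which previously broke the int() conversion. A small sketch of the parsing; the "2.2.4-unknown" input is an illustrative value:

    # Drop any "-suffix" before building the integer tuple, so comparisons
    # such as `> (2, 1)` keep working.
    def parse_gpg_version(ver_str):
        return tuple(int(i) for i in ver_str.split("-")[0].split("."))

    assert parse_gpg_version("2.2.4-unknown") == (2, 2, 4)
    assert parse_gpg_version("2.2.4-unknown") > (2, 1)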
diff --git a/import-layers/yocto-poky/meta/lib/oe/manifest.py b/import-layers/yocto-poky/meta/lib/oe/manifest.py
index 60c49be0e..674303c86 100644
--- a/import-layers/yocto-poky/meta/lib/oe/manifest.py
+++ b/import-layers/yocto-poky/meta/lib/oe/manifest.py
@@ -274,8 +274,8 @@ class OpkgManifest(Manifest):
if pkg_list is not None:
pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
manifest.write("%s,%s\n" % (pkg_type, pkg))
def create_final(self):
diff --git a/import-layers/yocto-poky/meta/lib/oe/package.py b/import-layers/yocto-poky/meta/lib/oe/package.py
index 1e5c3aa8e..4f3e21ad4 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package.py
@@ -72,8 +72,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=
# 16 - kernel module
def is_elf(path):
exec_type = 0
- ret, result = oe.utils.getstatusoutput(
- "file \"%s\"" % path.replace("\"", "\\\""))
+ ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
if ret:
bb.error("split_and_strip_files: 'file %s' failed" % path)
diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
index ed8fec850..2d8aeba03 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package_manager.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
@@ -12,6 +12,7 @@ import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
+import hashlib
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
@@ -22,12 +23,12 @@ def create_index(arg):
if result:
bb.note(result)
-"""
-This method parse the output from the package managerand return
-a dictionary with the information of the packages. This is used
-when the packages are in deb or ipk format.
-"""
def opkg_query(cmd_output):
+ """
+ This method parse the output from the package managerand return
+ a dictionary with the information of the packages. This is used
+ when the packages are in deb or ipk format.
+ """
verregex = re.compile(' \([=<>]* [^ )]*\)')
output = dict()
pkg = ""
@@ -83,6 +84,11 @@ def opkg_query(cmd_output):
return output
+# Note: this should be bb.fatal in the future.
+def failed_postinsts_warn(pkgs, log_path):
+ bb.warn("""Intentionally failing postinstall scriptlets of %s to defer them to first boot is deprecated. Please place them into pkg_postinst_ontarget_${PN} ().
+If deferring to first boot wasn't the intent, then scriptlet failure may mean an issue in the recipe, or a regression elsewhere.
+Details of the failure are in %s.""" %(pkgs, log_path))
class Indexer(object, metaclass=ABCMeta):
def __init__(self, d, deploy_dir):
@@ -96,13 +102,16 @@ class Indexer(object, metaclass=ABCMeta):
class RpmIndexer(Indexer):
def write_index(self):
+ self.do_write_index(self.deploy_dir)
+
+ def do_write_index(self, deploy_dir):
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
- result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
if result:
bb.fatal(result)
@@ -110,17 +119,28 @@ class RpmIndexer(Indexer):
if signer:
sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
is_ascii_sig = (sig_type.upper() != "BIN")
- signer.detach_sign(os.path.join(self.deploy_dir, 'repodata', 'repomd.xml'),
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
self.d.getVar('PACKAGE_FEED_GPG_NAME'),
self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
- "MULTILIB_ARCHS"]
+ ]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
@@ -307,39 +327,103 @@ class PackageManager(object, metaclass=ABCMeta):
This is an abstract class. Do not instantiate this directly.
"""
- def __init__(self, d):
+ def __init__(self, d, target_rootfs):
self.d = d
+ self.target_rootfs = target_rootfs
self.deploy_dir = None
self.deploy_lock = None
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
+ if not postinst_intercepts_dir:
+ postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+ # hence the ugly hash digest in dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
+ "intercept_scripts-%s" %(hashlib.sha256(self.target_rootfs.encode()).hexdigest()) )
+
+ bb.utils.remove(self.intercepts_dir, True)
+ shutil.copytree(postinst_intercepts_dir, self.intercepts_dir)
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match("^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
+
+ def run_intercepts(self):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ if script == "delay_to_first_boot":
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ self._postpone_to_first_boot(script_full)
- """
- Update the package manager package database.
- """
@abstractmethod
def update(self):
+ """
+ Update the package manager package database.
+ """
pass
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
@abstractmethod
def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
pass
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, the any dependencies are left in place.
- """
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
pass
- """
- This function creates the index files
- """
@abstractmethod
def write_index(self):
+ """
+ This function creates the index files
+ """
pass
@abstractmethod
@@ -350,30 +434,28 @@ class PackageManager(object, metaclass=ABCMeta):
def list_installed(self):
pass
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
-
- """
@abstractmethod
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where resides the contents of a package.
+ Deleting the tmpdir is responsability of the caller.
+ """
pass
- """
- Add remote package feeds into repository manager configuration. The parameters
- for the feeds are set by feed_uris, feed_base_paths and feed_archs.
- See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
- for their description.
- """
@abstractmethod
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
pass
- """
- Install all packages that match a glob.
- """
def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
# TODO don't have sdk here but have a property on the superclass
# (and respect in install_complementary)
if sdk:
@@ -393,14 +475,14 @@ class PackageManager(object, metaclass=ABCMeta):
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- """
- Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
def install_complementary(self, globs=None):
+ """
+ Install complementary packages based upon the list of currently installed
+ packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+ these packages, if they don't exist then no error will occur. Note: every
+ backend needs to call this function explicitly after the normal package
+ installation
+ """
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
split_linguas = set()
@@ -457,13 +539,13 @@ class PackageManager(object, metaclass=ABCMeta):
self.deploy_lock = None
- """
- Construct URIs based on the following pattern: uri/base_path where 'uri'
- and 'base_path' correspond to each element of the corresponding array
- argument leading to len(uris) x len(base_paths) elements on the returned
- array
- """
def construct_uris(self, uris, base_paths):
+ """
+ Construct URIs based on the following pattern: uri/base_path where 'uri'
+ and 'base_path' correspond to each element of the corresponding array
+ argument leading to len(uris) x len(base_paths) elements on the returned
+ array
+ """
def _append(arr1, arr2, sep='/'):
res = []
narr1 = [a.rstrip(sep) for a in arr1]
@@ -477,18 +559,98 @@ class PackageManager(object, metaclass=ABCMeta):
return res
return _append(uris, base_paths)
+def create_packages_dir(d, rpm_repo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+ upon into the repo directory. This prevents us seeing other packages that may
+ have been built that we don't depend upon and also packages for architectures we don't
+ support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ rpm_subrepo_dir = oe.path.join(rpm_repo_dir, "rpm")
+
+ bb.utils.remove(rpm_subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(rpm_subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, rpm_subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ rpmdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> rpms)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> rpms)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ rpmdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in rpmdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ dest = l.replace(deploydir, "")
+ dest = rpm_subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
- providename=None,
arch_var=None,
os_var=None,
- rpm_repo_workdir="oe-rootfs-repo"):
- super(RpmPM, self).__init__(d)
- self.target_rootfs = target_rootfs
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
self.target_vendor = target_vendor
self.task_name = task_name
if arch_var == None:
@@ -501,8 +663,7 @@ class RpmPM(PackageManager):
self.primary_arch = self.d.getVar('MACHINE_ARCH')
self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
- bb.utils.mkdirhier(self.rpm_repo_dir)
- oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
+ create_packages_dir(self.d, self.rpm_repo_dir, d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
@@ -577,7 +738,7 @@ class RpmPM(PackageManager):
gpg_opts += 'repo_gpgcheck=1\n'
gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
- if self.d.getVar('RPM_SIGN_PACKAGES') == '0':
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
gpg_opts += 'gpgcheck=0\n'
bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
@@ -602,8 +763,7 @@ class RpmPM(PackageManager):
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
@@ -628,6 +788,8 @@ class RpmPM(PackageManager):
if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
failed_scriptlets_pkgnames[line.split()[-1]] = True
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_warn(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
for pkg in failed_scriptlets_pkgnames.keys():
self.save_rpmpostinst(pkg)
@@ -730,6 +892,7 @@ class RpmPM(PackageManager):
"--setopt=logdir=%s" % (self.d.getVar('T'))
]
cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
try:
output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
if print_output:
@@ -782,6 +945,14 @@ class RpmPM(PackageManager):
open(saved_script_name, 'w').write(output)
os.chmod(saved_script_name, 0o755)
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
+
def extract(self, pkg):
output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
pkg_name = output.splitlines()[-1]
@@ -819,18 +990,18 @@ class RpmPM(PackageManager):
class OpkgDpkgPM(PackageManager):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- def __init__(self, d):
- super(OpkgDpkgPM, self).__init__(d)
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
@@ -839,14 +1010,14 @@ class OpkgDpkgPM(PackageManager):
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
return opkg_query(output)
- """
- Returns the path to a tmpdir where resides the contents of a package.
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir where resides the contents of a package.
- Deleting the tmpdir is responsability of the caller.
+ Deleting the tmpdir is responsability of the caller.
- This method extracts the common parts for Opkg and Dpkg
- """
- def extract(self, pkg, pkg_info):
+ This method extracts the common parts for Opkg and Dpkg
+ """
ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
@@ -885,12 +1056,13 @@ class OpkgDpkgPM(PackageManager):
return tmp_dir
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
class OpkgPM(OpkgDpkgPM):
def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d)
+ super(OpkgPM, self).__init__(d, target_rootfs)
- self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
@@ -921,12 +1093,12 @@ class OpkgPM(OpkgDpkgPM):
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/opkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in /var/lib/opkg/status file.
+ If 'packages' is None then the new_status will be applied to all
+ packages
+ """
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
@@ -1079,8 +1251,7 @@ class OpkgPM(OpkgDpkgPM):
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
try:
@@ -1088,6 +1259,13 @@ class OpkgPM(OpkgDpkgPM):
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_warn(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
except subprocess.CalledProcessError as e:
(bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
@@ -1170,10 +1348,10 @@ class OpkgPM(OpkgDpkgPM):
# is separated from the following entry
status.write("\n")
- '''
- The following function dummy installs pkgs and returns the log of output.
- '''
def dummy_install(self, pkgs):
+ """
+ The following function dummy installs pkgs and returns the log of output.
+ """
if len(pkgs) == 0:
return
@@ -1228,10 +1406,10 @@ class OpkgPM(OpkgDpkgPM):
self.opkg_dir,
symlinks=True)
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
@@ -1242,12 +1420,12 @@ class OpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where resides the contents of a package.
+
+ Deleting the tmpdir is responsability of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
@@ -1260,8 +1438,7 @@ class OpkgPM(OpkgDpkgPM):
class DpkgPM(OpkgDpkgPM):
def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d)
- self.target_rootfs = target_rootfs
+ super(DpkgPM, self).__init__(d, target_rootfs)
self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
@@ -1281,12 +1458,12 @@ class DpkgPM(OpkgDpkgPM):
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
- """
- This function will change a package's status in /var/lib/dpkg/status file.
- If 'packages' is None then the new_status will be applied to all
- packages
- """
def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in /var/lib/dpkg/status file.
+ If 'packages' is None then the new_status will be applied to all
+ packages
+ """
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
@@ -1309,11 +1486,11 @@ class DpkgPM(OpkgDpkgPM):
os.rename(status_file + ".tmp", status_file)
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
control_scripts = [
@@ -1335,8 +1512,7 @@ class DpkgPM(OpkgDpkgPM):
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
failed_pkgs = []
@@ -1351,9 +1527,10 @@ class DpkgPM(OpkgDpkgPM):
stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
except subprocess.CalledProcessError as e:
- bb.note("%s for package %s failed with %d:\n%s" %
+ bb.warn("%s for package %s failed with %d:\n%s" %
(control_script.name, pkg_name, e.returncode,
e.output.decode("utf-8")))
+ failed_postinsts_warn([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
failed_pkgs.append(pkg_name)
break
@@ -1558,10 +1735,10 @@ class DpkgPM(OpkgDpkgPM):
def list_installed(self):
return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
- """
- Returns a dictionary with the package info.
- """
def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
@@ -1572,12 +1749,12 @@ class DpkgPM(OpkgDpkgPM):
return pkg_info
- """
- Returns the path to a tmpdir where resides the contents of a package.
-
- Deleting the tmpdir is responsability of the caller.
- """
def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where resides the contents of a package.
+
+ Deleting the tmpdir is responsability of the caller.
+ """
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
@@ -1592,7 +1769,7 @@ def generate_index_files(d):
classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
indexer_map = {
- "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
"ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
"deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
}
@@ -1608,12 +1785,3 @@ def generate_index_files(d):
if result is not None:
bb.fatal(result)
-
-if __name__ == "__main__":
- """
- We should be able to run this as a standalone script, from outside bitbake
- environment.
- """
- """
- TBD
- """
diff --git a/import-layers/yocto-poky/meta/lib/oe/patch.py b/import-layers/yocto-poky/meta/lib/oe/patch.py
index 584bf6c05..af7aa5235 100644
--- a/import-layers/yocto-poky/meta/lib/oe/patch.py
+++ b/import-layers/yocto-poky/meta/lib/oe/patch.py
@@ -36,6 +36,22 @@ def runcmd(args, dir = None):
(exitstatus, output) = oe.utils.getstatusoutput(cmd)
if exitstatus != 0:
raise CmdError(cmd, exitstatus >> 8, output)
+ if " fuzz " in output:
+ bb.warn("""
+Some of the context lines in patches were ignored. This can lead to incorrectly applied patches.
+The context lines in the patches can be updated with devtool:
+
+ devtool modify <recipe>
+ devtool finish --force-patch-refresh <recipe> <layer_path>
+
+Then the updated patches and the source tree (in devtool's workspace)
+should be reviewed to make sure the patches apply in the correct place
+and don't introduce duplicate lines (which can, and does happen
+when some of the context is ignored). Further information:
+http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+Details:
+{}""".format(output))
return output
finally:
@@ -212,7 +228,7 @@ class PatchTree(PatchSet):
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
@@ -432,7 +448,7 @@ class GitApplyTree(PatchTree):
import re
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
if paths:
shellcmd.append('--')
shellcmd.extend(paths)
diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py
index 1ea03d5d5..76c58fa76 100644
--- a/import-layers/yocto-poky/meta/lib/oe/path.py
+++ b/import-layers/yocto-poky/meta/lib/oe/path.py
@@ -237,3 +237,25 @@ def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False)
raise
return file
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
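A usage sketch for the new oe.path.is_path_parent() helper; the behaviour is mirrored here in a standalone copy so the example runs outside a bitbake environment:

    import os

    # Standalone mirror of the helper added above: every test path must sit
    # under the (trailing-separator-normalised) parent for the result to be True.
    def is_path_parent(possible_parent, *paths):
        def abs_path_trailing(pth):
            pth_abs = os.path.abspath(pth)
            return pth_abs if pth_abs.endswith(os.sep) else pth_abs + os.sep
        parent = abs_path_trailing(possible_parent)
        return bool(paths) and all(abs_path_trailing(p).startswith(parent) for p in paths)

    assert is_path_parent("/usr", "/usr/lib", "/usr/bin/env")
    assert not is_path_parent("/usr", "/usr/lib", "/etc/passwd")  # one path outside
    assert not is_path_parent("/usr/li", "/usr/lib")              # prefix overlap is not parenthood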
diff --git a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
index cab8e4015..aa64553c0 100644
--- a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
@@ -22,7 +22,7 @@ from collections import OrderedDict, defaultdict
# Help us to find places to insert values
recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI\[(.+\.)?md5sum\]', 'SRC_URI\[(.+\.)?sha256sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
@@ -142,6 +142,10 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
else:
newline = ''
+ nowrap_vars_res = []
+ for item in nowrap_vars:
+ nowrap_vars_res.append(re.compile('^%s$' % item))
+
recipe_progression_res = []
recipe_progression_restrs = []
for item in recipe_progression:
@@ -174,7 +178,12 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
return
rawtext = '%s = "%s"%s' % (name, values[name], newline)
addlines = []
- if name in nowrap_vars:
+ nowrap = False
+ for nowrap_re in nowrap_vars_res:
+ if nowrap_re.match(name):
+ nowrap = True
+ break
+ if nowrap:
addlines.append(rawtext)
elif name in list_vars:
splitvalue = split_var_value(values[name], assignment=False)
@@ -242,7 +251,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True):
return changed, tolines
-def patch_recipe_file(fn, values, patch=False, relpath=''):
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
"""Update or insert variable values into a recipe file (assuming you
have already identified the exact file you want to update.)
Note that some manual inspection/intervention may be required
@@ -254,7 +263,11 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
_, tolines = patch_recipe_lines(fromlines, values)
- if patch:
+ if redirect_output:
+ with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+ f.writelines(tolines)
+ return None
+ elif patch:
relfn = os.path.relpath(fn, relpath)
diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
return diff
@@ -304,7 +317,7 @@ def localise_file_vars(fn, varfiles, varlist):
return filevars
-def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
"""Modify a list of variable values in the specified recipe. Handles inc files if
used by the recipe.
"""
@@ -314,7 +327,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
patches = []
for f,v in locs.items():
vals = {k: varvalues[k] for k in v}
- patchdata = patch_recipe_file(f, vals, patch, relpath)
+ patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
if patch:
patches.append(patchdata)
@@ -395,7 +408,7 @@ def get_recipe_local_files(d, patches=False, archives=False):
# fetcher) though note that this only encompasses actual container formats
# i.e. that can contain multiple files as opposed to those that only
# contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
- archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
ret = {}
for uri in uris:
if fetch.ud[uri].type == 'file':
@@ -575,7 +588,7 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
"""
Writes a bbappend file for a recipe
Parameters:
@@ -602,6 +615,9 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
value pairs, or simply a list of the lines.
removevalues:
Variable values to remove - a dict of names/values.
+ redirect_output:
+ If specified, redirects writing the output file to the
+ specified directory (for dry-run purposes)
"""
if not removevalues:
@@ -616,7 +632,8 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
- bb.utils.mkdirhier(appenddir)
+ if not redirect_output:
+ bb.utils.mkdirhier(appenddir)
# FIXME check if the bbappend doesn't get overridden by a higher priority layer?
@@ -693,9 +710,18 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if instfunclines:
bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
- bb.note('Writing append file %s' % appendpath)
+ if redirect_output:
+ bb.note('Writing append file %s (dry-run)' % appendpath)
+ outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+ # Only take a copy if the file isn't already there (this function may be called
+ # multiple times per operation when we're handling overrides)
+ if os.path.exists(appendpath) and not os.path.exists(outfile):
+ shutil.copy2(appendpath, outfile)
+ else:
+ bb.note('Writing append file %s' % appendpath)
+ outfile = appendpath
- if os.path.exists(appendpath):
+ if os.path.exists(outfile):
# Work around lack of nonlocal in python 2
extvars = {'destsubdir': destsubdir}
@@ -767,7 +793,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
if removevalues:
varnames.extend(list(removevalues.keys()))
- with open(appendpath, 'r') as f:
+ with open(outfile, 'r') as f:
(updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
destsubdir = extvars['destsubdir']
@@ -784,20 +810,27 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
updated = True
if updated:
- with open(appendpath, 'w') as f:
+ with open(outfile, 'w') as f:
f.writelines(newlines)
if copyfiles:
if machine:
destsubdir = os.path.join(destsubdir, machine)
+ if redirect_output:
+ outdir = redirect_output
+ else:
+ outdir = appenddir
for newfile, srcfile in copyfiles.items():
- filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+ filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
if newfile.startswith(tempfile.gettempdir()):
newfiledisp = os.path.basename(newfile)
else:
newfiledisp = newfile
- bb.note('Copying %s to %s' % (newfiledisp, filedest))
+ if redirect_output:
+ bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+ else:
+ bb.note('Copying %s to %s' % (newfiledisp, filedest))
bb.utils.mkdirhier(os.path.dirname(filedest))
shutil.copyfile(newfile, filedest)
@@ -867,25 +900,25 @@ def get_recipe_upstream_version(rd):
FetchError when don't have network access or upstream site don't response.
NoMethodError when uri latest_versionstring method isn't implemented.
- Returns a dictonary with version, type and datetime.
+ Returns a dictonary with version, repository revision, current_version, type and datetime.
Type can be A for Automatic, M for Manual and U for Unknown.
"""
from bb.fetch2 import decodeurl
from datetime import datetime
ru = {}
+ ru['current_version'] = rd.getVar('PV')
ru['version'] = ''
ru['type'] = 'U'
ru['datetime'] = ''
-
- pv = rd.getVar('PV')
+ ru['revision'] = ''
# XXX: If don't have SRC_URI means that don't have upstream sources so
# returns the current recipe version, so that upstream version check
# declares a match.
src_uris = rd.getVar('SRC_URI')
if not src_uris:
- ru['version'] = pv
+ ru['version'] = ru['current_version']
ru['type'] = 'M'
ru['datetime'] = datetime.now()
return ru
@@ -894,6 +927,9 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ ru['current_version'] = pv
+
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
if manual_upstream_version:
# manual tracking of upstream version.
@@ -914,33 +950,22 @@ def get_recipe_upstream_version(rd):
ru['datetime'] = datetime.now()
else:
ud = bb.fetch2.FetchData(src_uri, rd)
- pupver = ud.method.latest_versionstring(ud, rd)
- (upversion, revision) = pupver
-
- # format git version version+gitAUTOINC+HASH
- if uri_type == 'git':
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
-
- # if contains revision but not upversion use current pv
- if upversion == '' and revision:
- upversion = pv
-
- if upversion:
- tmp = upversion
- upversion = ''
-
- if pfx:
- upversion = pfx + tmp
- else:
- upversion = tmp
-
- if sfx:
- upversion = upversion + sfx + revision[:10]
+ if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ revision = ud.method.latest_revision(ud, rd, 'default')
+ upversion = pv
+ if revision != rd.getVar("SRCREV"):
+ upversion = upversion + "-new-commits-available"
+ else:
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
if upversion:
ru['version'] = upversion
ru['type'] = 'A'
+ if revision:
+ ru['revision'] = revision
+
ru['datetime'] = datetime.now()
return ru
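The nowrap_vars change near the top of this file's diff turns the entries into regular expressions, so flag-qualified checksum variables (e.g. SRC_URI[something.md5sum]) are also exempted from wrapping. A short sketch of the matching; raw strings are used here for the patterns and the variable names are examples:

    import re

    # Patterns copied from the hunk above, compiled the same way patch_recipe_lines() does.
    nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER',
                   r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
    nowrap_vars_res = [re.compile('^%s$' % item) for item in nowrap_vars]

    for name in ('SRC_URI[md5sum]', 'SRC_URI[archive.sha256sum]', 'DESCRIPTION'):
        wrapped = not any(r.match(name) for r in nowrap_vars_res)
        print(name, '-> wrap' if wrapped else '-> nowrap')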
diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
index 754ef563a..f8f717c05 100644
--- a/import-layers/yocto-poky/meta/lib/oe/rootfs.py
+++ b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
@@ -92,10 +92,6 @@ class Rootfs(object, metaclass=ABCMeta):
self.d.getVar('PACKAGE_FEED_ARCHS'))
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
-
"""
The _cleanup() method should be used to clean-up stuff that we don't really
want to end up on target. For example, in the case of RPM, the DB locks.
@@ -178,20 +174,10 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.utils.remove(intercepts_dir, True)
-
bb.utils.mkdirhier(self.image_rootfs)
bb.utils.mkdirhier(self.deploydir)
- shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -207,7 +193,7 @@ class Rootfs(object, metaclass=ABCMeta):
execute_pre_post_process(self.d, rootfs_post_install_cmds)
- self._run_intercepts()
+ self.pm.run_intercepts()
execute_pre_post_process(self.d, post_process_cmds)
@@ -293,44 +279,6 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the package manager data files
self.pm.remove_packaging_data()
- def _run_intercepts(self):
- intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts")
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.image_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
- if output: bb.note(output.decode("utf-8"))
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed, details in log.do_rootfs" % script)
- bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
-
- with open(script_full) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.warn("The postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
def _run_ldconfig(self):
if self.d.getVar('LDCONFIGDEPEND'):
bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
@@ -523,14 +471,6 @@ class RpmRootfs(Rootfs):
self._log_check_warn()
self._log_check_error()
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.pm.save_rpmpostinst(pkg)
-
def _cleanup(self):
self.pm._invoke_dnf(["clean", "all"])
@@ -711,9 +651,6 @@ class DpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
@@ -982,9 +919,6 @@ class OpkgRootfs(DpkgOpkgRootfs):
src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
- def _handle_intercept_failure(self, registered_pkgs):
- self.pm.mark_packages("unpacked", registered_pkgs.split())
-
def _log_check(self):
self._log_check_warn()
self._log_check_error()
diff --git a/import-layers/yocto-poky/meta/lib/oe/sdk.py b/import-layers/yocto-poky/meta/lib/oe/sdk.py
index 7f71cfba6..d6a503372 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sdk.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sdk.py
@@ -162,40 +162,21 @@ class RpmSdk(Sdk):
self.host_manifest = RpmManifest(d, self.manifest_dir,
Manifest.MANIFEST_TYPE_SDK_HOST)
- target_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig'
- ]
-
rpm_repo_workdir = "oe-sdk-repo"
if "sdk_ext" in d.getVar("BB_RUNTASK"):
rpm_repo_workdir = "oe-sdk-ext-repo"
-
self.target_pm = RpmPM(d,
self.sdk_target_sysroot,
self.d.getVar('TARGET_VENDOR'),
'target',
- target_providename,
rpm_repo_workdir=rpm_repo_workdir
)
- sdk_providename = ['/bin/sh',
- '/bin/bash',
- '/usr/bin/env',
- '/usr/bin/perl',
- 'pkgconfig',
- 'libGL.so()(64bit)',
- 'libGL.so'
- ]
-
self.host_pm = RpmPM(d,
self.sdk_host_sysroot,
self.d.getVar('SDK_VENDOR'),
'host',
- sdk_providename,
"SDK_PACKAGE_ARCHS",
"SDK_OS",
rpm_repo_workdir=rpm_repo_workdir
@@ -228,6 +209,8 @@ class RpmSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ self.target_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -237,6 +220,8 @@ class RpmSdk(Sdk):
self._populate_sysroot(self.host_pm, self.host_manifest)
self.install_locales(self.host_pm)
+ self.host_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -312,6 +297,8 @@ class OpkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ self.target_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -321,6 +308,8 @@ class OpkgSdk(Sdk):
self._populate_sysroot(self.host_pm, self.host_manifest)
self.install_locales(self.host_pm)
+ self.host_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -397,6 +386,8 @@ class DpkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ self.target_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
@@ -408,6 +399,8 @@ class DpkgSdk(Sdk):
self._populate_sysroot(self.host_pm, self.host_manifest)
self.install_locales(self.host_pm)
+ self.host_pm.run_intercepts()
+
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
diff --git a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
index 3a8778eae..b82e0f422 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
@@ -1,4 +1,5 @@
import bb.siggen
+import oe
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Return True if we should keep the dependency, False to drop it
@@ -28,15 +29,14 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
return False
return True
- # Quilt (patch application) changing isn't likely to affect anything
- excludelist = ['quilt-native', 'subversion-native', 'git-native', 'ccache-native']
- if depname in excludelist and recipename != depname:
- return False
-
# Exclude well defined recipe->dependency
if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
return False
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
# Don't change native/cross/nativesdk recipe dependencies any further
if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
return True
@@ -368,3 +368,37 @@ def sstate_get_manifest_filename(task, d):
if extrainf:
d2.setVar("SSTATE_MANMACH", extrainf)
return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ return None, d2
+
+
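
As a reading aid for the pkgarch search order that find_sstate_manifest() above builds, here is a small standalone sketch; the architecture and PACKAGE_EXTRA_ARCHS values are made up purely for illustration, and the SDK fallback entry appended by the real function is omitted for brevity.

    # Standalone sketch of the pkgarch selection logic in find_sstate_manifest()
    # above; the variable values below are hypothetical.
    def sketch_pkgarchs(taskdata, build_arch="x86_64", machine_arch="qemux86_64",
                        extra_archs="i586 i686"):
        if taskdata.endswith("-native"):
            pkgarchs = [build_arch]      # native recipes live under BUILD_ARCH
        else:
            pkgarchs = [machine_arch]    # default case: MACHINE_ARCH first
        # Then fall back through the extra architectures (most specific first)
        # and finally the catch-all "allarch" entry, as the function above does.
        pkgarchs += list(reversed(extra_archs.split()))
        pkgarchs.append("allarch")
        return pkgarchs

    print(sketch_pkgarchs("zlib-native"))  # ['x86_64', 'i686', 'i586', 'allarch']
    print(sketch_pkgarchs("zlib"))         # ['qemux86_64', 'i686', 'i586', 'allarch']
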
diff --git a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py
index 643ab78df..80f0442d0 100644
--- a/import-layers/yocto-poky/meta/lib/oe/utils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/utils.py
@@ -86,17 +86,6 @@ def str_filter_out(f, str, d):
from re import match
return " ".join([x for x in str.split() if not match(f, x, 0)])
-def param_bool(cfg, field, dflt = None):
- """Lookup <field> in <cfg> map and convert it to a boolean; take
- <dflt> when this <field> does not exist"""
- value = cfg.get(field, dflt)
- strvalue = str(value).lower()
- if strvalue in ('yes', 'y', 'true', 't', '1'):
- return True
- elif strvalue in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
-
def build_depends_string(depends, task):
"""Append a taskname to a string of dependencies as used by the [depends] flag"""
return " ".join(dep + ":" + task for dep in depends.split())
@@ -167,6 +156,49 @@ def any_distro_features(d, features, truevalue="1", falsevalue=""):
"""
return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+def parallel_make(d):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+ found, returns None
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return None
+
+def parallel_make_argument(d, fmt, limit=None):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+ option is found in PARALLEL_MAKE, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
+
def packages_filter_out_system(d):
"""
Return a list of packages from PACKAGES with the "system" packages such as
@@ -292,6 +324,14 @@ def host_gcc_version(d):
version = match.group(1)
return "-%s" % version if version in ("4.8", "4.9") else ""
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ return localdata
+
#
# Python 2.7 doesn't have threaded pools (just multiprocessing)
# so implement a version here
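
As a usage sketch for the parallel_make()/parallel_make_argument() helpers added above, the snippet below feeds them a stand-in datastore; it assumes oe.utils is importable (i.e. it runs inside a BitBake/OE environment), and the PARALLEL_MAKE value is made up.

    import oe.utils

    class FakeData:
        """Minimal stand-in for the BitBake datastore 'd' used above."""
        def getVar(self, name):
            return "-j 10 -l 8" if name == "PARALLEL_MAKE" else None

    d = FakeData()
    print(oe.utils.parallel_make(d))                             # 10 ('-l 8' is ignored)
    print(oe.utils.parallel_make_argument(d, "-n %d"))           # '-n 10'
    print(oe.utils.parallel_make_argument(d, "-n %d", limit=4))  # '-n 4'
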
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/buildperf/base.py b/import-layers/yocto-poky/meta/lib/oeqa/buildperf/base.py
index 7b2b4aa2a..ac6ee15d0 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/buildperf/base.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/buildperf/base.py
@@ -282,7 +282,7 @@ class BuildPerfTestCase(unittest.TestCase):
if not os.path.isdir(self.tmp_dir):
os.mkdir(self.tmp_dir)
if self.build_target:
- self.run_cmd(['bitbake', self.build_target, '-c', 'fetchall'])
+ self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
def tearDown(self):
"""Tear-down fixture for each test"""
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/buildperf/test_basic.py b/import-layers/yocto-poky/meta/lib/oeqa/buildperf/test_basic.py
index a19089a6e..6d6b01b04 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/buildperf/test_basic.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/buildperf/test_basic.py
@@ -15,8 +15,7 @@ import shutil
import oe.path
from oeqa.buildperf import BuildPerfTestCase
-from oeqa.utils.commands import get_bb_vars
-
+from oeqa.utils.commands import get_bb_var, get_bb_vars
class Test1P1(BuildPerfTestCase):
build_target = 'core-image-sato'
@@ -30,6 +29,7 @@ class Test1P1(BuildPerfTestCase):
self.measure_cmd_resources(['bitbake', self.build_target], 'build',
'bitbake ' + self.build_target, save_bs=True)
self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
+ self.measure_disk_usage(get_bb_var("IMAGE_ROOTFS", self.build_target), 'rootfs', 'rootfs', True)
class Test1P2(BuildPerfTestCase):
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/core/loader.py b/import-layers/yocto-poky/meta/lib/oeqa/core/loader.py
index 975a081ba..a4744dee0 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/core/loader.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/core/loader.py
@@ -43,7 +43,7 @@ def _built_modules_dict(modules):
for module in modules:
# Assumption: package and module names do not contain upper case
# characters, whereas class names do
- m = re.match(r'^([^A-Z]+)(?:\.([A-Z][^.]*)(?:\.([^.]+))?)?$', module)
+ m = re.match(r'^(\w+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
module_name, class_name, test_name = m.groups()
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/core/target/qemu.py b/import-layers/yocto-poky/meta/lib/oeqa/core/target/qemu.py
index d359bf9fe..bf3b633f0 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/core/target/qemu.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/core/target/qemu.py
@@ -9,7 +9,7 @@ import time
from .ssh import OESSHTarget
from oeqa.utils.qemurunner import QemuRunner
-supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic', 'elf']
+supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
class OEQemuTarget(OESSHTarget):
def __init__(self, logger, ip, server_ip, timeout=300, user='root',
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/apt.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/apt.py
new file mode 100644
index 000000000..8d4dd35c5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/apt.py
@@ -0,0 +1,47 @@
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class AptTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected = 0):
+ command = 'apt-get %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class AptRepoTest(AptTest):
+
+ @classmethod
+ def setUpClass(cls):
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_DEB'], 'all')
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
+ apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; echo deb %s ./ > sources.list' % (apt_get_sourceslist_dir, apt_get_source_server))
+
+ def cleanup_source_config_for_package_install(self):
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; rm sources.list' % (apt_get_sourceslist_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'deb',
+ 'DEB is not the primary package manager')
+ @OEHasPackage(['apt'])
+ def test_apt_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove --yes run-postinsts-dev')
+ self.pkg('install --yes --allow-unauthenticated run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/gi.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/gi.py
new file mode 100644
index 000000000..19073e52c
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/gi.py
@@ -0,0 +1,15 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GObjectIntrospectionTest(OERuntimeTestCase):
+
+ @OETestDepends(["ssh.SSHTest.test_ssh"])
+ @OEHasPackage(["python3-pygobject"])
+ def test_python(self):
+ script = """from gi.repository import GObject; print(GObject.markup_escape_text("<testing&testing>"))"""
+ status, output = self.target.run("python3 -c '%s'" % script)
+ self.assertEqual(status, 0, msg="Python failed (%s)" % (output))
+ self.assertEqual(output, "&lt;testing&amp;testing&gt;", msg="Unexpected output (%s)" % output)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/kernelmodule.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/kernelmodule.py
index 11ad7b7f0..de1a5aa44 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/kernelmodule.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/kernelmodule.py
@@ -28,7 +28,7 @@ class KernelModuleTest(OERuntimeTestCase):
@OETestDepends(['gcc.GccCompileTest.test_gcc_compile'])
def test_kernel_module(self):
cmds = [
- 'cd /usr/src/kernel && make scripts',
+ 'cd /usr/src/kernel && make scripts prepare',
'cd /tmp && make',
'cd /tmp && insmod hellomod.ko',
'lsmod | grep hellomod',
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/opkg.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/opkg.py
new file mode 100644
index 000000000..671ee06c7
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/opkg.py
@@ -0,0 +1,47 @@
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class OpkgTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected = 0):
+ command = 'opkg %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class OpkgRepoTest(OpkgTest):
+
+ @classmethod
+ def setUpClass(cls):
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], 'all')
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
+ apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ apt_get_sourceslist_dir = '/etc/opkg/'
+ self.target.run('cd %s; echo src/gz all %s >> opkg.conf' % (apt_get_sourceslist_dir, apt_get_source_server))
+
+ def cleanup_source_config_for_package_install(self):
+ apt_get_sourceslist_dir = '/etc/opkg/'
+ self.target.run('cd %s; sed -i "/^src/d" opkg.conf' % (apt_get_sourceslist_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'ipk',
+ 'IPK is not the primary package manager')
+ @OEHasPackage(['opkg'])
+ def test_opkg_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove run-postinsts-dev')
+ self.pkg('install run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/ptest.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/ptest.py
index ec8c038a5..f60a433d5 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/ptest.py
@@ -48,9 +48,12 @@ class PtestRunnerTest(OERuntimeTestCase):
@OETestID(1600)
@skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
- @skipIfNotFeature('ptest-pkgs', 'Test requires ptest-pkgs to be in IMAGE_FEATURES')
@OETestDepends(['ssh.SSHTest.test_ssh'])
def test_ptestrunner(self):
+ status, output = self.target.run('which ptest-runner', 0)
+ if status != 0:
+ self.skipTest("No -ptest packages are installed in the image")
+
import datetime
test_log_dir = self.td.get('TEST_LOG_DIR', '')
@@ -80,3 +83,11 @@ class PtestRunnerTest(OERuntimeTestCase):
# Remove the old link to create a new one
os.remove(ptest_log_dir_link)
os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
+
+ failed_tests = {}
+ for section in parse_result.result_dict:
+ failed_testcases = [ test for test, result in parse_result.result_dict[section] if result == 'fail' ]
+ if failed_testcases:
+ failed_tests[section] = failed_testcases
+
+ self.assertFalse(failed_tests, msg = "Failed ptests: %s" %(str(failed_tests)))
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/stap.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/stap.py
new file mode 100644
index 000000000..fc728bfc5
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/cases/stap.py
@@ -0,0 +1,33 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class StapTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ src = os.path.join(cls.tc.runtime_files_dir, 'hello.stp')
+ dst = '/tmp/hello.stp'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ files = '/tmp/hello.stp'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestID(1652)
+ @skipIfNotFeature('tools-profile',
+ 'Test requires tools-profile to be in IMAGE_FEATURES')
+ @OETestDepends(['kernelmodule.KernelModuleTest.test_kernel_module'])
+ def test_stap(self):
+ cmds = [
+ 'cd /usr/src/kernel && make scripts prepare',
+ 'cd /lib/modules/`uname -r` && (if [ ! -L build ]; then ln -s /usr/src/kernel build; fi)',
+ 'stap --disable-cache -DSTP_NO_VERREL_CHECK /tmp/hello.stp'
+ ]
+ for cmd in cmds:
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/files/hello.stp b/import-layers/yocto-poky/meta/lib/oeqa/runtime/files/hello.stp
new file mode 100644
index 000000000..367714716
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/files/hello.stp
@@ -0,0 +1 @@
+probe oneshot { println("hello world") }
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/archiver.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/archiver.py
index f61a52201..0a6d4e325 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/archiver.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/archiver.py
@@ -116,3 +116,16 @@ class Archiver(OESelftestTestCase):
excluded_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[1]))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % target_recipes[1])
+
+
+
+ def test_archiver_srpm_mode(self):
+ """
+ Test that in srpm mode, the added recipe dependencies at least exist/work [YOCTO #11121]
+ """
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[srpm] = "1"\n'
+ self.write_config(features)
+
+ bitbake('-n core-image-sato')
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/bbtests.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/bbtests.py
index 4c8204903..350614967 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -64,15 +64,14 @@ class BitbakeTests(OESelftestTestCase):
@OETestID(108)
def test_invalid_patch(self):
- # This patch already exists in SRC_URI so adding it again will cause the
- # patch to fail.
- self.write_recipeinc('man', 'SRC_URI += "file://man-1.5h1-make.patch"')
+ # This patch should fail to apply.
+ self.write_recipeinc('man-db', 'FILESEXTRAPATHS_prepend := "${THISDIR}/files:"\nSRC_URI += "file://0001-Test-patch-here.patch"')
self.write_config("INHERIT_remove = \"report-error\"")
- result = bitbake('man -c patch', ignore_status=True)
- self.delete_recipeinc('man')
- bitbake('-cclean man')
+ result = bitbake('man-db -c patch', ignore_status=True)
+ self.delete_recipeinc('man-db')
+ bitbake('-cclean man-db')
line = self.getline(result, "Function failed: patch_do_patch")
- self.assertTrue(line and line.startswith("ERROR:"), msg = "Repeated patch application didn't fail. bitbake output: %s" % result.output)
+ self.assertTrue(line and line.startswith("ERROR:"), msg = "Incorrectly formed patch application didn't fail. bitbake output: %s" % result.output)
@OETestID(1354)
def test_force_task_1(self):
@@ -132,17 +131,17 @@ class BitbakeTests(OESelftestTestCase):
@OETestID(168)
def test_invalid_recipe_src_uri(self):
data = 'SRC_URI = "file://invalid"'
- self.write_recipeinc('man', data)
+ self.write_recipeinc('man-db', data)
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
INHERIT_remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- bitbake('-ccleanall man')
- result = bitbake('-c fetch man', ignore_status=True)
- bitbake('-ccleanall man')
- self.delete_recipeinc('man')
+ bitbake('-ccleanall man-db')
+ result = bitbake('-c fetch man-db', ignore_status=True)
+ bitbake('-ccleanall man-db')
+ self.delete_recipeinc('man-db')
        self.assertEqual(result.status, 1, msg="Command succeeded when it should have failed. bitbake output: %s" % result.output)
self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output, msg = "\"invalid\" file \
doesn't exist, yet no error message encountered. bitbake output: %s" % result.output)
@@ -222,9 +221,9 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
INHERIT_remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- self.write_recipeinc('man',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
- runCmd('bitbake -c cleanall man xcursor-transparent-theme')
- result = runCmd('bitbake -c unpack -k man xcursor-transparent-theme', ignore_status=True)
+ self.write_recipeinc('man-db',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
+ runCmd('bitbake -c cleanall man-db xcursor-transparent-theme')
+ result = runCmd('bitbake -c unpack -k man-db xcursor-transparent-theme', ignore_status=True)
errorpos = result.output.find('ERROR: Function failed: do_fail_task')
manver = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output)
continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % manver.group(1))
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/buildoptions.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/buildoptions.py
index cf221c33a..e60e32dad 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/buildoptions.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -164,3 +164,17 @@ class ArchiverTest(OESelftestTestCase):
src_file_glob = str(pkgs_path[0]) + "/xcursor*.src.rpm"
tar_file_glob = str(pkgs_path[0]) + "/xcursor*.tar.gz"
self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.gz files under %s/allarch*/xcursor*" % deploy_dir_src)
+
+class ToolchainOptions(OESelftestTestCase):
+
+ def test_toolchain_fortran(self):
+ """
+ Test whether we can enable and build fortran and its supporting libraries
+ """
+
+ features = 'FORTRAN_forcevariable = ",fortran"\n'
+ features += 'RUNTIMETARGET_append_pn-gcc-runtime = " libquadmath"\n'
+ self.write_config(features)
+
+ bitbake('gcc-runtime libgfortran')
+
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/devtool.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/devtool.py
index 43280cdc0..d5b6a46d4 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/devtool.py
@@ -174,7 +174,7 @@ class DevtoolTests(DevtoolBase):
def test_create_workspace(self):
# Check preconditions
result = runCmd('bitbake-layers show-layers')
- self.assertTrue('/workspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
+ self.assertTrue('\nworkspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
# Try creating a workspace layer with a specific path
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -611,7 +611,7 @@ class DevtoolTests(DevtoolBase):
@OETestID(1165)
def test_devtool_modify_git(self):
# Check preconditions
- testrecipe = 'mkelfimage'
+ testrecipe = 'psplash'
src_uri = get_bb_var('SRC_URI', testrecipe)
self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
# Clean up anything in the workdir/sysroot/sstate cache
@@ -623,9 +623,9 @@ class DevtoolTests(DevtoolBase):
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
- self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. devtool output: %s' % result.output)
- matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mkelfimage_*.bbappend'))
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'psplash_*.bbappend'))
self.assertTrue(matches, 'bbappend not created')
# Test devtool status
result = runCmd('devtool status')
@@ -899,6 +899,7 @@ class DevtoolTests(DevtoolBase):
f.write('BBFILE_PATTERN_oeselftesttemplayer = "^${LAYERDIR}/"\n')
f.write('BBFILE_PRIORITY_oeselftesttemplayer = "999"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_oeselftesttemplayer = "1"\n')
+ f.write('LAYERSERIES_COMPAT_oeselftesttemplayer = "${LAYERSERIES_COMPAT_core}"\n')
self.add_command_to_tearDown('bitbake-layers remove-layer %s || true' % templayerdir)
result = runCmd('bitbake-layers add-layer %s' % templayerdir, cwd=self.builddir)
# Create the bbappend
@@ -987,8 +988,12 @@ class DevtoolTests(DevtoolBase):
@OETestID(1371)
def test_devtool_update_recipe_local_files_2(self):
"""Check local source files support when oe-local-files is in Git"""
- testrecipe = 'lzo'
+ testrecipe = 'devtool-test-local'
recipefile = get_bb_var('FILE', testrecipe)
+ recipedir = os.path.dirname(recipefile)
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains uncommitted changes' % testrecipe)
# Setup srctree for modifying the recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -1002,9 +1007,9 @@ class DevtoolTests(DevtoolBase):
runCmd('git add oe-local-files', cwd=tempdir)
runCmd('git commit -m "Add local sources"', cwd=tempdir)
# Edit / commit local sources
- runCmd('echo "# Foobar" >> oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('echo "# Foobar" >> oe-local-files/file1', cwd=tempdir)
runCmd('git commit -am "Edit existing file"', cwd=tempdir)
- runCmd('git rm oe-local-files/run-ptest', cwd=tempdir)
+ runCmd('git rm oe-local-files/file2', cwd=tempdir)
runCmd('git commit -m"Remove file"', cwd=tempdir)
runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
runCmd('git add oe-local-files/new-local', cwd=tempdir)
@@ -1016,11 +1021,11 @@ class DevtoolTests(DevtoolBase):
os.path.dirname(recipefile))
# Checkout unmodified file to working copy -> devtool should still pick
# the modified version from HEAD
- runCmd('git checkout HEAD^ -- oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('git checkout HEAD^ -- oe-local-files/file1', cwd=tempdir)
runCmd('devtool update-recipe %s' % testrecipe)
expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
- (' M', '.*/acinclude.m4$'),
- (' D', '.*/run-ptest$'),
+ (' M', '.*/file1$'),
+ (' D', '.*/file2$'),
('??', '.*/new-local$'),
('??', '.*/0001-Add-new-file.patch$')]
self._check_repo_status(os.path.dirname(recipefile), expected_status)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/distrodata.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/distrodata.py
index 12540adc7..7b2800464 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/distrodata.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -9,6 +9,11 @@ class Distrodata(OESelftestTestCase):
@classmethod
def setUpClass(cls):
super(Distrodata, cls).setUpClass()
+ feature = 'INHERIT += "distrodata"\n'
+ feature += 'LICENSE_FLAGS_WHITELIST += " commercial"\n'
+
+ cls.write_config(cls, feature)
+ bitbake('-c checkpkg world')
@OETestID(1902)
def test_checkpkg(self):
@@ -18,11 +23,6 @@ class Distrodata(OESelftestTestCase):
Product: oe-core
Author: Alexander Kanavin <alexander.kanavin@intel.com>
"""
- feature = 'INHERIT += "distrodata"\n'
- feature += 'LICENSE_FLAGS_WHITELIST += " commercial"\n'
-
- self.write_config(feature)
- bitbake('-c checkpkg world')
checkpkg_result = open(os.path.join(get_bb_var("LOG_DIR"), "checkpkg.csv")).readlines()[1:]
regressed_failures = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] if pkg_data[11] == 'UNKNOWN_BROKEN']
regressed_successes = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] if pkg_data[11] == 'KNOWN_BROKEN']
@@ -40,3 +40,60 @@ The following packages have been checked successfully for upstream versions,
but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please remove that line from the recipes.
""" + "\n".join(regressed_successes)
self.assertTrue(len(regressed_failures) == 0 and len(regressed_successes) == 0, msg)
+
+ def test_maintainers(self):
+ """
+ Summary: Test that oe-core recipes have a maintainer
+ Expected: All oe-core recipes (except a few special static/testing ones) should have a maintainer listed in maintainers.inc file.
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+ def is_exception(pkg):
+ exceptions = ["packagegroup-", "initramfs-", "systemd-machine-units", "target-sdk-provides-dummy"]
+ for i in exceptions:
+ if i in pkg:
+ return True
+ return False
+
+ def is_in_oe_core(recipe, recipes):
+ self.assertTrue(recipe in recipes.keys(), "Recipe %s was not in 'bitbake-layers show-recipes' output" %(recipe))
+ self.assertTrue(len(recipes[recipe]) > 0, "'bitbake-layers show-recipes' could not determine what layer(s) recipe %s is in" %(recipe))
+ try:
+ recipes[recipe].index('meta')
+ return True
+ except ValueError:
+ return False
+
+ def get_recipe_layers():
+ import re
+
+ recipes = {}
+ recipe_regex = re.compile('^(?P<name>.*):$')
+ layer_regex = re.compile('^ (?P<name>\S*) +')
+ output = runCmd('bitbake-layers show-recipes').output
+ for line in output.split('\n'):
+ recipe_name_obj = recipe_regex.search(line)
+ if recipe_name_obj:
+ recipe_name = recipe_name_obj.group('name')
+ recipes[recipe_name] = []
+ recipe_layer_obj = layer_regex.search(line)
+ if recipe_layer_obj:
+ layer_name = recipe_layer_obj.group('name')
+ recipes[recipe_name].append(layer_name)
+ return recipes
+
+ checkpkg_result = open(os.path.join(get_bb_var("LOG_DIR"), "checkpkg.csv")).readlines()[1:]
+ recipes_layers = get_recipe_layers()
+ no_maintainer_list = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] \
+ if pkg_data[14] == '' and is_in_oe_core(pkg_data[0], recipes_layers) and not is_exception(pkg_data[0])]
+ msg = """
+The following packages do not have a maintainer assigned to them. Please add an entry to meta/conf/distro/include/maintainers.inc file.
+""" + "\n".join(no_maintainer_list)
+ self.assertTrue(len(no_maintainer_list) == 0, msg)
+
+ with_maintainer_list = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] \
+ if pkg_data[14] != '' and is_in_oe_core(pkg_data[0], recipes_layers) and not is_exception(pkg_data[0])]
+ msg = """
+The list of oe-core packages with maintainers is empty. This may indicate that the test has regressed and needs fixing.
+"""
+ self.assertTrue(len(with_maintainer_list) > 0, msg)
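
To make the two regexes in get_recipe_layers() above concrete, the snippet below runs them over a made-up sample shaped the way the regexes expect 'bitbake-layers show-recipes' output to look (a recipe name line, then single-space-indented layer/version lines); the real spacing, layer names and versions may differ.

    import re

    sample = "gcc:\n meta                 7.3.0\nzlib:\n meta                 1.2.11\n"
    recipe_regex = re.compile('^(?P<name>.*):$')
    layer_regex = re.compile('^ (?P<name>\S*) +')

    recipes = {}
    current = None
    for line in sample.split('\n'):
        m = recipe_regex.search(line)
        if m:
            current = m.group('name')
            recipes[current] = []
        m = layer_regex.search(line)
        if m and current:
            recipes[current].append(m.group('name'))

    print(recipes)  # {'gcc': ['meta'], 'zlib': ['meta']}
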
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/efibootpartition.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
new file mode 100644
index 000000000..0c8325669
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
@@ -0,0 +1,45 @@
+# Based on runqemu.py test file
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+
+import re
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var
+
+class GenericEFITest(OESelftestTestCase):
+ """EFI booting test class"""
+
+ buffer = True
+ cmd_common = "runqemu nographic serial wic ovmf"
+ efi_provider = "systemd-boot"
+ image = "core-image-minimal"
+ machine = "qemux86-64"
+ recipes_built = False
+
+ @classmethod
+ def setUpLocal(self):
+ super(GenericEFITest, self).setUpLocal(self)
+
+ self.write_config(self,
+"""
+EFI_PROVIDER = "%s"
+IMAGE_FSTYPES_pn-%s_append = " wic"
+MACHINE = "%s"
+MACHINE_FEATURES_append = " efi"
+WKS_FILE = "efi-bootdisk.wks.in"
+IMAGE_INSTALL_append = " grub-efi systemd-boot kernel-image-bzimage"
+"""
+% (self.efi_provider, self.image, self.machine))
+ if not self.recipes_built:
+ bitbake("ovmf")
+ bitbake(self.image)
+ self.recipes_built = True
+
+ @classmethod
+ def test_boot_efi(self):
+ """Test generic boot partition with qemu"""
+ cmd = "%s %s" % (self.cmd_common, self.machine)
+ with runqemu(self.image, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/gotoolchain.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/gotoolchain.py
new file mode 100644
index 000000000..1e23257f4
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -0,0 +1,67 @@
+import glob
+import os
+import shutil
+import tempfile
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
+
+
+class oeGoToolchainSelfTest(OESelftestTestCase):
+ """
+ Test cases for OE's Go toolchain
+ """
+
+ @staticmethod
+ def get_sdk_environment(tmpdir_SDKQA):
+ pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
+ # FIXME: this is a very naive implementation
+ return glob.glob(pattern)[0]
+
+ @staticmethod
+ def get_sdk_toolchain():
+ bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
+ "meta-go-toolchain")
+ sdk_deploy = bb_vars['SDK_DEPLOY']
+ toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
+ return os.path.join(sdk_deploy, toolchain_name + ".sh")
+
+ @classmethod
+ def setUpClass(cls):
+ super(oeGoToolchainSelfTest, cls).setUpClass()
+ cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
+ cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
+ # Build the SDK and locate it in DEPLOYDIR
+ bitbake("meta-go-toolchain")
+ cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
+ # Install the SDK into the tmpdir
+ runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
+ cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
+ super(oeGoToolchainSelfTest, cls).tearDownClass()
+
+ def run_sdk_go_command(self, gocmd):
+ cmd = "cd %s; " % self.tmpdir_SDKQA
+ cmd = cmd + ". %s; " % self.env_SDK
+ cmd = cmd + "export GOPATH=%s; " % self.go_path
+ cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
+ return runCmd(cmd).status
+
+ def test_go_dep_build(self):
+ proj = "github.com/golang"
+ name = "dep"
+ ver = "v0.3.1"
+ archive = ".tar.gz"
+ url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
+
+ runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
+ runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
+ runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
+ runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
+ % (self.tmpdir_SDKQA, self.go_path, proj, name))
+ retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
+ % (proj, name))
+ self.assertEqual(retv, 0,
+ msg="Running go build failed for %s" % name)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/imagefeatures.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
index 0ffb68692..09e0b2062 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -211,7 +211,7 @@ class ImageFeatures(OESelftestTestCase):
image_name = 'core-image-minimal'
img_types = [itype for itype in get_bb_var("IMAGE_TYPES", image_name).split() \
- if itype not in ('container', 'elf', 'multiubi')]
+ if itype not in ('container', 'elf', 'f2fs', 'multiubi')]
config = 'IMAGE_FSTYPES += "%s"\n'\
'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/meta_ide.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/meta_ide.py
new file mode 100644
index 000000000..5df9d3ed9
--- /dev/null
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/meta_ide.py
@@ -0,0 +1,49 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
+from oeqa.core.decorator.oeid import OETestID
+import tempfile
+import shutil
+
+class MetaIDE(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(MetaIDE, cls).setUpClass()
+ bitbake('meta-ide-support')
+ bb_vars = get_bb_vars(['MULTIMACH_TARGET_SYS', 'TMPDIR', 'COREBASE'])
+ cls.environment_script = 'environment-setup-%s' % bb_vars['MULTIMACH_TARGET_SYS']
+ cls.tmpdir = bb_vars['TMPDIR']
+ cls.environment_script_path = '%s/%s' % (cls.tmpdir, cls.environment_script)
+ cls.corebasedir = bb_vars['COREBASE']
+ cls.tmpdir_metaideQA = tempfile.mkdtemp(prefix='metaide')
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_metaideQA, ignore_errors=True)
+ super(MetaIDE, cls).tearDownClass()
+
+ @OETestID(1982)
+ def test_meta_ide_had_installed_meta_ide_support(self):
+ self.assertExists(self.environment_script_path)
+
+ @OETestID(1983)
+ def test_meta_ide_can_compile_c_program(self):
+ runCmd('cp %s/test.c %s' % (self.tc.files_dir, self.tmpdir_metaideQA))
+ runCmd("cd %s; . %s; $CC test.c -lm" % (self.tmpdir_metaideQA, self.environment_script_path))
+ compiled_file = '%s/a.out' % self.tmpdir_metaideQA
+ self.assertExists(compiled_file)
+
+ @OETestID(1984)
+ def test_meta_ide_can_build_cpio_project(self):
+ dl_dir = self.td.get('DL_DIR', None)
+ self.project = SDKBuildProject(self.tmpdir_metaideQA + "/cpio/", self.environment_script_path,
+ "https://ftp.gnu.org/gnu/cpio/cpio-2.12.tar.gz",
+ self.tmpdir_metaideQA, self.td['DATETIME'], dl_dir=dl_dir)
+ self.project.download_archive()
+ self.assertEqual(self.project.run_configure(), 0,
+ msg="Running configure failed")
+ self.assertEqual(self.project.run_make(), 0,
+ msg="Running make failed")
+ self.assertEqual(self.project.run_install(), 0,
+ msg="Running make install failed")
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runqemu.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runqemu.py
index 47d41f521..5ebdd57a4 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runqemu.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -3,9 +3,10 @@
#
import re
-
+import tempfile
+import time
from oeqa.selftest.case import OESelftestTestCase
-from oeqa.utils.commands import bitbake, runqemu, get_bb_var
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var, runCmd
from oeqa.core.decorator.oeid import OETestID
class RunqemuTests(OESelftestTestCase):
@@ -136,3 +137,70 @@ SYSLINUX_TIMEOUT = "10"
cmd = "%s %s" % (self.cmd_common, rootfs)
with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+# This test was designed as a separate class to verify that the shutdown
+# command shuts down qemu as expected on each qemu architecture, based on
+# the MACHINE configuration in the config file (e.g. local.conf).
+#
+# This differs from RunqemuTests, which is dedicated to MACHINE=qemux86-64
+# and tests that qemux86-64 boots various filesystem types, including live
+# images (iso and hddimg); live images are not supported on every qemu
+# architecture.
+class QemuTest(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(QemuTest, cls).setUpClass()
+ cls.recipe = 'core-image-minimal'
+ cls.machine = get_bb_var('MACHINE')
+ cls.deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ cls.cmd_common = "runqemu nographic"
+ cls.qemuboot_conf = "%s-%s.qemuboot.conf" % (cls.recipe, cls.machine)
+ cls.qemuboot_conf = os.path.join(cls.deploy_dir_image, cls.qemuboot_conf)
+ bitbake(cls.recipe)
+
+ def _start_qemu_shutdown_check_if_shutdown_succeeded(self, qemu, timeout):
+ qemu.run_serial("shutdown -h now")
+ # stop_thread() stops the LoggingThread instance used to log qemu
+ # through the serial console; this prevents the code below from hitting
+ # a "Console connection closed unexpectedly" exception once qemu has
+ # been shut down by the shutdown command above.
+ qemu.runner.stop_thread()
+ time_track = 0
+ while True:
+ is_alive = qemu.check()
+ if not is_alive:
+ return True
+ if time_track > timeout:
+ return False
+ time.sleep(1)
+ time_track += 1
+
+ def test_qemu_can_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ cmd = "%s %s" % (self.cmd_common, self.qemuboot_conf)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s did not shut down within timeout (%s)' % (self.machine, shutdown_timeout))
+
+ # Need to have portmap/rpcbind running to allow this test to work and
+ # current autobuilder setup does not have this.
+ def disabled_test_qemu_can_boot_nfs_and_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ bitbake('meta-ide-support')
+ rootfs_tar = "%s-%s.tar.bz2" % (self.recipe, self.machine)
+ rootfs_tar = os.path.join(self.deploy_dir_image, rootfs_tar)
+ self.assertExists(rootfs_tar)
+ tmpdir = tempfile.mkdtemp(prefix='qemu_nfs')
+ tmpdir_nfs = os.path.join(tmpdir, 'nfs')
+ cmd_extract_nfs = 'runqemu-extract-sdk %s %s' % (rootfs_tar, tmpdir_nfs)
+ result = runCmd(cmd_extract_nfs)
+ self.assertEqual(0, result.status, "runqemu-extract-sdk didn't run as expected. %s" % result.output)
+ cmd = "%s nfs %s %s" % (self.cmd_common, self.qemuboot_conf, tmpdir_nfs)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s did not shut down within timeout (%s)' % (self.machine, shutdown_timeout))
+ runCmd('rm -rf %s' % tmpdir)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runtime_test.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runtime_test.py
index 25270b753..9c9b4b341 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -167,55 +167,6 @@ class TestImage(OESelftestTestCase):
class Postinst(OESelftestTestCase):
@OETestID(1540)
- def test_verify_postinst(self):
- """
- Summary: The purpose of this test is to verify the execution order of postinst Bugzilla ID: [5319]
- Expected :
- 1. Compile a minimal image.
- 2. The compiled image will add the created layer with the recipes postinst[ abdpt]
- 3. Run qemux86
- 4. Validate the task execution order
- Author: Francisco Pedraza <francisco.j.pedraza.gonzalez@intel.com>
- """
- features = 'INHERIT += "testimage"\n'
- features += 'CORE_IMAGE_EXTRA_INSTALL += "postinst-at-rootfs \
-postinst-delayed-a \
-postinst-delayed-b \
-postinst-delayed-d \
-postinst-delayed-p \
-postinst-delayed-t \
-"\n'
- self.write_config(features)
-
- bitbake('core-image-minimal -f ')
-
- postinst_list = ['100-postinst-at-rootfs',
- '101-postinst-delayed-a',
- '102-postinst-delayed-b',
- '103-postinst-delayed-d',
- '104-postinst-delayed-p',
- '105-postinst-delayed-t']
- path_workdir = get_bb_var('WORKDIR','core-image-minimal')
- workspacedir = 'testimage/qemu_boot_log'
- workspacedir = os.path.join(path_workdir, workspacedir)
- rexp = re.compile("^Running postinst .*/(?P<postinst>.*)\.\.\.$")
- with runqemu('core-image-minimal') as qemu:
- with open(workspacedir) as f:
- found = False
- idx = 0
- for line in f.readlines():
- line = line.strip().replace("^M","")
- if not line: # To avoid empty lines
- continue
- m = rexp.search(line)
- if m:
- self.assertEqual(postinst_list[idx], m.group('postinst'), "Fail")
- idx = idx+1
- found = True
- elif found:
- self.assertEqual(idx, len(postinst_list), "Not found all postinsts")
- break
-
@OETestID(1545)
def test_postinst_rootfs_and_boot(self):
"""
@@ -234,16 +185,22 @@ postinst-delayed-t \
for initialization managers: sysvinit and systemd.
"""
- file_rootfs_name = "this-was-created-at-rootfstime"
- fileboot_name = "this-was-created-at-first-boot"
- rootfs_pkg = 'postinst-at-rootfs'
- boot_pkg = 'postinst-delayed-a'
+
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with /
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+ targettestdir = os.path.join(sysconfdir, "postinst-test")
for init_manager in ("sysvinit", "systemd"):
for classes in ("package_rpm", "package_deb", "package_ipk"):
with self.subTest(init_manager=init_manager, package_class=classes):
- features = 'MACHINE = "qemux86"\n'
- features += 'CORE_IMAGE_EXTRA_INSTALL += "%s %s "\n'% (rootfs_pkg, boot_pkg)
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-delayed-b"\n'
features += 'IMAGE_FEATURES += "package-management empty-root-password"\n'
features += 'PACKAGE_CLASSES = "%s"\n' % classes
if init_manager == "systemd":
@@ -255,13 +212,49 @@ postinst-delayed-t \
bitbake('core-image-minimal')
- file_rootfs_created = os.path.join(get_bb_var('IMAGE_ROOTFS', "core-image-minimal"),
- file_rootfs_name)
- found = os.path.isfile(file_rootfs_created)
- self.assertTrue(found, "File %s was not created at rootfs time by %s" % \
- (file_rootfs_name, rootfs_pkg))
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs")),
+ "rootfs state file was not created")
- testcommand = 'ls /etc/' + fileboot_name
with runqemu('core-image-minimal') as qemu:
- status, output = qemu.run_serial("-f /etc/" + fileboot_name)
- self.assertEqual(status, 0, 'File %s was not created at first boot (%s)' % (fileboot_name, output))
+ # Make the test echo a string and search for that as
+ # run_serial()'s status code is useless.
+ for filename in ("rootfs", "delayed-a", "delayed-b"):
+ status, output = qemu.run_serial("test -f %s && echo found" % os.path.join(targettestdir, filename))
+ self.assertEqual(output, "found", "%s was not present on boot" % filename)
+
+
+
+ def test_failing_postinst(self):
+ """
+ Summary: The purpose of this test case is to verify that post-installation
+ scripts that contain errors are properly reported.
+ Expected: The scriptlet failure is properly reported.
+ The file that is created after the error in the scriptlet is not present.
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with /
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+
+ for classes in ("package_rpm", "package_deb", "package_ipk"):
+ with self.subTest(package_class=classes):
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-rootfs-failing"\n'
+ features += 'PACKAGE_CLASSES = "%s"\n' % classes
+ self.write_config(features)
+ bb_result = bitbake('core-image-minimal')
+ self.assertGreaterEqual(bb_result.output.find("Intentionally failing postinstall scriptlets of ['postinst-rootfs-failing'] to defer them to first boot is deprecated."), 0,
+ "Warning about a failed scriptlet not found in bitbake output: %s" %(bb_result.output))
+
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs-before-failure")),
+ "rootfs-before-failure file was not created")
+ self.assertFalse(os.path.isfile(os.path.join(hosttestdir, "rootfs-after-failure")),
+ "rootfs-after-failure file was created")
+
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/signing.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/signing.py
index b3d1a8292..a750cfc7b 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/signing.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/signing.py
@@ -87,7 +87,7 @@ class Signing(OESelftestTestCase):
ret = runCmd('%s/rpmkeys --define "_dbpath %s" --checksig %s' %
(staging_bindir_native, rpmdb, pkg_deploy))
# tmp/deploy/rpm/i586/ed-1.9-r0.i586.rpm: rsa sha1 md5 OK
- self.assertIn('rsa sha1 (md5) pgp md5 OK', ret.output, 'Package signed incorrectly.')
+ self.assertIn('digests signatures OK', ret.output, 'Package signed incorrectly.')
shutil.rmtree(rpmdb)
#Check that an image can be built from signed packages
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/sstatetests.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/sstatetests.py
index 47900886a..7b008e409 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/sstatetests.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -2,15 +2,51 @@ import os
import shutil
import glob
import subprocess
+import tempfile
from oeqa.selftest.case import OESelftestTestCase
-from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer, create_temp_layer
from oeqa.selftest.cases.sstate import SStateBase
from oeqa.core.decorator.oeid import OETestID
import bb.siggen
class SStateTests(SStateBase):
+ def test_autorev_sstate_works(self):
+ # Test that a git repository which changes is correctly handled by SRCREV = ${AUTOREV}
+ # when PV does not contain SRCPV
+
+ tempdir = tempfile.mkdtemp(prefix='oeqa')
+ self.track_for_cleanup(tempdir)
+ create_temp_layer(tempdir, 'selftestrecipetool')
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % tempdir)
+ runCmd('bitbake-layers add-layer %s' % tempdir)
+
+ # Use dbus-wait as a local git repo we can add a commit between two builds in
+ pn = 'dbus-wait'
+ srcrev = '6cc6077a36fe2648a5f993fe7c16c9632f946517'
+ url = 'git://git.yoctoproject.org/dbus-wait'
+ result = runCmd('git clone %s noname' % url, cwd=tempdir)
+ srcdir = os.path.join(tempdir, 'noname')
+ result = runCmd('git reset --hard %s' % srcrev, cwd=srcdir)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure script in source directory')
+
+ recipefile = os.path.join(tempdir, "recipes-test", "dbus-wait-test", 'dbus-wait-test_git.bb')
+ os.makedirs(os.path.dirname(recipefile))
+ srcuri = 'git://' + srcdir + ';protocol=file'
+ result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
+ self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
+
+ with open(recipefile, 'a') as f:
+ f.write('SRCREV = "${AUTOREV}"\n')
+ f.write('PV = "1.0"\n')
+
+ bitbake("dbus-wait-test -c fetch")
+ with open(os.path.join(srcdir, "bar.txt"), "w") as f:
+ f.write("foo")
+ result = runCmd('git add bar.txt; git commit -asm "add bar"', cwd=srcdir)
+ bitbake("dbus-wait-test -c unpack")
+
# Test sstate files creation and their location
def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True):
@@ -490,7 +526,7 @@ http_proxy = "http://example.com/"
# this is an expensive computation, thus just compare the first 'max_sigfiles_to_compare' k files
max_sigfiles_to_compare = 20
first, rest = files[:max_sigfiles_to_compare], files[max_sigfiles_to_compare:]
- compare_sigfiles(first, files1.keys(), files2.keys(), compare=True)
- compare_sigfiles(rest, files1.keys(), files2.keys(), compare=False)
+ compare_sigfiles(first, files1, files2, compare=True)
+ compare_sigfiles(rest, files1, files2, compare=False)
self.fail("sstate hashes not identical.")
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/wic.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/wic.py
index 651d575dc..b84466d9a 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/wic.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/cases/wic.py
@@ -623,7 +623,7 @@ part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --r
self.assertTrue(os.path.islink(path))
self.assertTrue(os.path.isfile(os.path.realpath(path)))
- @OETestID(1422)
+ @OETestID(1424)
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_qemu(self):
"""Test wic-image-minimal under qemu"""
@@ -634,10 +634,13 @@ part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --r
self.remove_config(config)
with runqemu('wic-image-minimal', ssh=False) as qemu:
- cmd = "mount |grep '^/dev/' | cut -f1,3 -d ' '"
+ cmd = "mount |grep '^/dev/' | cut -f1,3 -d ' ' | sort"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(output, '/dev/root /\r\n/dev/sda1 /boot\r\n/dev/sda3 /media\r\n/dev/sda4 /mnt')
+ cmd = "grep UUID= /etc/fstab"
status, output = qemu.run_serial(cmd)
self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
- self.assertEqual(output, '/dev/root /\r\n/dev/sda1 /boot\r\n/dev/sda3 /mnt')
+ self.assertEqual(output, 'UUID=2c71ef06-a81d-4735-9d3a-379b69c6bdba\t/media\text4\tdefaults\t0\t0')
@only_for_arch(['i586', 'i686', 'x86_64'])
@OETestID(1852)
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/targetcontrol.py b/import-layers/yocto-poky/meta/lib/oeqa/targetcontrol.py
index f63936c3e..59a9c35a0 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/targetcontrol.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/targetcontrol.py
@@ -91,6 +91,8 @@ class QemuTarget(BaseTarget):
def __init__(self, d, logger, image_fstype=None):
+ import oe.types
+
super(QemuTarget, self).__init__(d, logger)
self.rootfs = ''
@@ -107,7 +109,7 @@ class QemuTarget(BaseTarget):
dump_dir = d.getVar("TESTIMAGE_DUMP_DIR")
qemu_use_kvm = d.getVar("QEMU_USE_KVM")
if qemu_use_kvm and \
- (qemu_use_kvm == "True" and "x86" in d.getVar("MACHINE") or \
+ (oe.types.boolean(qemu_use_kvm) and "x86" in d.getVar("MACHINE") or \
d.getVar("MACHINE") in qemu_use_kvm.split()):
use_kvm = True
else:
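
The change above routes QEMU_USE_KVM through oe.types.boolean(), so boolean spellings such as "1" or "true" enable KVM rather than only the literal string "True". Below is a minimal sketch of that decision; to_bool() mirrors the param_bool() helper removed from oe/utils.py earlier in this patch, and the exact set of spellings accepted by the real oe.types.boolean() may differ slightly.

    # Sketch of the KVM decision in QemuTarget.__init__() above, with a local
    # to_bool() standing in for oe.types.boolean().
    def to_bool(value):
        s = str(value).lower()
        if s in ('yes', 'y', 'true', 't', '1'):
            return True
        if s in ('no', 'n', 'false', 'f', '0'):
            return False
        raise ValueError("invalid boolean value: %r" % value)

    def use_kvm(qemu_use_kvm, machine):
        # Same shape as the condition above: a boolean-style value enables KVM
        # on x86 machines; otherwise KVM is enabled if MACHINE is listed.
        if qemu_use_kvm and \
           (to_bool(qemu_use_kvm) and "x86" in machine or
            machine in qemu_use_kvm.split()):
            return True
        return False

    print(use_kvm("1", "qemux86-64"))     # True  (old check required the literal "True")
    print(use_kvm("true", "qemux86-64"))  # True
    print(use_kvm("0", "qemux86-64"))     # False
    print(use_kvm("", "qemux86-64"))      # False (empty/unset disables KVM)
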
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/utils/commands.py b/import-layers/yocto-poky/meta/lib/oeqa/utils/commands.py
index 0bb90028d..0d9cf23fe 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/utils/commands.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/utils/commands.py
@@ -227,7 +227,7 @@ def get_bb_vars(variables=None, target=None, postconfig=None):
bbenv = get_bb_env(target, postconfig=postconfig)
if variables is not None:
- variables = variables.copy()
+ variables = list(variables)
var_re = re.compile(r'^(export )?(?P<var>\w+(_.*)?)="(?P<value>.*)"$')
unset_re = re.compile(r'^unset (?P<var>\w+)$')
lastline = None
@@ -285,7 +285,7 @@ def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec=
f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
-
+ f.write('LAYERSERIES_COMPAT_%s = "${LAYERSERIES_COMPAT_core}"\n' % templayername)
@contextlib.contextmanager
def runqemu(pn, ssh=True, runqemuparams='', image_fstype=None, launch_cmd=None, qemuparams=None, overrides={}, discard_writes=True):
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/utils/package_manager.py b/import-layers/yocto-poky/meta/lib/oeqa/utils/package_manager.py
index 724afb2b5..afd5b8e75 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/utils/package_manager.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/utils/package_manager.py
@@ -14,7 +14,8 @@ def get_package_manager(d, root_path):
if pkg_class == "rpm":
pm = RpmPM(d,
root_path,
- d.getVar('TARGET_VENDOR'))
+ d.getVar('TARGET_VENDOR'),
+ filterbydependencies=False)
pm.create_configs()
elif pkg_class == "ipk":
diff --git a/import-layers/yocto-poky/meta/lib/oeqa/utils/qemurunner.py b/import-layers/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
index 0631d4321..c962602a6 100644
--- a/import-layers/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
+++ b/import-layers/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
@@ -194,7 +194,8 @@ class QemuRunner:
sys.exit(0)
self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
- self.logger.debug("waiting at most %s seconds for qemu pid" % self.runqemutime)
+ self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
endtime = time.time() + self.runqemutime
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
@@ -208,7 +209,8 @@ class QemuRunner:
time.sleep(0.5)
if not self.is_alive():
- self.logger.error("Qemu pid didn't appear in %s seconds" % self.runqemutime)
+ self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
# Dump all processes to help us to figure out what is going on...
ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '], stdout=subprocess.PIPE).communicate()[0]
processes = ps.decode("utf-8")
@@ -225,7 +227,9 @@ class QemuRunner:
# We are alive: qemu is running
out = self.getOutput(output)
netconf = False # network configuration is not required by default
- self.logger.debug("qemu started in %s seconds - qemu procces pid is %s" % (time.time() - (endtime - self.runqemutime), self.qemupid))
+ self.logger.debug("qemu started in %s seconds - qemu procces pid is %s (%s)" %
+ (time.time() - (endtime - self.runqemutime),
+ self.qemupid, time.strftime("%D %H:%M:%S")))
if get_ip:
cmdline = ''
with open('/proc/%s/cmdline' % self.qemupid) as p:
@@ -269,7 +273,8 @@ class QemuRunner:
return False
self.logger.debug("Output from runqemu:\n%s", out)
- self.logger.debug("Waiting at most %d seconds for login banner" % self.boottime)
+ self.logger.debug("Waiting at most %d seconds for login banner (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
endtime = time.time() + self.boottime
socklist = [self.server_socket]
reachedlogin = False
@@ -298,15 +303,22 @@ class QemuRunner:
self.server_socket = qemusock
stopread = True
reachedlogin = True
- self.logger.debug("Reached login banner")
+ self.logger.debug("Reached login banner in %s seconds (%s)" %
+ (time.time() - (endtime - self.boottime),
+ time.strftime("%D %H:%M:%S")))
else:
+ # no need to check if reachedlogin unless we support multiple connections
+ self.logger.debug("QEMU socket disconnected before login banner reached. (%s)" %
+ time.strftime("%D %H:%M:%S"))
socklist.remove(sock)
sock.close()
stopread = True
if not reachedlogin:
- self.logger.debug("Target didn't reached login boot in %d seconds" % self.boottime)
+ if time.time() >= endtime:
+ self.logger.debug("Target didn't reach login banner in %d seconds (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
tail = lambda l: "\n".join(l.splitlines()[-25:])
# in case bootlog is empty, use tail qemu log store at self.msg
lines = tail(bootlog if bootlog else self.msg)