Diffstat (limited to 'poky/meta/lib/oe')
-rw-r--r--  poky/meta/lib/oe/buildhistory_analysis.py  65
-rw-r--r--  poky/meta/lib/oe/sstatesig.py              79
2 files changed, 104 insertions(+), 40 deletions(-)
diff --git a/poky/meta/lib/oe/buildhistory_analysis.py b/poky/meta/lib/oe/buildhistory_analysis.py
index 5b28774c9..2d6fa1779 100644
--- a/poky/meta/lib/oe/buildhistory_analysis.py
+++ b/poky/meta/lib/oe/buildhistory_analysis.py
@@ -213,6 +213,7 @@ class FileChange:
changetype_perms = 'P'
changetype_ownergroup = 'O'
changetype_link = 'L'
+ changetype_move = 'M'
def __init__(self, path, changetype, oldvalue = None, newvalue = None):
self.path = path
@@ -251,10 +252,11 @@ class FileChange:
return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
elif self.changetype == self.changetype_link:
return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_move:
+ return '%s moved to %s' % (self.path, self.oldvalue)
else:
return '%s changed (unknown)' % self.path
-
def blob_to_dict(blob):
alines = [line for line in blob.data_stream.read().decode('utf-8').splitlines()]
adict = {}
@@ -281,11 +283,14 @@ def file_list_to_dict(lines):
adict[path] = splitv[0:3]
return adict
+numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')
def compare_file_lists(alines, blines, compare_ownership=True):
adict = file_list_to_dict(alines)
bdict = file_list_to_dict(blines)
filechanges = []
+ additions = []
+ removals = []
for path, splitv in adict.items():
newsplitv = bdict.pop(path, None)
if newsplitv:
@@ -318,11 +323,65 @@ def compare_file_lists(alines, blines, compare_ownership=True):
if oldvalue != newvalue:
filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
else:
- filechanges.append(FileChange(path, FileChange.changetype_remove))
+ removals.append(path)
# Whatever is left over has been added
for path in bdict:
- filechanges.append(FileChange(path, FileChange.changetype_add))
+ additions.append(path)
+
+ # Rather than print additions and removals, it's nicer to print file 'moves'
+ # where names or paths are similar.
+ revmap_remove = {}
+ for removal in removals:
+ translated = removal.translate(numeric_removal)
+ if translated not in revmap_remove:
+ revmap_remove[translated] = []
+ revmap_remove[translated].append(removal)
+
+ #
+ # We want to detect renames of large trees of files like
+ # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
+ #
+ renames = {}
+ for addition in additions.copy():
+ if addition not in additions:
+ continue
+ translated = addition.translate(numeric_removal)
+ if translated in revmap_remove:
+ if len(revmap_remove[translated]) != 1:
+ continue
+ removal = revmap_remove[translated][0]
+ commondir = addition.split("/")
+ commondir2 = removal.split("/")
+ idx = None
+ for i in range(len(commondir)):
+ if commondir[i] != commondir2[i]:
+ idx = i
+ break
+ commondir = "/".join(commondir[:i+1])
+ commondir2 = "/".join(commondir2[:i+1])
+ # If the common parent is in one dict and not the other it's likely a rename
+ # so iterate through those files and process as such
+ if commondir2 not in bdict and commondir not in adict:
+ if commondir not in renames:
+ renames[commondir] = commondir2
+ for addition2 in additions.copy():
+ if addition2.startswith(commondir):
+ removal2 = addition2.replace(commondir, commondir2)
+ if removal2 in removals:
+ additions.remove(addition2)
+ removals.remove(removal2)
+ continue
+ filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
+ additions.remove(addition)
+ removals.remove(removal)
+ for rename in renames:
+ filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))
+
+ for addition in additions:
+ filechanges.append(FileChange(addition, FileChange.changetype_add))
+ for removal in removals:
+ filechanges.append(FileChange(removal, FileChange.changetype_remove))
return filechanges
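
The move-detection pass above keys removed paths by a digit-masked translation, so two paths that differ only in embedded version numbers (the /lib/modules/5.4.40-yocto-standard vs. /lib/modules/5.4.43-yocto-standard case from the comment) collapse onto the same key. A minimal standalone sketch of that heuristic, assuming a hypothetical detect_moves() helper that is not part of buildhistory_analysis.py:

    # Core of the digit-masking rename heuristic.
    numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')

    def detect_moves(removals, additions):
        # Key each removed path by its digit-masked form.
        revmap = {}
        for removal in removals:
            revmap.setdefault(removal.translate(numeric_removal), []).append(removal)
        # An added path whose masked form matches exactly one removal
        # is treated as a move; ambiguous matches are left alone.
        moves = []
        for addition in additions:
            candidates = revmap.get(addition.translate(numeric_removal), [])
            if len(candidates) == 1:
                moves.append((candidates[0], addition))
        return moves

    print(detect_moves(['/lib/modules/5.4.40-yocto-standard/foo.ko'],
                       ['/lib/modules/5.4.43-yocto-standard/foo.ko']))
    # [('/lib/modules/5.4.40-yocto-standard/foo.ko',
    #   '/lib/modules/5.4.43-yocto-standard/foo.ko')]

The real code goes one step further: once a matched pair is found, it derives the common parent directory on each side and, if neither parent exists in the other file list, records a single move for the whole tree instead of one entry per file.
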
diff --git a/poky/meta/lib/oe/sstatesig.py b/poky/meta/lib/oe/sstatesig.py
index d24e3738a..21ae0a765 100644
--- a/poky/meta/lib/oe/sstatesig.py
+++ b/poky/meta/lib/oe/sstatesig.py
@@ -2,9 +2,10 @@
# SPDX-License-Identifier: GPL-2.0-only
#
import bb.siggen
+import bb.runqueue
import oe
-def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
# Return True if we should keep the dependency, False to drop it
def isNative(x):
return x.endswith("-native")
@@ -12,23 +13,26 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
return "-cross-" in x
def isNativeSDK(x):
return x.startswith("nativesdk-")
- def isKernel(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isKernel(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
- def isPackageGroup(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isPackageGroup(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/packagegroup.bbclass" in inherits
- def isAllArch(fn):
- inherits = " ".join(dataCache.inherits[fn])
+ def isAllArch(mc, fn):
+ inherits = " ".join(dataCaches[mc].inherits[fn])
return "/allarch.bbclass" in inherits
- def isImage(fn):
- return "/image.bbclass" in " ".join(dataCache.inherits[fn])
-
- # (Almost) always include our own inter-task dependencies.
- # The exception is the special do_kernel_configme->do_unpack_and_patch
- # dependency from archiver.bbclass.
- if recipename == depname:
- if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
+ def isImage(mc, fn):
+ return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])
+
+ depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
+ mc, _ = bb.runqueue.split_mc(fn)
+
+ # (Almost) always include our own inter-task dependencies (unless it comes
+ # from an mcdepends). The exception is the special
+ # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
+ if recipename == depname and depmc == mc:
+ if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
return False
return True
@@ -47,11 +51,11 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# Only target packages beyond here
# allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
- if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
+ if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
return False
# Exclude well defined machine specific configurations which don't change ABI
- if depname in siggen.abisaferecipes and not isImage(fn):
+ if depname in siggen.abisaferecipes and not isImage(mc, fn):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
@@ -59,10 +63,9 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(":", 1)[0]
- if dataCache and isKernel(depfn) and not isKernel(fn):
- for pkg in dataCache.runrecs[fn]:
- if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+ if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
+ for pkg in dataCaches[mc].runrecs[fn]:
+ if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
return False
# Default to keep dependencies
@@ -87,10 +90,12 @@ class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
class SignatureGeneratorOEBasicHashMixIn(object):
+ supports_multiconfig_datacaches = True
+
def init_rundepcheck(self, data):
self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
@@ -126,8 +131,8 @@ class SignatureGeneratorOEBasicHashMixIn(object):
newsafedeps.append(a1 + "->" + a2)
self.saferecipedeps = newsafedeps
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
def get_taskdata(self):
return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()
@@ -142,41 +147,41 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def prep_taskhash(self, tid, deps, dataCache):
- super().prep_taskhash(tid, deps, dataCache)
+ def prep_taskhash(self, tid, deps, dataCaches):
+ super().prep_taskhash(tid, deps, dataCaches)
if hasattr(self, "extramethod"):
- (_, _, _, fn) = bb.runqueue.split_tid_mcfn(tid)
- inherits = " ".join(dataCache.inherits[fn])
+ (mc, _, _, fn) = bb.runqueue.split_tid_mcfn(tid)
+ inherits = " ".join(dataCaches[mc].inherits[fn])
if inherits.find("/native.bbclass") != -1 or inherits.find("/cross.bbclass") != -1:
self.extramethod[tid] = ":" + self.buildarch
- def get_taskhash(self, tid, deps, dataCache):
+ def get_taskhash(self, tid, deps, dataCaches):
if tid in self.lockedhashes:
if self.lockedhashes[tid]:
return self.lockedhashes[tid]
else:
- return super().get_taskhash(tid, deps, dataCache)
+ return super().get_taskhash(tid, deps, dataCaches)
- # get_taskhash will call get_unihash internally in the parent class, we
+ # get_taskhash will call get_unihash internally in the parent class, we
# need to disable our filter of it whilst this runs else
# incorrect hashes can be calculated.
self._internal = True
- h = super().get_taskhash(tid, deps, dataCache)
+ h = super().get_taskhash(tid, deps, dataCaches)
self._internal = False
(mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
- recipename = dataCache.pkg_fn[fn]
+ recipename = dataCaches[mc].pkg_fn[fn]
self.lockedpnmap[fn] = recipename
- self.lockedhashfn[fn] = dataCache.hashfn[fn]
+ self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]
unlocked = False
if recipename in self.unlockedrecipes:
unlocked = True
else:
def recipename_from_dep(dep):
- fn = bb.runqueue.fn_from_tid(dep)
- return dataCache.pkg_fn[fn]
+ (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
+ return dataCaches[depmc].pkg_fn[depfn]
# If any unlocked recipe is in the direct dependencies then the
# current recipe should be unlocked as well.
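
The sstatesig.py side of the change is mechanical but pervasive: the single dataCache parameter becomes dataCaches, a dict keyed by multiconfig name, and every lookup first extracts the right multiconfig from the task id or virtual filename. A simplified stand-in for the task-id splitting this relies on; the real parser is bb.runqueue.split_tid_mcfn, and this sketch only mimics its shape for the "mc:<mcname>:<fn>:<taskname>" convention (plain "<fn>:<taskname>" outside multiconfig builds):

    # Illustration only: mirrors the (mc, fn, taskname, mcfn) tuple order
    # that the code above unpacks from bb.runqueue.split_tid_mcfn().
    def split_tid_mcfn(tid):
        if tid.startswith('mc:'):
            _, mc, rest = tid.split(':', 2)
            fn, taskname = rest.rsplit(':', 1)
            return mc, fn, taskname, 'mc:%s:%s' % (mc, fn)
        fn, taskname = tid.rsplit(':', 1)
        return '', fn, taskname, fn

    # A dependency from another multiconfig must be resolved against that
    # multiconfig's cache, never the current recipe's (paths are made up):
    dataCaches = {'': 'default-cache', 'musl': 'musl-cache'}  # stand-in values
    depmc, _, deptaskname, _ = split_tid_mcfn('mc:musl:/path/to/foo.bb:do_compile')
    assert (depmc, deptaskname) == ('musl', 'do_compile')
    cache = dataCaches[depmc]  # replaces the old single global dataCache

This is also why the "always keep our own inter-task dependencies" shortcut now checks depmc == mc: the same recipe name arriving via an mcdepends lives in a different cache and must not be short-circuited.
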