Diffstat (limited to 'import-layers/yocto-poky/meta/lib/oe')
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/__init__.py                |    2
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py   |  665
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/cachedpath.py              |  233
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/classextend.py             |  122
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/classutils.py              |   44
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py        |  262
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/data.py                    |   47
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/distro_check.py            |  308
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/gpg_sign.py                |  128
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/license.py                 |  243
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/lsb.py                     |  117
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/maketype.py                |  102
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/manifest.py                |  344
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/package.py                 |  294
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/package_manager.py         | 1787
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/packagedata.py             |   95
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/packagegroup.py            |   36
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/patch.py                   |  895
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/path.py                    |  261
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/prservice.py               |  126
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/qa.py                      |  171
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/recipeutils.py             |  971
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/rootfs.py                  |  973
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/sdk.py                     |  473
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/sstatesig.py               |  404
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/terminal.py                |  308
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/types.py                   |  153
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/useradd.py                 |   68
-rw-r--r--  import-layers/yocto-poky/meta/lib/oe/utils.py                   |  421
29 files changed, 0 insertions(+), 10053 deletions(-)
diff --git a/import-layers/yocto-poky/meta/lib/oe/__init__.py b/import-layers/yocto-poky/meta/lib/oe/__init__.py
deleted file mode 100644
index 3ad9513f4..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
deleted file mode 100644
index b0365abce..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
+++ /dev/null
@@ -1,665 +0,0 @@
-# Report significant differences in the buildhistory repository since a specific revision
-#
-# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
-# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
-#
-# Note: requires GitPython 0.3.1+
-#
-# You can use this from the command line by running scripts/buildhistory-diff
-#
-
-import sys
-import os.path
-import difflib
-import git
-import re
-import hashlib
-import collections
-import bb.utils
-import bb.tinfoil
-
-
-# How to display fields
-list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
-list_order_fields = ['PACKAGES']
-defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
-numeric_fields = ['PKGSIZE', 'IMAGESIZE']
-# Fields to monitor
-monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
-ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
-# Percentage change to alert for numeric fields
-monitor_numeric_threshold = 10
-# Image files to monitor (note that image-info.txt is handled separately)
-img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
-
-colours = {
- 'colour_default': '',
- 'colour_add': '',
- 'colour_remove': '',
-}
-
-def init_colours(use_colours):
- global colours
- if use_colours:
- colours = {
- 'colour_default': '\033[0m',
- 'colour_add': '\033[1;32m',
- 'colour_remove': '\033[1;31m',
- }
- else:
- colours = {
- 'colour_default': '',
- 'colour_add': '',
- 'colour_remove': '',
- }
-
-class ChangeRecord:
- def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
- self.path = path
- self.fieldname = fieldname
- self.oldvalue = oldvalue
- self.newvalue = newvalue
- self.monitored = monitored
- self.related = []
- self.filechanges = None
-
- def __str__(self):
- return self._str_internal(True)
-
- def _str_internal(self, outer):
- if outer:
- if '/image-files/' in self.path:
- prefix = '%s: ' % self.path.split('/image-files/')[0]
- else:
- prefix = '%s: ' % self.path
- else:
- prefix = ''
-
- def pkglist_combine(depver):
- pkglist = []
- for k,v in depver.items():
- if v:
- pkglist.append("%s (%s)" % (k,v))
- else:
- pkglist.append(k)
- return pkglist
-
- def detect_renamed_dirs(aitems, bitems):
- adirs = set(map(os.path.dirname, aitems))
- bdirs = set(map(os.path.dirname, bitems))
- files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
- for name in adirs - bdirs]
- files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
- for name in bdirs - adirs]
- renamed_dirs = []
- for dir1, files1 in files_ab:
- rename = False
- for dir2, files2 in files_ba:
- if files1 == files2 and not rename:
- renamed_dirs.append((dir1,dir2))
- # Make sure that we don't use this (dir, files) pair again.
- files_ba.remove((dir2,files2))
- # If a dir has already been found to have a rename, stop and go no further.
- rename = True
-
- # remove files that belong to renamed dirs from aitems and bitems
- for dir1, dir2 in renamed_dirs:
- aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
- bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
- return renamed_dirs, aitems, bitems
-
- if self.fieldname in list_fields or self.fieldname in list_order_fields:
- renamed_dirs = []
- changed_order = False
- if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
- (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
- aitems = pkglist_combine(depvera)
- bitems = pkglist_combine(depverb)
- else:
- aitems = self.oldvalue.split()
- bitems = self.newvalue.split()
- if self.fieldname == 'FILELIST':
- renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
-
- removed = list(set(aitems) - set(bitems))
- added = list(set(bitems) - set(aitems))
-
- if not removed and not added:
- depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
- depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
- for i, j in zip(depvera.items(), depverb.items()):
- if i[0] != j[0]:
- changed_order = True
- break
-
- lines = []
- if renamed_dirs:
- for dfrom, dto in renamed_dirs:
- lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
- if removed or added:
- if removed and not bitems:
- lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
- else:
- if removed:
- lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
- if added:
- lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
- else:
- lines.append('changed order')
-
- if not (removed or added or changed_order):
- out = ''
- else:
- out = '%s: %s' % (self.fieldname, ', '.join(lines))
-
- elif self.fieldname in numeric_fields:
- aval = int(self.oldvalue or 0)
- bval = int(self.newvalue or 0)
- if aval != 0:
- percentchg = ((bval - aval) / float(aval)) * 100
- else:
- percentchg = 100
- out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
- elif self.fieldname in defaultval_map:
- out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
- if self.fieldname == 'PKG' and '[default]' in self.newvalue:
- out += ' - may indicate debian renaming failure'
- elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
- if self.oldvalue and self.newvalue:
- out = '%s changed:\n ' % self.fieldname
- elif self.newvalue:
- out = '%s added:\n ' % self.fieldname
- elif self.oldvalue:
- out = '%s cleared:\n ' % self.fieldname
- alines = self.oldvalue.splitlines()
- blines = self.newvalue.splitlines()
- diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
- out += '\n '.join(list(diff)[2:])
- out += '\n --'
- elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
- if self.filechanges or (self.oldvalue and self.newvalue):
- fieldname = self.fieldname
- if '/image-files/' in self.path:
- fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
- out = 'Changes to %s:\n ' % fieldname
- else:
- if outer:
- prefix = 'Changes to %s ' % self.path
- out = '(%s):\n ' % self.fieldname
- if self.filechanges:
- out += '\n '.join(['%s' % i for i in self.filechanges])
- else:
- alines = self.oldvalue.splitlines()
- blines = self.newvalue.splitlines()
- diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
- out += '\n '.join(list(diff))
- out += '\n --'
- else:
- out = ''
- else:
- out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
-
- if self.related:
- for chg in self.related:
- if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
- continue
- for line in chg._str_internal(False).splitlines():
- out += '\n * %s' % line
-
- return '%s%s' % (prefix, out) if out else ''
-
-class FileChange:
- changetype_add = 'A'
- changetype_remove = 'R'
- changetype_type = 'T'
- changetype_perms = 'P'
- changetype_ownergroup = 'O'
- changetype_link = 'L'
-
- def __init__(self, path, changetype, oldvalue = None, newvalue = None):
- self.path = path
- self.changetype = changetype
- self.oldvalue = oldvalue
- self.newvalue = newvalue
-
- def _ftype_str(self, ftype):
- if ftype == '-':
- return 'file'
- elif ftype == 'd':
- return 'directory'
- elif ftype == 'l':
- return 'symlink'
- elif ftype == 'c':
- return 'char device'
- elif ftype == 'b':
- return 'block device'
- elif ftype == 'p':
- return 'fifo'
- elif ftype == 's':
- return 'socket'
- else:
- return 'unknown (%s)' % ftype
-
- def __str__(self):
- if self.changetype == self.changetype_add:
- return '%s was added' % self.path
- elif self.changetype == self.changetype_remove:
- return '%s was removed' % self.path
- elif self.changetype == self.changetype_type:
- return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
- elif self.changetype == self.changetype_perms:
- return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
- elif self.changetype == self.changetype_ownergroup:
- return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
- elif self.changetype == self.changetype_link:
- return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
- else:
- return '%s changed (unknown)' % self.path
-
-
-def blob_to_dict(blob):
- alines = [line for line in blob.data_stream.read().decode('utf-8').splitlines()]
- adict = {}
- for line in alines:
- splitv = [i.strip() for i in line.split('=',1)]
- if len(splitv) > 1:
- adict[splitv[0]] = splitv[1]
- return adict
-
-
-def file_list_to_dict(lines):
- adict = {}
- for line in lines:
- # Leave the last few fields intact so we handle file names containing spaces
- splitv = line.split(None,4)
- # Grab the path and remove the leading .
- path = splitv[4][1:].strip()
- # Handle symlinks
- if(' -> ' in path):
- target = path.split(' -> ')[1]
- path = path.split(' -> ')[0]
- adict[path] = splitv[0:3] + [target]
- else:
- adict[path] = splitv[0:3]
- return adict
-
-
-def compare_file_lists(alines, blines):
- adict = file_list_to_dict(alines)
- bdict = file_list_to_dict(blines)
- filechanges = []
- for path, splitv in adict.items():
- newsplitv = bdict.pop(path, None)
- if newsplitv:
- # Check type
- oldvalue = splitv[0][0]
- newvalue = newsplitv[0][0]
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
- # Check permissions
- oldvalue = splitv[0][1:]
- newvalue = newsplitv[0][1:]
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
- # Check owner/group
- oldvalue = '%s/%s' % (splitv[1], splitv[2])
- newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
- # Check symlink target
- if newsplitv[0][0] == 'l':
- if len(splitv) > 3:
- oldvalue = splitv[3]
- else:
- oldvalue = None
- newvalue = newsplitv[3]
- if oldvalue != newvalue:
- filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
- else:
- filechanges.append(FileChange(path, FileChange.changetype_remove))
-
- # Whatever is left over has been added
- for path in bdict:
- filechanges.append(FileChange(path, FileChange.changetype_add))
-
- return filechanges
-
-
-def compare_lists(alines, blines):
- removed = list(set(alines) - set(blines))
- added = list(set(blines) - set(alines))
-
- filechanges = []
- for pkg in removed:
- filechanges.append(FileChange(pkg, FileChange.changetype_remove))
- for pkg in added:
- filechanges.append(FileChange(pkg, FileChange.changetype_add))
-
- return filechanges
-
-
-def compare_pkg_lists(astr, bstr):
- depvera = bb.utils.explode_dep_versions2(astr)
- depverb = bb.utils.explode_dep_versions2(bstr)
-
- # Strip out changes where the version has increased
- remove = []
- for k in depvera:
- if k in depverb:
- dva = depvera[k]
- dvb = depverb[k]
- if dva and dvb and len(dva) == len(dvb):
- # Since length is the same, sort so that prefixes (e.g. >=) will line up
- dva.sort()
- dvb.sort()
- removeit = True
- for dvai, dvbi in zip(dva, dvb):
- if dvai != dvbi:
- aiprefix = dvai.split(' ')[0]
- biprefix = dvbi.split(' ')[0]
- if aiprefix == biprefix and aiprefix in ['>=', '=']:
- if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
- removeit = False
- break
- else:
- removeit = False
- break
- if removeit:
- remove.append(k)
-
- for k in remove:
- depvera.pop(k)
- depverb.pop(k)
-
- return (depvera, depverb)
-
-
-def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
- adict = blob_to_dict(ablob)
- bdict = blob_to_dict(bblob)
-
- pkgname = os.path.basename(path)
-
- defaultvals = {}
- defaultvals['PKG'] = pkgname
- defaultvals['PKGE'] = '0'
-
- changes = []
- keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
- for key in keys:
- astr = adict.get(key, '')
- bstr = bdict.get(key, '')
- if key in ver_monitor_fields:
- monitored = report_ver or astr or bstr
- else:
- monitored = key in monitor_fields
- mapped_key = defaultval_map.get(key, '')
- if mapped_key:
- if not astr:
- astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
- if not bstr:
- bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
-
- if astr != bstr:
- if (not report_all) and key in numeric_fields:
- aval = int(astr or 0)
- bval = int(bstr or 0)
- if aval != 0:
- percentchg = ((bval - aval) / float(aval)) * 100
- else:
- percentchg = 100
- if abs(percentchg) < monitor_numeric_threshold:
- continue
- elif (not report_all) and key in list_fields:
- if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
- continue
- if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
- (depvera, depverb) = compare_pkg_lists(astr, bstr)
- if depvera == depverb:
- continue
- alist = astr.split()
- alist.sort()
- blist = bstr.split()
- blist.sort()
- # We don't care about the removal of self-dependencies
- if pkgname in alist and not pkgname in blist:
- alist.remove(pkgname)
- if ' '.join(alist) == ' '.join(blist):
- continue
-
- if key == 'PKGR' and not report_all:
- vers = []
- # strip leading 'r' and dots
- for ver in (astr.split()[0], bstr.split()[0]):
- if ver.startswith('r'):
- ver = ver[1:]
- vers.append(ver.replace('.', ''))
- maxlen = max(len(vers[0]), len(vers[1]))
- try:
- # pad with '0' and convert to int
- vers = [int(ver.ljust(maxlen, '0')) for ver in vers]
- except ValueError:
- pass
- else:
- # skip decrements and increments
- if abs(vers[0] - vers[1]) == 1:
- continue
-
- chg = ChangeRecord(path, key, astr, bstr, monitored)
- changes.append(chg)
- return changes
-
-
-def compare_siglists(a_blob, b_blob, taskdiff=False):
- # FIXME collapse down a recipe's tasks?
- alines = a_blob.data_stream.read().decode('utf-8').splitlines()
- blines = b_blob.data_stream.read().decode('utf-8').splitlines()
- keys = []
- pnmap = {}
- def readsigs(lines):
- sigs = {}
- for line in lines:
- linesplit = line.split()
- if len(linesplit) > 2:
- sigs[linesplit[0]] = linesplit[2]
- if not linesplit[0] in keys:
- keys.append(linesplit[0])
- pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
- return sigs
- adict = readsigs(alines)
- bdict = readsigs(blines)
- out = []
-
- changecount = 0
- addcount = 0
- removecount = 0
- if taskdiff:
- with bb.tinfoil.Tinfoil() as tinfoil:
- tinfoil.prepare(config_only=True)
-
- changes = collections.OrderedDict()
-
- def compare_hashfiles(pn, taskname, hash1, hash2):
- hashes = [hash1, hash2]
- hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
-
- if not taskname:
- (pn, taskname) = pn.rsplit('.', 1)
- pn = pnmap.get(pn, pn)
- desc = '%s.%s' % (pn, taskname)
-
- if len(hashfiles) == 0:
- out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
- elif not hash1 in hashfiles:
- out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
- elif not hash2 in hashfiles:
- out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
- else:
- out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
- for line in out2:
- m = hashlib.sha256()
- m.update(line.encode('utf-8'))
- entry = changes.get(m.hexdigest(), (line, []))
- if desc not in entry[1]:
- changes[m.hexdigest()] = (line, entry[1] + [desc])
-
- # Define recursion callback
- def recursecb(key, hash1, hash2):
- compare_hashfiles(key, None, hash1, hash2)
- return []
-
- for key in keys:
- siga = adict.get(key, None)
- sigb = bdict.get(key, None)
- if siga is not None and sigb is not None and siga != sigb:
- changecount += 1
- (pn, taskname) = key.rsplit('.', 1)
- compare_hashfiles(pn, taskname, siga, sigb)
- elif siga is None:
- addcount += 1
- elif sigb is None:
- removecount += 1
- for key, item in changes.items():
- line, tasks = item
- if len(tasks) == 1:
- desc = tasks[0]
- elif len(tasks) == 2:
- desc = '%s and %s' % (tasks[0], tasks[1])
- else:
- desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
- out.append('%s: %s' % (desc, line))
- else:
- for key in keys:
- siga = adict.get(key, None)
- sigb = bdict.get(key, None)
- if siga is not None and sigb is not None and siga != sigb:
- out.append('%s changed from %s to %s' % (key, siga, sigb))
- changecount += 1
- elif siga is None:
- out.append('%s was added' % key)
- addcount += 1
- elif sigb is None:
- out.append('%s was removed' % key)
- removecount += 1
- out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
- return '\n'.join(out)
-
-
-def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
- sigs=False, sigsdiff=False, exclude_path=None):
- repo = git.Repo(repopath)
- assert repo.bare == False
- commit = repo.commit(revision1)
- diff = commit.diff(revision2)
-
- changes = []
-
- if sigs or sigsdiff:
- for d in diff.iter_change_type('M'):
- if d.a_blob.path == 'siglist.txt':
- changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
- return changes
-
- for d in diff.iter_change_type('M'):
- path = os.path.dirname(d.a_blob.path)
- if path.startswith('packages/'):
- filename = os.path.basename(d.a_blob.path)
- if filename == 'latest':
- changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
- elif filename.startswith('latest.'):
- chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
- changes.append(chg)
- elif path.startswith('images/'):
- filename = os.path.basename(d.a_blob.path)
- if filename in img_monitor_files:
- if filename == 'files-in-image.txt':
- alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
- blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
- filechanges = compare_file_lists(alines,blines)
- if filechanges:
- chg = ChangeRecord(path, filename, None, None, True)
- chg.filechanges = filechanges
- changes.append(chg)
- elif filename == 'installed-package-names.txt':
- alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
- blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
- filechanges = compare_lists(alines,blines)
- if filechanges:
- chg = ChangeRecord(path, filename, None, None, True)
- chg.filechanges = filechanges
- changes.append(chg)
- else:
- chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
- changes.append(chg)
- elif filename == 'image-info.txt':
- changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
- elif '/image-files/' in path:
- chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
- changes.append(chg)
-
- # Look for added preinst/postinst/prerm/postrm
- # (without reporting newly added recipes)
- addedpkgs = []
- addedchanges = []
- for d in diff.iter_change_type('A'):
- path = os.path.dirname(d.b_blob.path)
- if path.startswith('packages/'):
- filename = os.path.basename(d.b_blob.path)
- if filename == 'latest':
- addedpkgs.append(path)
- elif filename.startswith('latest.'):
- chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
- addedchanges.append(chg)
- for chg in addedchanges:
- found = False
- for pkg in addedpkgs:
- if chg.path.startswith(pkg):
- found = True
- break
- if not found:
- changes.append(chg)
-
- # Look for cleared preinst/postinst/prerm/postrm
- for d in diff.iter_change_type('D'):
- path = os.path.dirname(d.a_blob.path)
- if path.startswith('packages/'):
- filename = os.path.basename(d.a_blob.path)
- if filename != 'latest' and filename.startswith('latest.'):
- chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
- changes.append(chg)
-
- # Link related changes
- for chg in changes:
- if chg.monitored:
- for chg2 in changes:
- # (Check dirname in the case of fields from recipe info files)
- if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
- if chg2.fieldname in related_fields.get(chg.fieldname, []):
- chg.related.append(chg2)
- elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
- chg.related.append(chg2)
-
- # filter out unwanted paths
- if exclude_path:
- for chg in changes:
- if chg.filechanges:
- fchgs = []
- for fchg in chg.filechanges:
- for epath in exclude_path:
- if fchg.path.startswith(epath):
- break
- else:
- fchgs.append(fchg)
- chg.filechanges = fchgs
-
- if report_all:
- return changes
- else:
- return [chg for chg in changes if chg.monitored]
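The buildhistory_analysis module above is normally driven by scripts/buildhistory-diff, but its entry point is process_changes(). Below is a minimal sketch of calling it directly, assuming a BitBake environment (the module imports bb.utils and bb.tinfoil) plus GitPython; the repository path and revisions are placeholders.

    import oe.buildhistory_analysis as bha

    bha.init_colours(False)   # plain output; pass True for ANSI colours
    changes = bha.process_changes('/path/to/buildhistory', 'HEAD~1', 'HEAD')
    for chg in changes:       # monitored ChangeRecord objects by default
        text = str(chg)
        if text:
            print(text)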
diff --git a/import-layers/yocto-poky/meta/lib/oe/cachedpath.py b/import-layers/yocto-poky/meta/lib/oe/cachedpath.py
deleted file mode 100644
index 0840cc4c3..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/cachedpath.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#
-# Based on the standard python library functions but avoiding
-# repeated stat calls. It's assumed the files will not change from under us
-# so we can cache stat calls.
-#
-
-import os
-import errno
-import stat as statmod
-
-class CachedPath(object):
- def __init__(self):
- self.statcache = {}
- self.lstatcache = {}
- self.normpathcache = {}
- return
-
- def updatecache(self, x):
- x = self.normpath(x)
- if x in self.statcache:
- del self.statcache[x]
- if x in self.lstatcache:
- del self.lstatcache[x]
-
- def normpath(self, path):
- if path in self.normpathcache:
- return self.normpathcache[path]
- newpath = os.path.normpath(path)
- self.normpathcache[path] = newpath
- return newpath
-
- def _callstat(self, path):
- if path in self.statcache:
- return self.statcache[path]
- try:
- st = os.stat(path)
- self.statcache[path] = st
- return st
- except os.error:
- self.statcache[path] = False
- return False
-
-    # We might as well call lstat first and then additionally
-    # call stat in the symbolic link case, since this turns
-    # out to be much more efficient in real world usage
-    # of this cache
- def callstat(self, path):
- path = self.normpath(path)
- self.calllstat(path)
- return self.statcache[path]
-
- def calllstat(self, path):
- path = self.normpath(path)
- if path in self.lstatcache:
- return self.lstatcache[path]
- #bb.error("LStatpath:" + path)
- try:
- lst = os.lstat(path)
- self.lstatcache[path] = lst
- if not statmod.S_ISLNK(lst.st_mode):
- self.statcache[path] = lst
- else:
- self._callstat(path)
- return lst
- except (os.error, AttributeError):
- self.lstatcache[path] = False
- self.statcache[path] = False
- return False
-
- # This follows symbolic links, so both islink() and isdir() can be true
-    # for the same path on systems that support symlinks
- def isfile(self, path):
- """Test whether a path is a regular file"""
- st = self.callstat(path)
- if not st:
- return False
- return statmod.S_ISREG(st.st_mode)
-
- # Is a path a directory?
- # This follows symbolic links, so both islink() and isdir()
- # can be true for the same path on systems that support symlinks
- def isdir(self, s):
- """Return true if the pathname refers to an existing directory."""
- st = self.callstat(s)
- if not st:
- return False
- return statmod.S_ISDIR(st.st_mode)
-
- def islink(self, path):
- """Test whether a path is a symbolic link"""
- st = self.calllstat(path)
- if not st:
- return False
- return statmod.S_ISLNK(st.st_mode)
-
- # Does a path exist?
- # This is false for dangling symbolic links on systems that support them.
- def exists(self, path):
- """Test whether a path exists. Returns False for broken symbolic links"""
- if self.callstat(path):
- return True
- return False
-
- def lexists(self, path):
- """Test whether a path exists. Returns True for broken symbolic links"""
- if self.calllstat(path):
- return True
- return False
-
- def stat(self, path):
- return self.callstat(path)
-
- def lstat(self, path):
- return self.calllstat(path)
-
- def walk(self, top, topdown=True, onerror=None, followlinks=False):
- # Matches os.walk, not os.path.walk()
-
- # We may not have read permission for top, in which case we can't
- # get a list of the files the directory contains. os.path.walk
- # always suppressed the exception then, rather than blow up for a
- # minor reason when (say) a thousand readable directories are still
- # left to visit. That logic is copied here.
- try:
- names = os.listdir(top)
- except os.error as err:
- if onerror is not None:
- onerror(err)
- return
-
- dirs, nondirs = [], []
- for name in names:
- if self.isdir(os.path.join(top, name)):
- dirs.append(name)
- else:
- nondirs.append(name)
-
- if topdown:
- yield top, dirs, nondirs
- for name in dirs:
- new_path = os.path.join(top, name)
- if followlinks or not self.islink(new_path):
- for x in self.walk(new_path, topdown, onerror, followlinks):
- yield x
- if not topdown:
- yield top, dirs, nondirs
-
- ## realpath() related functions
- def __is_path_below(self, file, root):
- return (file + os.path.sep).startswith(root)
-
- def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
- """Calculates real path of symlink 'start' + 'rel_path' below
- 'root'; no part of 'start' below 'root' must contain symlinks. """
- have_dir = True
-
- for d in rel_path.split(os.path.sep):
- if not have_dir and not assume_dir:
- raise OSError(errno.ENOENT, "no such directory %s" % start)
-
- if d == os.path.pardir: # '..'
- if len(start) >= len(root):
- # do not follow '..' before root
- start = os.path.dirname(start)
- else:
- # emit warning?
- pass
- else:
- (start, have_dir) = self.__realpath(os.path.join(start, d),
- root, loop_cnt, assume_dir)
-
- assert(self.__is_path_below(start, root))
-
- return start
-
- def __realpath(self, file, root, loop_cnt, assume_dir):
- while self.islink(file) and len(file) >= len(root):
- if loop_cnt == 0:
- raise OSError(errno.ELOOP, file)
-
- loop_cnt -= 1
- target = os.path.normpath(os.readlink(file))
-
- if not os.path.isabs(target):
- tdir = os.path.dirname(file)
- assert(self.__is_path_below(tdir, root))
- else:
- tdir = root
-
- file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
-
- try:
- is_dir = self.isdir(file)
- except:
- is_dir = False
-
- return (file, is_dir)
-
- def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
-        """ Returns the canonical path of 'file', assuming a
- toplevel 'root' directory. When 'use_physdir' is set, all
- preceding path components of 'file' will be resolved first;
- this flag should be set unless it is guaranteed that there is
- no symlink in the path. When 'assume_dir' is not set, missing
- path components will raise an ENOENT error"""
-
- root = os.path.normpath(root)
- file = os.path.normpath(file)
-
- if not root.endswith(os.path.sep):
- # letting root end with '/' makes some things easier
- root = root + os.path.sep
-
- if not self.__is_path_below(file, root):
- raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
-
- try:
- if use_physdir:
- file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
- else:
- file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
- except OSError as e:
- if e.errno == errno.ELOOP:
-                # make ELOOP more readable; without catching it, a backtrace
-                # with 100s of OSError exceptions would be printed
-                # else
- raise OSError(errno.ELOOP,
-                              "too many recursions while resolving '%s'; loop in '%s'" %
- (file, e.strerror))
-
- raise
-
- return file
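CachedPath mirrors a subset of os.path but memoises stat()/lstat() results per instance, so repeated queries against a tree that is not changing avoid redundant system calls. A small usage sketch follows; the queried paths are illustrative.

    from oe.cachedpath import CachedPath

    cpath = CachedPath()
    # The first query stats the path; later queries for the same path hit the cache.
    if cpath.isdir('/usr/lib') and not cpath.islink('/usr/lib'):
        for top, dirs, files in cpath.walk('/usr/lib'):
            pass   # stat results for visited entries are now cached
    # If a path has been modified on disk, invalidate its cached entries.
    cpath.updatecache('/usr/lib')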
diff --git a/import-layers/yocto-poky/meta/lib/oe/classextend.py b/import-layers/yocto-poky/meta/lib/oe/classextend.py
deleted file mode 100644
index d2eeaf0e5..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/classextend.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import collections
-
-class ClassExtender(object):
- def __init__(self, extname, d):
- self.extname = extname
- self.d = d
- self.pkgs_mapping = []
-
- def extend_name(self, name):
- if name.startswith("kernel-") or name == "virtual/kernel":
- return name
- if name.startswith("rtld"):
- return name
- if name.endswith("-crosssdk"):
- return name
- if name.endswith("-" + self.extname):
- name = name.replace("-" + self.extname, "")
- if name.startswith("virtual/"):
- subs = name.split("/", 1)[1]
- if not subs.startswith(self.extname):
- return "virtual/" + self.extname + "-" + subs
- return name
- if not name.startswith(self.extname):
- return self.extname + "-" + name
- return name
-
- def map_variable(self, varname, setvar = True):
- var = self.d.getVar(varname)
- if not var:
- return ""
- var = var.split()
- newvar = []
- for v in var:
- newvar.append(self.extend_name(v))
- newdata = " ".join(newvar)
- if setvar:
- self.d.setVar(varname, newdata)
- return newdata
-
- def map_regexp_variable(self, varname, setvar = True):
- var = self.d.getVar(varname)
- if not var:
- return ""
- var = var.split()
- newvar = []
- for v in var:
- if v.startswith("^" + self.extname):
- newvar.append(v)
- elif v.startswith("^"):
- newvar.append("^" + self.extname + "-" + v[1:])
- else:
- newvar.append(self.extend_name(v))
- newdata = " ".join(newvar)
- if setvar:
- self.d.setVar(varname, newdata)
- return newdata
-
- def map_depends(self, dep):
- if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
- return dep
- else:
-            # Do not extend dependencies that already have a multilib prefix
- var = self.d.getVar("MULTILIB_VARIANTS")
- if var:
- var = var.split()
- for v in var:
- if dep.startswith(v):
- return dep
- return self.extend_name(dep)
-
- def map_depends_variable(self, varname, suffix = ""):
- # We need to preserve EXTENDPKGV so it can be expanded correctly later
- if suffix:
- varname = varname + "_" + suffix
- orig = self.d.getVar("EXTENDPKGV", False)
- self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
- deps = self.d.getVar(varname)
- if not deps:
- self.d.setVar("EXTENDPKGV", orig)
- return
- deps = bb.utils.explode_dep_versions2(deps)
- newdeps = collections.OrderedDict()
- for dep in deps:
- newdeps[self.map_depends(dep)] = deps[dep]
-
- self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
- self.d.setVar("EXTENDPKGV", orig)
-
- def map_packagevars(self):
- for pkg in (self.d.getVar("PACKAGES").split() + [""]):
- self.map_depends_variable("RDEPENDS", pkg)
- self.map_depends_variable("RRECOMMENDS", pkg)
- self.map_depends_variable("RSUGGESTS", pkg)
- self.map_depends_variable("RPROVIDES", pkg)
- self.map_depends_variable("RREPLACES", pkg)
- self.map_depends_variable("RCONFLICTS", pkg)
- self.map_depends_variable("PKG", pkg)
-
- def rename_packages(self):
- for pkg in (self.d.getVar("PACKAGES") or "").split():
- if pkg.startswith(self.extname):
- self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
- continue
- self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
-
- self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
-
- def rename_package_variables(self, variables):
- for pkg_mapping in self.pkgs_mapping:
- for subs in variables:
- self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
-
-class NativesdkClassExtender(ClassExtender):
- def map_depends(self, dep):
- if dep.startswith(self.extname):
- return dep
- if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
- return dep + "-crosssdk"
- elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
- return dep
- else:
- return self.extend_name(dep)
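For reference, ClassExtender.extend_name() leaves kernel, rtld, -crosssdk and already-prefixed names alone and prefixes everything else with the extension name. A hypothetical illustration with a 'lib32' multilib extension; extend_name() does not consult the datastore, so None stands in for d here.

    from oe.classextend import ClassExtender

    ext = ClassExtender('lib32', None)
    ext.extend_name('zlib')              # -> 'lib32-zlib'
    ext.extend_name('lib32-zlib')        # -> 'lib32-zlib' (already prefixed)
    ext.extend_name('virtual/kernel')    # -> 'virtual/kernel' (exempt)
    ext.extend_name('virtual/libc')      # -> 'virtual/lib32-libc'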
diff --git a/import-layers/yocto-poky/meta/lib/oe/classutils.py b/import-layers/yocto-poky/meta/lib/oe/classutils.py
deleted file mode 100644
index 45cd5249b..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/classutils.py
+++ /dev/null
@@ -1,44 +0,0 @@
-
-class ClassRegistryMeta(type):
-    """Give each ClassRegistry its own registry"""
- def __init__(cls, name, bases, attrs):
- cls.registry = {}
- type.__init__(cls, name, bases, attrs)
-
-class ClassRegistry(type, metaclass=ClassRegistryMeta):
- """Maintain a registry of classes, indexed by name.
-
-Note that this implementation requires that the names be unique, as it uses
-a dictionary to hold the classes by name.
-
-The name in the registry can be overridden via the 'name' attribute of the
-class, and the 'priority' attribute controls priority. The prioritized()
-method returns the registered classes in priority order.
-
-Subclasses of ClassRegistry may define an 'implemented' property to exert
-control over whether the class will be added to the registry (e.g. to keep
-abstract base classes out of the registry)."""
- priority = 0
- def __init__(cls, name, bases, attrs):
- super(ClassRegistry, cls).__init__(name, bases, attrs)
- try:
- if not cls.implemented:
- return
- except AttributeError:
- pass
-
- try:
- cls.name
- except AttributeError:
- cls.name = name
- cls.registry[cls.name] = cls
-
- @classmethod
- def prioritized(tcls):
- return sorted(list(tcls.registry.values()),
- key=lambda v: (v.priority, v.name), reverse=True)
-
- def unregister(cls):
- for key in cls.registry.keys():
- if cls.registry[key] is cls:
- del cls.registry[key]
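Users of ClassRegistry (oe.terminal, for example) make it the metaclass of a base class; each named subclass then registers itself under its 'name' attribute, 'implemented = False' keeps a class out of the registry, and prioritized() returns the registered classes highest priority first. A minimal sketch with made-up class names:

    import oe.classutils

    class Handler(metaclass=oe.classutils.ClassRegistry):
        name = 'default'
        priority = 0

    class Fancy(Handler):
        name = 'fancy'
        priority = 10

    class Abstract(Handler):
        implemented = False          # never added to the registry

    Handler.prioritized()            # [Fancy, Handler] - highest priority first
    sorted(Handler.registry)         # ['default', 'fancy']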
diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
deleted file mode 100644
index 4b94806c7..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# This class should provide easy access to the different aspects of the
-# buildsystem such as layers, bitbake location, etc.
-import stat
-import shutil
-
-def _smart_copy(src, dest):
- import subprocess
- # smart_copy will choose the correct function depending on whether the
- # source is a file or a directory.
- mode = os.stat(src).st_mode
- if stat.S_ISDIR(mode):
- bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
- | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
- else:
- shutil.copyfile(src, dest)
- shutil.copymode(src, dest)
-
-class BuildSystem(object):
- def __init__(self, context, d):
- self.d = d
- self.context = context
- self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
- self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
-
- def copy_bitbake_and_layers(self, destdir, workspace_name=None):
- # Copy in all metadata layers + bitbake (as repositories)
- layers_copied = []
- bb.utils.mkdirhier(destdir)
- layers = list(self.layerdirs)
-
- corebase = os.path.abspath(self.d.getVar('COREBASE'))
- layers.append(corebase)
- # The bitbake build system uses the meta-skeleton layer as a layout
-        # for common recipes, e.g. the recipetool script to create kernel recipes.
- # Add the meta-skeleton layer to be included as part of the eSDK installation
- layers.append(os.path.join(corebase, 'meta-skeleton'))
-
- # Exclude layers
- for layer_exclude in self.layers_exclude:
- if layer_exclude in layers:
- layers.remove(layer_exclude)
-
- workspace_newname = workspace_name
- if workspace_newname:
- layernames = [os.path.basename(layer) for layer in layers]
- extranum = 0
- while workspace_newname in layernames:
- extranum += 1
- workspace_newname = '%s-%d' % (workspace_name, extranum)
-
- corebase_files = self.d.getVar('COREBASE_FILES').split()
- corebase_files = [corebase + '/' +x for x in corebase_files]
- # Make sure bitbake goes in
- bitbake_dir = bb.__file__.rsplit('/', 3)[0]
- corebase_files.append(bitbake_dir)
-
- for layer in layers:
- layerconf = os.path.join(layer, 'conf', 'layer.conf')
- layernewname = os.path.basename(layer)
- workspace = False
- if os.path.exists(layerconf):
- with open(layerconf, 'r') as f:
- if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
- if workspace_newname:
- layernewname = workspace_newname
- workspace = True
- else:
- bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
- continue
-
- # If the layer was already under corebase, leave it there
- # since layers such as meta have issues when moved.
- layerdestpath = destdir
- if corebase == os.path.dirname(layer):
- layerdestpath += '/' + os.path.basename(corebase)
- else:
- layer_relative = os.path.basename(corebase) + '/' + os.path.relpath(layer, corebase)
- if os.path.dirname(layer_relative) != layernewname:
- layerdestpath += '/' + os.path.dirname(layer_relative)
-
- layerdestpath += '/' + layernewname
-
- layer_relative = os.path.relpath(layerdestpath,
- destdir)
- layers_copied.append(layer_relative)
-
- # Treat corebase as special since it typically will contain
- # build directories or other custom items.
- if corebase == layer:
- bb.utils.mkdirhier(layerdestpath)
- for f in corebase_files:
- f_basename = os.path.basename(f)
- destname = os.path.join(layerdestpath, f_basename)
- _smart_copy(f, destname)
- else:
- if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
- bb.note("Skipping layer %s, already handled" % layer)
- else:
- _smart_copy(layer, layerdestpath)
-
- if workspace:
-                # Make some adjustments to the original workspace layer
- # Drop sources (recipe tasks will be locked, so we don't need them)
- srcdir = os.path.join(layerdestpath, 'sources')
- if os.path.isdir(srcdir):
- shutil.rmtree(srcdir)
- # Drop all bbappends except the one for the image the SDK is being built for
- # (because of externalsrc, the workspace bbappends will interfere with the
- # locked signatures if present, and we don't need them anyway)
- image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
- appenddir = os.path.join(layerdestpath, 'appends')
- if os.path.isdir(appenddir):
- for fn in os.listdir(appenddir):
- if fn == image_bbappend:
- continue
- else:
- os.remove(os.path.join(appenddir, fn))
- # Drop README
- readme = os.path.join(layerdestpath, 'README')
- if os.path.exists(readme):
- os.remove(readme)
- # Filter out comments in layer.conf and change layer name
- layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
- with open(layerconf, 'r') as f:
- origlines = f.readlines()
- with open(layerconf, 'w') as f:
- for line in origlines:
- if line.startswith('#'):
- continue
- line = line.replace('workspacelayer', workspace_newname)
- f.write(line)
-
- # meta-skeleton layer is added as part of the build system
- # but not as a layer included in the build, therefore it is
- # not reported to the function caller.
- for layer in layers_copied:
- if layer.endswith('/meta-skeleton'):
- layers_copied.remove(layer)
- break
-
- return layers_copied
-
-def generate_locked_sigs(sigfile, d):
- bb.utils.mkdirhier(os.path.dirname(sigfile))
- depd = d.getVar('BB_TASKDEPDATA', False)
- tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
- bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
-
-def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
- with open(lockedsigs, 'r') as infile:
- bb.utils.mkdirhier(os.path.dirname(pruned_output))
- with open(pruned_output, 'w') as f:
- invalue = False
- for line in infile:
- if invalue:
- if line.endswith('\\\n'):
- splitval = line.strip().split(':')
- if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
- f.write(line)
- else:
- f.write(line)
- invalue = False
- elif line.startswith('SIGGEN_LOCKEDSIGS'):
- invalue = True
- f.write(line)
-
-def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output=None):
- merged = {}
- arch_order = []
- with open(lockedsigs_main, 'r') as f:
- invalue = None
- for line in f:
- if invalue:
- if line.endswith('\\\n'):
- merged[invalue].append(line)
- else:
- invalue = None
- elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
- invalue = line[18:].split('=', 1)[0].rstrip()
- merged[invalue] = []
- arch_order.append(invalue)
-
- with open(lockedsigs_extra, 'r') as f:
- invalue = None
- tocopy = {}
- for line in f:
- if invalue:
- if line.endswith('\\\n'):
- if not line in merged[invalue]:
- target, task = line.strip().split(':')[:2]
- if not copy_tasks or task in copy_tasks:
- tocopy[invalue].append(line)
- merged[invalue].append(line)
- else:
- invalue = None
- elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
- invalue = line[18:].split('=', 1)[0].rstrip()
- if not invalue in merged:
- merged[invalue] = []
- arch_order.append(invalue)
- tocopy[invalue] = []
-
- def write_sigs_file(fn, types, sigs):
- fulltypes = []
- bb.utils.mkdirhier(os.path.dirname(fn))
- with open(fn, 'w') as f:
- for typename in types:
- lines = sigs[typename]
- if lines:
- f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
- for line in lines:
- f.write(line)
- f.write(' "\n')
- fulltypes.append(typename)
- f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))
-
- if copy_output:
- write_sigs_file(copy_output, list(tocopy.keys()), tocopy)
- if merged_output:
- write_sigs_file(merged_output, arch_order, merged)
-
-def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring="", filterfile=None):
- import shutil
- bb.note('Generating sstate-cache...')
-
- nativelsbstring = d.getVar('NATIVELSBSTRING')
- bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
- if fixedlsbstring and nativelsbstring != fixedlsbstring:
- nativedir = output_sstate_cache + '/' + nativelsbstring
- if os.path.isdir(nativedir):
- destdir = os.path.join(output_sstate_cache, fixedlsbstring)
- for root, _, files in os.walk(nativedir):
- for fn in files:
- src = os.path.join(root, fn)
- dest = os.path.join(destdir, os.path.relpath(src, nativedir))
- if os.path.exists(dest):
- # Already exists, and it'll be the same file, so just delete it
- os.unlink(src)
- else:
- bb.utils.mkdirhier(os.path.dirname(dest))
- shutil.move(src, dest)
-
-def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, logfile=None):
- import subprocess
-
- bb.note('Generating sstate task list...')
-
- if not cwd:
- cwd = os.getcwd()
- if logfile:
- logparam = '-l %s' % logfile
- else:
- logparam = ''
- cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
- env = dict(d.getVar('BB_ORIGENV', False))
- env.pop('BUILDDIR', '')
- env.pop('BBPATH', '')
- pathitems = env['PATH'].split(':')
- env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
- bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
diff --git a/import-layers/yocto-poky/meta/lib/oe/data.py b/import-layers/yocto-poky/meta/lib/oe/data.py
deleted file mode 100644
index b8901e63f..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/data.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import json
-import oe.maketype
-
-def typed_value(key, d):
- """Construct a value for the specified metadata variable, using its flags
- to determine the type and parameters for construction."""
- var_type = d.getVarFlag(key, 'type')
- flags = d.getVarFlags(key)
- if flags is not None:
- flags = dict((flag, d.expand(value))
- for flag, value in list(flags.items()))
- else:
- flags = {}
-
- try:
- return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
- except (TypeError, ValueError) as exc:
- bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
-
-def export2json(d, json_file, expand=True, searchString="",replaceString=""):
- data2export = {}
- keys2export = []
-
- for key in d.keys():
- if key.startswith("_"):
- continue
- elif key.startswith("BB"):
- continue
- elif key.startswith("B_pn"):
- continue
- elif key.startswith("do_"):
- continue
- elif d.getVarFlag(key, "func"):
- continue
-
- keys2export.append(key)
-
- for key in keys2export:
- try:
- data2export[key] = d.getVar(key, expand).replace(searchString,replaceString)
- except bb.data_smart.ExpansionError:
- data2export[key] = ''
- except AttributeError:
- pass
-
- with open(json_file, "w") as f:
- json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
diff --git a/import-layers/yocto-poky/meta/lib/oe/distro_check.py b/import-layers/yocto-poky/meta/lib/oe/distro_check.py
deleted file mode 100644
index e775c3a6e..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/distro_check.py
+++ /dev/null
@@ -1,308 +0,0 @@
-def create_socket(url, d):
- import urllib
- from bb.utils import export_proxies
-
- export_proxies(d)
- return urllib.request.urlopen(url)
-
-def get_links_from_url(url, d):
- "Return all the href links found on the web location"
-
- from bs4 import BeautifulSoup, SoupStrainer
-
- soup = BeautifulSoup(create_socket(url,d), "html.parser", parse_only=SoupStrainer("a"))
- hyperlinks = []
- for line in soup.find_all('a', href=True):
- hyperlinks.append(line['href'].strip('/'))
- return hyperlinks
-
-def find_latest_numeric_release(url, d):
- "Find the latest listed numeric release on the given url"
- max=0
- maxstr=""
- for link in get_links_from_url(url, d):
- try:
- # TODO use LooseVersion
- release = float(link)
- except:
- release = 0
- if release > max:
- max = release
- maxstr = link
- return maxstr
-
-def is_src_rpm(name):
- "Check if the link is pointing to a src.rpm file"
- return name.endswith(".src.rpm")
-
-def package_name_from_srpm(srpm):
- "Strip out the package name from the src.rpm filename"
-
- # ca-certificates-2016.2.7-1.0.fc24.src.rpm
- # ^name ^ver ^release^removed
- (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
- return name
-
-def get_source_package_list_from_url(url, section, d):
- "Return a sectioned list of package names from a URL list"
-
- bb.note("Reading %s: %s" % (url, section))
- links = get_links_from_url(url, d)
- srpms = filter(is_src_rpm, links)
- names_list = map(package_name_from_srpm, srpms)
-
- new_pkgs = set()
- for pkgs in names_list:
- new_pkgs.add(pkgs + ":" + section)
- return new_pkgs
-
-def get_source_package_list_from_url_by_letter(url, section, d):
- import string
- from urllib.error import HTTPError
- packages = set()
- for letter in (string.ascii_lowercase + string.digits):
- # Not all subfolders may exist, so silently handle 404
- try:
- packages |= get_source_package_list_from_url(url + "/" + letter, section, d)
- except HTTPError as e:
- if e.code != 404: raise
- return packages
-
-def get_latest_released_fedora_source_package_list(d):
-    "Returns a list of all the package names in the latest Fedora distro"
- latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
- package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
- package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
- return latest, package_names
-
-def get_latest_released_opensuse_source_package_list(d):
-    "Returns a list of all the package names in the latest openSUSE distro"
- latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)
-
- package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
- package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
- return latest, package_names
-
-def get_latest_released_clear_source_package_list(d):
- latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
- package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
- return latest, package_names
-
-def find_latest_debian_release(url, d):
- "Find the latest listed debian release on the given url"
-
- releases = [link.replace("Debian", "")
- for link in get_links_from_url(url, d)
- if link.startswith("Debian")]
- releases.sort()
- try:
- return releases[-1]
- except:
- return "_NotFound_"
-
-def get_debian_style_source_package_list(url, section, d):
- "Return the list of package-names stored in the debian style Sources.gz file"
- import gzip
-
- package_names = set()
- for line in gzip.open(create_socket(url, d), mode="rt"):
- if line.startswith("Package:"):
- pkg = line.split(":", 1)[1].strip()
- package_names.add(pkg + ":" + section)
- return package_names
-
-def get_latest_released_debian_source_package_list(d):
-    "Returns a list of all the package names in the latest Debian distro"
- latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
- url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
- package_names = get_debian_style_source_package_list(url, "main", d)
- url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
- package_names |= get_debian_style_source_package_list(url, "updates", d)
- return latest, package_names
-
-def find_latest_ubuntu_release(url, d):
- """
- Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
-
- To avoid matching development releases look for distributions that have
- updates, so the resulting distro could be any supported release.
- """
- url += "?C=M;O=D" # Descending Sort by Last Modified
- for link in get_links_from_url(url, d):
- if "-updates" in link:
- distro = link.replace("-updates", "")
- return distro
- return "_NotFound_"
-
-def get_latest_released_ubuntu_source_package_list(d):
-    "Returns a list of all the package names in the latest Ubuntu distro"
- latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
- url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
- package_names = get_debian_style_source_package_list(url, "main", d)
- url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
- package_names |= get_debian_style_source_package_list(url, "updates", d)
- return latest, package_names
-
-def create_distro_packages_list(distro_check_dir, d):
- import shutil
-
- pkglst_dir = os.path.join(distro_check_dir, "package_lists")
- bb.utils.remove(pkglst_dir, True)
- bb.utils.mkdirhier(pkglst_dir)
-
- per_distro_functions = (
- ("Debian", get_latest_released_debian_source_package_list),
- ("Ubuntu", get_latest_released_ubuntu_source_package_list),
- ("Fedora", get_latest_released_fedora_source_package_list),
- ("openSUSE", get_latest_released_opensuse_source_package_list),
- ("Clear", get_latest_released_clear_source_package_list),
- )
-
- for name, fetcher_func in per_distro_functions:
- try:
- release, package_list = fetcher_func(d)
- except Exception as e:
- bb.warn("Cannot fetch packages for %s: %s" % (name, e))
- bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
- if len(package_list) == 0:
- bb.error("Didn't fetch any packages for %s %s" % (name, release))
-
- package_list_file = os.path.join(pkglst_dir, name + "-" + release)
- with open(package_list_file, 'w') as f:
- for pkg in sorted(package_list):
- f.write(pkg + "\n")
-
-def update_distro_data(distro_check_dir, datetime, d):
- """
-    If the distro packages list data is old, rebuild it.
-    The operation has to be protected by a lock so that
-    only one thread performs it at a time.
- """
- if not os.path.isdir (distro_check_dir):
- try:
- bb.note ("Making new directory: %s" % distro_check_dir)
- os.makedirs (distro_check_dir)
- except OSError:
- raise Exception('Unable to create directory %s' % (distro_check_dir))
-
-
- datetime_file = os.path.join(distro_check_dir, "build_datetime")
- saved_datetime = "_invalid_"
- import fcntl
- try:
- if not os.path.exists(datetime_file):
- open(datetime_file, 'w+').close() # touch the file so that the next open won't fail
-
- f = open(datetime_file, "r+")
- fcntl.lockf(f, fcntl.LOCK_EX)
- saved_datetime = f.read()
- if saved_datetime[0:8] != datetime[0:8]:
- bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
- bb.note("Regenerating distro package lists")
- create_distro_packages_list(distro_check_dir, d)
- f.seek(0)
- f.write(datetime)
-
- except OSError as e:
- raise Exception('Unable to open timestamp: %s' % e)
- finally:
- fcntl.lockf(f, fcntl.LOCK_UN)
- f.close()
-
-def compare_in_distro_packages_list(distro_check_dir, d):
- if not os.path.isdir(distro_check_dir):
- raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
-
- localdata = bb.data.createCopy(d)
- pkglst_dir = os.path.join(distro_check_dir, "package_lists")
- matching_distros = []
- pn = recipe_name = d.getVar('PN')
- bb.note("Checking: %s" % pn)
-
- if pn.find("-native") != -1:
- pnstripped = pn.split("-native")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
- recipe_name = pnstripped[0]
-
- if pn.startswith("nativesdk-"):
- pnstripped = pn.split("nativesdk-")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
- recipe_name = pnstripped[1]
-
- if pn.find("-cross") != -1:
- pnstripped = pn.split("-cross")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
- recipe_name = pnstripped[0]
-
- if pn.find("-initial") != -1:
- pnstripped = pn.split("-initial")
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
- recipe_name = pnstripped[0]
-
- bb.note("Recipe: %s" % recipe_name)
-
- distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
- tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
- for str in tmp.split():
- if str and str.find("=") == -1 and distro_exceptions[str]:
- matching_distros.append(str)
-
- distro_pn_aliases = {}
- for str in tmp.split():
- if "=" in str:
- (dist, pn_alias) = str.split('=')
- distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
-
- for file in os.listdir(pkglst_dir):
- (distro, distro_release) = file.split("-")
- f = open(os.path.join(pkglst_dir, file), "r")
- for line in f:
- (pkg, section) = line.split(":")
- if distro.lower() in distro_pn_aliases:
- pn = distro_pn_aliases[distro.lower()]
- else:
- pn = recipe_name
- if pn == pkg:
- matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
- f.close()
- break
- f.close()
-
- for item in tmp.split():
- matching_distros.append(item)
- bb.note("Matching: %s" % matching_distros)
- return matching_distros
-
-def create_log_file(d, logname):
- logpath = d.getVar('LOG_DIR')
- bb.utils.mkdirhier(logpath)
- logfn, logsuffix = os.path.splitext(logname)
- logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME'), logsuffix))
- if not os.path.exists(logfile):
- slogfile = os.path.join(logpath, logname)
- if os.path.exists(slogfile):
- os.remove(slogfile)
- open(logfile, 'w+').close()
- os.symlink(logfile, slogfile)
- d.setVar('LOG_FILE', logfile)
- return logfile
-
-
-def save_distro_check_result(result, datetime, result_file, d):
- pn = d.getVar('PN')
- logdir = d.getVar('LOG_DIR')
- if not logdir:
- bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
- return
- bb.utils.mkdirhier(logdir)
-
- line = pn
- for i in result:
- line = line + "," + i
- f = open(result_file, "a")
- import fcntl
- fcntl.lockf(f, fcntl.LOCK_EX)
- f.seek(0, os.SEEK_END) # seek to the end of file
- f.write(line + "\n")
- fcntl.lockf(f, fcntl.LOCK_UN)
- f.close()
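The result file handling above (and the datetime file in update_distro_data) relies on POSIX advisory locks so that recipes building in parallel can safely share one file. A minimal stand-alone sketch of that append-under-lock pattern, with a hypothetical file path:

import fcntl
import os

def append_line_locked(path, line):
    # Append one line while holding an exclusive advisory lock, much like
    # save_distro_check_result does for its comma-separated results file.
    with open(path, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        try:
            f.seek(0, os.SEEK_END)   # write at the current end of the file
            f.write(line + "\n")
        finally:
            fcntl.lockf(f, fcntl.LOCK_UN)

append_line_locked("/tmp/distrocheck_results", "mypkg,Debian-main,Ubuntu-main")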
diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
deleted file mode 100644
index b17272928..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""Helper module for GPG signing"""
-import os
-
-import bb
-import oe.utils
-
-class LocalSigner(object):
- """Class for handling local (on the build host) signing"""
- def __init__(self, d):
- self.gpg_bin = d.getVar('GPG_BIN') or \
- bb.utils.which(os.getenv('PATH'), 'gpg')
- self.gpg_path = d.getVar('GPG_PATH')
- self.gpg_version = self.get_gpg_version()
- self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
- self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
-
- def export_pubkey(self, output_file, keyid, armor=True):
- """Export GPG public key to a file"""
- cmd = '%s --no-permission-warning --batch --yes --export -o %s ' % \
- (self.gpg_bin, output_file)
- if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- if armor:
- cmd += "--armor "
- cmd += keyid
- status, output = oe.utils.getstatusoutput(cmd)
- if status:
- raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
- (keyid, output))
-
- def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
- """Sign RPM files"""
-
- cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
- gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
- if self.gpg_version > (2,1,):
- gpg_args += ' --pinentry-mode=loopback'
- cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
- cmd += "--define '_binary_filedigest_algorithm %s' " % digest
- if self.gpg_bin:
- cmd += "--define '__gpg %s' " % self.gpg_bin
- if self.gpg_path:
- cmd += "--define '_gpg_path %s' " % self.gpg_path
- if fsk:
- cmd += "--signfiles --fskpath %s " % fsk
- if fsk_password:
- cmd += "--define '_file_signing_key_password %s' " % fsk_password
-
- # Sign in chunks
- for i in range(0, len(files), sign_chunk):
- status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+sign_chunk]))
- if status:
- raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
-
- def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
- """Create a detached signature of a file"""
- import subprocess
-
- if passphrase_file and passphrase:
-            raise Exception("You should use either passphrase_file or passphrase, not both")
-
- cmd = [self.gpg_bin, '--detach-sign', '--no-permission-warning', '--batch',
- '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
-
- if self.gpg_path:
- cmd += ['--homedir', self.gpg_path]
- if armor:
- cmd += ['--armor']
-
- #gpg > 2.1 supports password pipes only through the loopback interface
- #gpg < 2.1 errors out if given unknown parameters
- if self.gpg_version > (2,1,):
- cmd += ['--pinentry-mode', 'loopback']
-
- if self.gpg_agent_bin:
- cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
-
- cmd += [input_file]
-
- try:
- if passphrase_file:
- with open(passphrase_file) as fobj:
- passphrase = fobj.readline();
-
- job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
- (_, stderr) = job.communicate(passphrase.encode("utf-8"))
-
- if job.returncode:
- raise bb.build.FuncFailed("GPG exited with code %d: %s" %
- (job.returncode, stderr.decode("utf-8")))
-
- except IOError as e:
- bb.error("IO error (%s): %s" % (e.errno, e.strerror))
- raise Exception("Failed to sign '%s'" % input_file)
-
- except OSError as e:
- bb.error("OS error (%s): %s" % (e.errno, e.strerror))
-            raise Exception("Failed to sign '%s'" % input_file)
-
-
- def get_gpg_version(self):
- """Return the gpg version as a tuple of ints"""
- import subprocess
- try:
- ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
- return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
- except subprocess.CalledProcessError as e:
- raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
-
-
- def verify(self, sig_file):
- """Verify signature"""
- cmd = self.gpg_bin + " --verify --no-permission-warning "
- if self.gpg_path:
- cmd += "--homedir %s " % self.gpg_path
- cmd += sig_file
- status, _ = oe.utils.getstatusoutput(cmd)
- ret = False if status else True
- return ret
-
-
-def get_signer(d, backend):
- """Get signer object for the specified backend"""
- # Use local signing by default
- if backend == 'local':
- return LocalSigner(d)
- else:
- bb.fatal("Unsupported signing backend '%s'" % backend)
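Outside of a BitBake datastore, the core of detach_sign above can be illustrated with a small stand-alone sketch; the gpg binary path, key id, file name and passphrase below are placeholders rather than values taken from this module:

import subprocess

def detach_sign_file(gpg_bin, input_file, keyid, passphrase, homedir=None, armor=True):
    # Build a gpg command line similar to LocalSigner.detach_sign and feed
    # the passphrase to gpg on stdin (file descriptor 0).
    cmd = [gpg_bin, '--detach-sign', '--batch', '--no-tty', '--yes',
           '--passphrase-fd', '0', '-u', keyid]
    if homedir:
        cmd += ['--homedir', homedir]
    if armor:
        cmd += ['--armor']
    cmd += [input_file]
    job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    _, stderr = job.communicate(passphrase.encode("utf-8"))
    if job.returncode:
        raise RuntimeError("gpg exited with %d: %s" % (job.returncode, stderr.decode("utf-8")))

# e.g. detach_sign_file("gpg", "repomd.xml", "0xDEADBEEF", "secret")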
diff --git a/import-layers/yocto-poky/meta/lib/oe/license.py b/import-layers/yocto-poky/meta/lib/oe/license.py
deleted file mode 100644
index ca385d518..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/license.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# vi:sts=4:sw=4:et
-"""Code for parsing OpenEmbedded license strings"""
-
-import ast
-import re
-from fnmatch import fnmatchcase as fnmatch
-
-def license_ok(license, dont_want_licenses):
-    """ Return False if license exists in dont_want_licenses, else True """
- for dwl in dont_want_licenses:
-        # If we want to exclude a license named generically 'X', we
-        # surely want to exclude 'X+' as well. Consequently, strip a
-        # trailing '+' character from LICENSE when the entry in
-        # INCOMPATIBLE_LICENSE is not itself an 'X+' license.
- lic = license
- if not re.search('\+$', dwl):
- lic = re.sub('\+', '', license)
- if fnmatch(lic, dwl):
- return False
- return True
-
-class LicenseError(Exception):
- pass
-
-class LicenseSyntaxError(LicenseError):
- def __init__(self, licensestr, exc):
- self.licensestr = licensestr
- self.exc = exc
- LicenseError.__init__(self)
-
- def __str__(self):
- return "error in '%s': %s" % (self.licensestr, self.exc)
-
-class InvalidLicense(LicenseError):
- def __init__(self, license):
- self.license = license
- LicenseError.__init__(self)
-
- def __str__(self):
- return "invalid characters in license '%s'" % self.license
-
-license_operator_chars = '&|() '
-license_operator = re.compile('([' + license_operator_chars + '])')
-license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
-
-class LicenseVisitor(ast.NodeVisitor):
- """Get elements based on OpenEmbedded license strings"""
- def get_elements(self, licensestr):
- new_elements = []
- elements = list([x for x in license_operator.split(licensestr) if x.strip()])
- for pos, element in enumerate(elements):
- if license_pattern.match(element):
- if pos > 0 and license_pattern.match(elements[pos-1]):
- new_elements.append('&')
- element = '"' + element + '"'
- elif not license_operator.match(element):
- raise InvalidLicense(element)
- new_elements.append(element)
-
- return new_elements
-
-    def visit_elements(self, elements):
-        """Visit syntax tree elements previously generated from an
-        OpenEmbedded license string"""
-        self.visit(ast.parse(' '.join(elements)))
-
-    def visit_string(self, licensestr):
-        """Visit an OpenEmbedded license string"""
-        self.visit_elements(self.get_elements(licensestr))
-
-class FlattenVisitor(LicenseVisitor):
- """Flatten a license tree (parsed from a string) by selecting one of each
- set of OR options, in the way the user specifies"""
- def __init__(self, choose_licenses):
- self.choose_licenses = choose_licenses
- self.licenses = []
- LicenseVisitor.__init__(self)
-
- def visit_Str(self, node):
- self.licenses.append(node.s)
-
- def visit_BinOp(self, node):
- if isinstance(node.op, ast.BitOr):
- left = FlattenVisitor(self.choose_licenses)
- left.visit(node.left)
-
- right = FlattenVisitor(self.choose_licenses)
- right.visit(node.right)
-
- selected = self.choose_licenses(left.licenses, right.licenses)
- self.licenses.extend(selected)
- else:
- self.generic_visit(node)
-
-def flattened_licenses(licensestr, choose_licenses):
- """Given a license string and choose_licenses function, return a flat list of licenses"""
- flatten = FlattenVisitor(choose_licenses)
- try:
- flatten.visit_string(licensestr)
- except SyntaxError as exc:
- raise LicenseSyntaxError(licensestr, exc)
- return flatten.licenses
-
-def is_included(licensestr, whitelist=None, blacklist=None):
- """Given a license string and whitelist and blacklist, determine if the
- license string matches the whitelist and does not match the blacklist.
-
- Returns a tuple holding the boolean state and a list of the applicable
- licenses that were excluded if state is False, or the licenses that were
- included if the state is True.
- """
-
- def include_license(license):
- return any(fnmatch(license, pattern) for pattern in whitelist)
-
- def exclude_license(license):
- return any(fnmatch(license, pattern) for pattern in blacklist)
-
- def choose_licenses(alpha, beta):
- """Select the option in an OR which is the 'best' (has the most
- included licenses and no excluded licenses)."""
- # The factor 1000 below is arbitrary, just expected to be much larger
-        # than the number of licenses actually specified. That way the weight
- # will be negative if the list of licenses contains an excluded license,
- # but still gives a higher weight to the list with the most included
- # licenses.
- alpha_weight = (len(list(filter(include_license, alpha))) -
- 1000 * (len(list(filter(exclude_license, alpha))) > 0))
- beta_weight = (len(list(filter(include_license, beta))) -
- 1000 * (len(list(filter(exclude_license, beta))) > 0))
- if alpha_weight >= beta_weight:
- return alpha
- else:
- return beta
-
- if not whitelist:
- whitelist = ['*']
-
- if not blacklist:
- blacklist = []
-
- licenses = flattened_licenses(licensestr, choose_licenses)
- excluded = [lic for lic in licenses if exclude_license(lic)]
- included = [lic for lic in licenses if include_license(lic)]
- if excluded:
- return False, excluded
- else:
- return True, included
-
-class ManifestVisitor(LicenseVisitor):
- """Walk license tree (parsed from a string) removing the incompatible
- licenses specified"""
- def __init__(self, dont_want_licenses, canonical_license, d):
- self._dont_want_licenses = dont_want_licenses
- self._canonical_license = canonical_license
- self._d = d
- self._operators = []
-
- self.licenses = []
- self.licensestr = ''
-
- LicenseVisitor.__init__(self)
-
- def visit(self, node):
- if isinstance(node, ast.Str):
- lic = node.s
-
- if license_ok(self._canonical_license(self._d, lic),
- self._dont_want_licenses) == True:
- if self._operators:
- ops = []
- for op in self._operators:
- if op == '[':
- ops.append(op)
- elif op == ']':
- ops.append(op)
- else:
- if not ops:
- ops.append(op)
- elif ops[-1] in ['[', ']']:
- ops.append(op)
- else:
- ops[-1] = op
-
- for op in ops:
- if op == '[' or op == ']':
- self.licensestr += op
- elif self.licenses:
- self.licensestr += ' ' + op + ' '
-
- self._operators = []
-
- self.licensestr += lic
- self.licenses.append(lic)
- elif isinstance(node, ast.BitAnd):
- self._operators.append("&")
- elif isinstance(node, ast.BitOr):
- self._operators.append("|")
- elif isinstance(node, ast.List):
- self._operators.append("[")
- elif isinstance(node, ast.Load):
- self.licensestr += "]"
-
- self.generic_visit(node)
-
-def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
- """Given a license string and dont_want_licenses list,
- return license string filtered and a list of licenses"""
- manifest = ManifestVisitor(dont_want_licenses, canonical_license, d)
-
- try:
- elements = manifest.get_elements(licensestr)
-
-        # Replace '()' with '[]' so that ast parses them as List and Load nodes.
- elements = ['[' if e == '(' else e for e in elements]
- elements = [']' if e == ')' else e for e in elements]
-
- manifest.visit_elements(elements)
- except SyntaxError as exc:
- raise LicenseSyntaxError(licensestr, exc)
-
-    # Replace '[]' back with '()' to output the correct license string.
- manifest.licensestr = manifest.licensestr.replace('[', '(').replace(']', ')')
-
- return (manifest.licensestr, manifest.licenses)
-
-class ListVisitor(LicenseVisitor):
- """Record all different licenses found in the license string"""
- def __init__(self):
- self.licenses = set()
-
- def visit_Str(self, node):
- self.licenses.add(node.s)
-
-def list_licenses(licensestr):
-    """Simply get the set of all licenses mentioned in a license string.
-    Binary operators are not applied or taken into account in any way"""
- visitor = ListVisitor()
- try:
- visitor.visit_string(licensestr)
- except SyntaxError as exc:
- raise LicenseSyntaxError(licensestr, exc)
- return visitor.licenses
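A short illustration of how the helpers above combine, assuming the module is importable as oe.license and running on the Python 3 releases this code targeted (where string literals still parse as ast.Str nodes):

import oe.license

expr = "GPLv2 & (GPLv3 | MIT)"

# is_included() resolves the OR towards the whitelist and away from the blacklist.
ok, lics = oe.license.is_included(expr, whitelist=["GPLv2", "MIT"], blacklist=["GPLv3"])
print(ok, lics)                          # True ['GPLv2', 'MIT']

# list_licenses() just enumerates every license named, ignoring the operators.
print(oe.license.list_licenses(expr))    # {'GPLv2', 'GPLv3', 'MIT'}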
diff --git a/import-layers/yocto-poky/meta/lib/oe/lsb.py b/import-layers/yocto-poky/meta/lib/oe/lsb.py
deleted file mode 100644
index 71c0992c5..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/lsb.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import os
-import bb
-
-def get_os_release():
- """Get all key-value pairs from /etc/os-release as a dict"""
- from collections import OrderedDict
-
- data = OrderedDict()
- if os.path.exists('/etc/os-release'):
- with open('/etc/os-release') as f:
- for line in f:
- try:
- key, val = line.rstrip().split('=', 1)
- except ValueError:
- continue
- data[key.strip()] = val.strip('"')
- return data
-
-def release_dict_osr():
- """ Populate a dict with pertinent values from /etc/os-release """
- data = {}
- os_release = get_os_release()
- if 'ID' in os_release:
- data['DISTRIB_ID'] = os_release['ID']
- if 'VERSION_ID' in os_release:
- data['DISTRIB_RELEASE'] = os_release['VERSION_ID']
-
- return data
-
-def release_dict_lsb():
- """ Return the output of lsb_release -ir as a dictionary """
- from subprocess import PIPE
-
- try:
- output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
- except bb.process.CmdError as exc:
- return {}
-
- lsb_map = { 'Distributor ID': 'DISTRIB_ID',
- 'Release': 'DISTRIB_RELEASE'}
- lsb_keys = lsb_map.keys()
-
- data = {}
- for line in output.splitlines():
- if line.startswith("-e"):
- line = line[3:]
- try:
- key, value = line.split(":\t", 1)
- except ValueError:
- continue
- if key in lsb_keys:
- data[lsb_map[key]] = value
-
- if len(data.keys()) != 2:
- return None
-
- return data
-
-def release_dict_file():
- """ Try to gather release information manually when other methods fail """
- data = {}
- try:
- if os.path.exists('/etc/lsb-release'):
- data = {}
- with open('/etc/lsb-release') as f:
- for line in f:
- key, value = line.split("=", 1)
- data[key] = value.strip()
- elif os.path.exists('/etc/redhat-release'):
- data = {}
- with open('/etc/redhat-release') as f:
- distro = f.readline().strip()
- import re
- match = re.match(r'(.*) release (.*) \((.*)\)', distro)
- if match:
- data['DISTRIB_ID'] = match.group(1)
- data['DISTRIB_RELEASE'] = match.group(2)
- elif os.path.exists('/etc/SuSE-release'):
- data = {}
- data['DISTRIB_ID'] = 'SUSE LINUX'
- with open('/etc/SuSE-release') as f:
- for line in f:
- if line.startswith('VERSION = '):
- data['DISTRIB_RELEASE'] = line[10:].rstrip()
- break
-
- except IOError:
- return {}
- return data
-
-def distro_identifier(adjust_hook=None):
-    """Return a distro identifier string based upon /etc/os-release,
-    lsb_release -ir or various release files, with optional adjustment
-    via a hook"""
-
- import re
-
- # Try /etc/os-release first, then the output of `lsb_release -ir` and
- # finally fall back on parsing various release files in order to determine
- # host distro name and version.
- distro_data = release_dict_osr()
- if not distro_data:
- distro_data = release_dict_lsb()
- if not distro_data:
- distro_data = release_dict_file()
-
- distro_id = distro_data.get('DISTRIB_ID', '')
- release = distro_data.get('DISTRIB_RELEASE', '')
-
- if adjust_hook:
- distro_id, release = adjust_hook(distro_id, release)
- if not distro_id:
- return "Unknown"
- # Filter out any non-alphanumerics
- distro_id = re.sub(r'\W', '', distro_id)
-
- if release:
- id_str = '{0}-{1}'.format(distro_id.lower(), release)
- else:
- id_str = distro_id
- return id_str.replace(' ','-').replace('/','-')
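As an example, a hypothetical adjust_hook can rewrite the detected host distro before the identifier string is built (assuming the module is importable as oe.lsb):

import oe.lsb

def adjust(distro_id, release):
    # Treat a derivative distro as its parent; the mapping is purely illustrative.
    if distro_id == "LinuxMint":
        return "Ubuntu", release
    return distro_id, release

print(oe.lsb.distro_identifier(adjust))   # e.g. "ubuntu-16.04" on an Ubuntu host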
diff --git a/import-layers/yocto-poky/meta/lib/oe/maketype.py b/import-layers/yocto-poky/meta/lib/oe/maketype.py
deleted file mode 100644
index f88981dd9..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/maketype.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""OpenEmbedded variable typing support
-
-Types are defined in the metadata by name, using the 'type' flag on a
-variable. Other flags may be utilized in the construction of the types. See
-the arguments of the type's factory for details.
-"""
-
-import inspect
-import oe.types as types
-import collections
-
-available_types = {}
-
-class MissingFlag(TypeError):
- """A particular flag is required to construct the type, but has not been
- provided."""
- def __init__(self, flag, type):
- self.flag = flag
- self.type = type
- TypeError.__init__(self)
-
- def __str__(self):
- return "Type '%s' requires flag '%s'" % (self.type, self.flag)
-
-def factory(var_type):
- """Return the factory for a specified type."""
- if var_type is None:
- raise TypeError("No type specified. Valid types: %s" %
- ', '.join(available_types))
- try:
- return available_types[var_type]
- except KeyError:
- raise TypeError("Invalid type '%s':\n Valid types: %s" %
- (var_type, ', '.join(available_types)))
-
-def create(value, var_type, **flags):
- """Create an object of the specified type, given the specified flags and
- string value."""
- obj = factory(var_type)
- objflags = {}
- for flag in obj.flags:
- if flag not in flags:
- if flag not in obj.optflags:
- raise MissingFlag(flag, var_type)
- else:
- objflags[flag] = flags[flag]
-
- return obj(value, **objflags)
-
-def get_callable_args(obj):
- """Grab all but the first argument of the specified callable, returning
- the list, as well as a list of which of the arguments have default
- values."""
- if type(obj) is type:
- obj = obj.__init__
-
- sig = inspect.signature(obj)
- args = list(sig.parameters.keys())
- defaults = list(s for s in sig.parameters.keys() if sig.parameters[s].default != inspect.Parameter.empty)
- flaglist = []
- if args:
- if len(args) > 1 and args[0] == 'self':
- args = args[1:]
- flaglist.extend(args)
-
- optional = set()
- if defaults:
- optional |= set(flaglist[-len(defaults):])
- return flaglist, optional
-
-def factory_setup(name, obj):
- """Prepare a factory for use."""
- args, optional = get_callable_args(obj)
- extra_args = args[1:]
- if extra_args:
- obj.flags, optional = extra_args, optional
- obj.optflags = set(optional)
- else:
- obj.flags = obj.optflags = ()
-
- if not hasattr(obj, 'name'):
- obj.name = name
-
-def register(name, factory):
- """Register a type, given its name and a factory callable.
-
- Determines the required and optional flags from the factory's
- arguments."""
- factory_setup(name, factory)
- available_types[factory.name] = factory
-
-
-# Register all our included types
-for name in dir(types):
- if name.startswith('_'):
- continue
-
- obj = getattr(types, name)
-    if not callable(obj):
- continue
-
- register(name, obj)
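The same registration machinery also works for custom factories; the 'csv' type below is hypothetical and only meant to show how factory arguments map onto required and optional flags (assuming the module is importable as oe.maketype):

import oe.maketype as maketype

def csv(value, sep=','):
    # The first argument receives the value; remaining arguments become flags.
    # 'sep' has a default, so it is registered as an optional flag.
    return [v.strip() for v in value.split(sep)]

maketype.register('csv', csv)
print(maketype.create('a, b, c', 'csv'))        # ['a', 'b', 'c']
print(maketype.create('x;y', 'csv', sep=';'))   # ['x', 'y']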
diff --git a/import-layers/yocto-poky/meta/lib/oe/manifest.py b/import-layers/yocto-poky/meta/lib/oe/manifest.py
deleted file mode 100644
index 674303c86..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/manifest.py
+++ /dev/null
@@ -1,344 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import os
-import re
-import bb
-
-
-class Manifest(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- PKG_TYPE_MUST_INSTALL = "mip"
- PKG_TYPE_MULTILIB = "mlp"
- PKG_TYPE_LANGUAGE = "lgp"
- PKG_TYPE_ATTEMPT_ONLY = "aop"
-
- MANIFEST_TYPE_IMAGE = "image"
- MANIFEST_TYPE_SDK_HOST = "sdk_host"
- MANIFEST_TYPE_SDK_TARGET = "sdk_target"
-
- var_maps = {
- MANIFEST_TYPE_IMAGE: {
- "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
- "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
- "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
- },
- MANIFEST_TYPE_SDK_HOST: {
- "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
- "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
- },
- MANIFEST_TYPE_SDK_TARGET: {
- "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
- "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
- }
- }
-
- INSTALL_ORDER = [
- PKG_TYPE_LANGUAGE,
- PKG_TYPE_MUST_INSTALL,
- PKG_TYPE_ATTEMPT_ONLY,
- PKG_TYPE_MULTILIB
- ]
-
- initial_manifest_file_header = \
- "# This file was generated automatically and contains the packages\n" \
- "# passed on to the package manager in order to create the rootfs.\n\n" \
- "# Format:\n" \
- "# <package_type>,<package_name>\n" \
- "# where:\n" \
- "# <package_type> can be:\n" \
- "# 'mip' = must install package\n" \
- "# 'aop' = attempt only package\n" \
- "# 'mlp' = multilib package\n" \
- "# 'lgp' = language package\n\n"
-
- def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
- self.d = d
- self.manifest_type = manifest_type
-
- if manifest_dir is None:
- if manifest_type != self.MANIFEST_TYPE_IMAGE:
- self.manifest_dir = self.d.getVar('SDK_DIR')
- else:
- self.manifest_dir = self.d.getVar('WORKDIR')
- else:
- self.manifest_dir = manifest_dir
-
- bb.utils.mkdirhier(self.manifest_dir)
-
- self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
- self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
- self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
-
- # packages in the following vars will be split in 'must install' and
- # 'multilib'
- self.vars_to_split = ["PACKAGE_INSTALL",
- "TOOLCHAIN_HOST_TASK",
- "TOOLCHAIN_TARGET_TASK"]
-
- """
- This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
- This will be used for testing until the class is implemented properly!
- """
- def _create_dummy_initial(self):
- image_rootfs = self.d.getVar('IMAGE_ROOTFS')
- pkg_list = dict()
- if image_rootfs.find("core-image-sato-sdk") > 0:
- pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
- "packagegroup-core-x11-sato-games packagegroup-base-extended " \
- "packagegroup-core-x11-sato packagegroup-core-x11-base " \
- "packagegroup-core-sdk packagegroup-core-tools-debug " \
- "packagegroup-core-boot packagegroup-core-tools-testapps " \
- "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
- "apt packagegroup-core-tools-profile psplash " \
- "packagegroup-core-standalone-sdk-target " \
- "packagegroup-core-ssh-openssh dpkg kernel-dev"
- pkg_list[self.PKG_TYPE_LANGUAGE] = \
- "locale-base-en-us locale-base-en-gb"
- elif image_rootfs.find("core-image-sato") > 0:
- pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
- "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
- "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
- "packagegroup-core-x11-sato packagegroup-core-boot"
- pkg_list['lgp'] = \
- "locale-base-en-us locale-base-en-gb"
- elif image_rootfs.find("core-image-minimal") > 0:
- pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for pkg_type in pkg_list:
- for pkg in pkg_list[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- """
- This will create the initial manifest which will be used by Rootfs class to
- generate the rootfs
- """
- @abstractmethod
- def create_initial(self):
- pass
-
- """
- This creates the manifest after everything has been installed.
- """
- @abstractmethod
- def create_final(self):
- pass
-
- """
- This creates the manifest after the package in initial manifest has been
- dummy installed. It lists all *to be installed* packages. There is no real
- installation, just a test.
- """
- @abstractmethod
- def create_full(self, pm):
- pass
-
- """
- The following function parses an initial manifest and returns a dictionary
- object with the must install, attempt only, multilib and language packages.
- """
- def parse_initial_manifest(self):
- pkgs = dict()
-
- with open(self.initial_manifest) as manifest:
- for line in manifest.read().split('\n'):
- comment = re.match("^#.*", line)
- pattern = "^(%s|%s|%s|%s),(.*)$" % \
- (self.PKG_TYPE_MUST_INSTALL,
- self.PKG_TYPE_ATTEMPT_ONLY,
- self.PKG_TYPE_MULTILIB,
- self.PKG_TYPE_LANGUAGE)
- pkg = re.match(pattern, line)
-
- if comment is not None:
- continue
-
- if pkg is not None:
- pkg_type = pkg.group(1)
- pkg_name = pkg.group(2)
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = [pkg_name]
- else:
- pkgs[pkg_type].append(pkg_name)
-
- return pkgs
-
-    '''
-    The following function parses a full manifest and returns a list
-    of the installed packages.
-    '''
- def parse_full_manifest(self):
- installed_pkgs = list()
- if not os.path.exists(self.full_manifest):
-            bb.note('full manifest does not exist')
- return installed_pkgs
-
- with open(self.full_manifest, 'r') as manifest:
- for pkg in manifest.read().split('\n'):
- installed_pkgs.append(pkg.strip())
-
- return installed_pkgs
-
-
-class RpmManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
-
-class OpkgManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in sorted(pkgs):
- for pkg in sorted(pkgs[pkg_type].split()):
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- if not os.path.exists(self.initial_manifest):
- self.create_initial()
-
- initial_manifest = self.parse_initial_manifest()
- pkgs_to_install = list()
- for pkg_type in initial_manifest:
- pkgs_to_install += initial_manifest[pkg_type]
- if len(pkgs_to_install) == 0:
- return
-
- output = pm.dummy_install(pkgs_to_install)
-
- with open(self.full_manifest, 'w+') as manifest:
- pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
- for line in set(output.split('\n')):
- m = pkg_re.match(line)
- if m:
- manifest.write(m.group(1) + '\n')
-
- return
-
-
-class DpkgManifest(Manifest):
- def create_initial(self):
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var)
-
- if pkg_list is None:
- continue
-
- for pkg in pkg_list.split():
- manifest.write("%s,%s\n" %
- (self.var_maps[self.manifest_type][var], pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
-
-def create_manifest(d, final_manifest=False, manifest_dir=None,
- manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
- manifest_map = {'rpm': RpmManifest,
- 'ipk': OpkgManifest,
- 'deb': DpkgManifest}
-
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
-
- if final_manifest:
- manifest.create_final()
- else:
- manifest.create_initial()
-
-
-if __name__ == "__main__":
- pass
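The initial manifest layout documented in the file header above is plain text, one package_type,package_name entry per line, so it can also be parsed outside this class; a self-contained sketch:

import re

sample = """\
# comments are ignored
mip,packagegroup-core-boot
lgp,locale-base-en-us
aop,kernel-dev
"""

pkgs = {}
for line in sample.splitlines():
    m = re.match(r"^(mip|aop|mlp|lgp),(.*)$", line)
    if m:
        pkgs.setdefault(m.group(1), []).append(m.group(2))

print(pkgs)   # {'mip': ['packagegroup-core-boot'], 'lgp': ['locale-base-en-us'], 'aop': ['kernel-dev']}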
diff --git a/import-layers/yocto-poky/meta/lib/oe/package.py b/import-layers/yocto-poky/meta/lib/oe/package.py
deleted file mode 100644
index 4f3e21ad4..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/package.py
+++ /dev/null
@@ -1,294 +0,0 @@
-def runstrip(arg):
- # Function to strip a single file, called from split_and_strip_files below
- # A working 'file' (one which works on the target architecture)
- #
- # The elftype is a bit pattern (explained in split_and_strip_files) to tell
- # us what type of file we're processing...
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
-
- import stat, subprocess
-
- (file, elftype, strip) = arg
-
- newmode = None
- if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
- origmode = os.stat(file)[stat.ST_MODE]
- newmode = origmode | stat.S_IWRITE | stat.S_IREAD
- os.chmod(file, newmode)
-
- stripcmd = [strip]
-
- # kernel module
- if elftype & 16:
- stripcmd.extend(["--strip-debug", "--remove-section=.comment",
- "--remove-section=.note", "--preserve-dates"])
- # .so and shared library
- elif ".so" in file and elftype & 8:
- stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
- # shared or executable:
- elif elftype & 8 or elftype & 4:
- stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
-
- stripcmd.append(file)
- bb.debug(1, "runstrip: %s" % stripcmd)
-
- try:
- output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
-
- if newmode:
- os.chmod(file, origmode)
-
- return
-
-
-def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=False):
- """
- Strip executable code (like executables, shared libraries) _in_place_
- - Based on sysroot_strip in staging.bbclass
- :param dstdir: directory in which to strip files
- :param strip_cmd: Strip command (usually ${STRIP})
- :param libdir: ${libdir} - strip .so files in this directory
- :param base_libdir: ${base_libdir} - strip .so files in this directory
-    :param qa_already_stripped: Set to True if 'already-stripped' is in ${INSANE_SKIP}
- This is for proper logging and messages only.
- """
- import stat, errno, oe.path, oe.utils, mmap
-
- # Detect .ko module by searching for "vermagic=" string
- def is_kernel_module(path):
- with open(path) as f:
- return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
-
- # Return type (bits):
- # 0 - not elf
- # 1 - ELF
- # 2 - stripped
- # 4 - executable
- # 8 - shared library
- # 16 - kernel module
- def is_elf(path):
- exec_type = 0
- ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
-
- if ret:
- bb.error("split_and_strip_files: 'file %s' failed" % path)
- return exec_type
-
- if "ELF" in result:
- exec_type |= 1
- if "not stripped" not in result:
- exec_type |= 2
- if "executable" in result:
- exec_type |= 4
- if "shared" in result:
- exec_type |= 8
- if "relocatable" in result and is_kernel_module(path):
- exec_type |= 16
- return exec_type
-
- elffiles = {}
- inodes = {}
- libdir = os.path.abspath(dstdir + os.sep + libdir)
- base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
- exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
- #
- # First lets figure out all of the files we may have to process
- #
- for root, dirs, files in os.walk(dstdir):
- for f in files:
- file = os.path.join(root, f)
-
- try:
- ltarget = oe.path.realpath(file, dstdir, False)
- s = os.lstat(ltarget)
- except OSError as e:
- (err, strerror) = e.args
- if err != errno.ENOENT:
- raise
- # Skip broken symlinks
- continue
- if not s:
- continue
-            # Check if it's an executable
- if s[stat.ST_MODE] & exec_mask \
- or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
- or file.endswith('.ko'):
-                # Skip symlinks; the link target (if inside dstdir) is handled when we reach it
- if os.path.islink(file):
- continue
-
- # It's a file (or hardlink), not a link
- # ...but is it ELF, and is it already stripped?
- elf_file = is_elf(file)
- if elf_file & 1:
- if elf_file & 2:
- if qa_already_stripped:
- bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
- else:
- bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
- continue
-
- if s.st_ino in inodes:
- os.unlink(file)
- os.link(inodes[s.st_ino], file)
- else:
- # break hardlinks so that we do not strip the original.
- inodes[s.st_ino] = file
- bb.utils.copyfile(file, file)
- elffiles[file] = elf_file
-
- #
- # Now strip them (in parallel)
- #
- sfiles = []
- for file in elffiles:
- elf_file = int(elffiles[file])
- sfiles.append((file, elf_file, strip_cmd))
-
- oe.utils.multiprocess_exec(sfiles, runstrip)
-
-
-
-def file_translate(file):
- ft = file.replace("@", "@at@")
- ft = ft.replace(" ", "@space@")
- ft = ft.replace("\t", "@tab@")
- ft = ft.replace("[", "@openbrace@")
- ft = ft.replace("]", "@closebrace@")
- ft = ft.replace("_", "@underscore@")
- return ft
-
-def filedeprunner(arg):
- import re, subprocess, shlex
-
- (pkg, pkgfiles, rpmdeps, pkgdest) = arg
- provides = {}
- requires = {}
-
- file_re = re.compile(r'\s+\d+\s(.*)')
- dep_re = re.compile(r'\s+(\S)\s+(.*)')
- r = re.compile(r'[<>=]+\s+\S*')
-
- def process_deps(pipe, pkg, pkgdest, provides, requires):
- file = None
- for line in pipe.split("\n"):
-
- m = file_re.match(line)
- if m:
- file = m.group(1)
- file = file.replace(pkgdest + "/" + pkg, "")
- file = file_translate(file)
- continue
-
- m = dep_re.match(line)
- if not m or not file:
- continue
-
- type, dep = m.groups()
-
- if type == 'R':
- i = requires
- elif type == 'P':
- i = provides
- else:
- continue
-
- if dep.startswith("python("):
- continue
-
- # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
- # are typically used conditionally from the Perl code, but are
- # generated as unconditional dependencies.
- if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
- continue
-
- # Ignore perl dependencies on .pl files.
- if dep.startswith('perl(') and dep.endswith('.pl)'):
- continue
-
- # Remove perl versions and perl module versions since they typically
- # do not make sense when used as package versions.
- if dep.startswith('perl') and r.search(dep):
- dep = dep.split()[0]
-
- # Put parentheses around any version specifications.
- dep = r.sub(r'(\g<0>)',dep)
-
- if file not in i:
- i[file] = []
- i[file].append(dep)
-
- return provides, requires
-
- output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
- provides, requires = process_deps(output, pkg, pkgdest, provides, requires)
-
- return (pkg, provides, requires)
-
-
-def read_shlib_providers(d):
- import re
-
- shlib_provider = {}
- shlibs_dirs = d.getVar('SHLIBSDIRS').split()
- list_re = re.compile('^(.*)\.list$')
- # Go from least to most specific since the last one found wins
- for dir in reversed(shlibs_dirs):
- bb.debug(2, "Reading shlib providers in %s" % (dir))
- if not os.path.exists(dir):
- continue
- for file in os.listdir(dir):
- m = list_re.match(file)
- if m:
- dep_pkg = m.group(1)
- try:
- fd = open(os.path.join(dir, file))
- except IOError:
- # During a build unrelated shlib files may be deleted, so
- # handle files disappearing between the listdirs and open.
- continue
- lines = fd.readlines()
- fd.close()
- for l in lines:
- s = l.strip().split(":")
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
- return shlib_provider
-
-
-def npm_split_package_dirs(pkgdir):
- """
- Work out the packages fetched and unpacked by BitBake's npm fetcher
- Returns a dict of packagename -> (relpath, package.json) ordered
- such that it is suitable for use in PACKAGES and FILES
- """
- from collections import OrderedDict
- import json
- packages = {}
- for root, dirs, files in os.walk(pkgdir):
- if os.path.basename(root) == 'node_modules':
- for dn in dirs:
- relpth = os.path.relpath(os.path.join(root, dn), pkgdir)
- pkgitems = ['${PN}']
- for pathitem in relpth.split('/'):
- if pathitem == 'node_modules':
- continue
- pkgitems.append(pathitem)
- pkgname = '-'.join(pkgitems).replace('_', '-')
- pkgname = pkgname.replace('@', '')
- pkgfile = os.path.join(root, dn, 'package.json')
- data = None
- if os.path.exists(pkgfile):
- with open(pkgfile, 'r') as f:
- data = json.loads(f.read())
- packages[pkgname] = (relpth, data)
- # We want the main package for a module sorted *after* its subpackages
- # (so that it doesn't otherwise steal the files for the subpackage), so
- # this is a cheap way to do that whilst still having an otherwise
- # alphabetical sort
- return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~'))
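As a usage note, file_translate above escapes characters that are special in RPM file dependency processing (see filedeprunner); assuming the module is importable as oe.package:

import oe.package

print(oe.package.file_translate("/usr/lib/my pkg/[data]_v1"))
# /usr/lib/my@space@pkg/@openbrace@data@closebrace@@underscore@v1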
diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
deleted file mode 100644
index 2d8aeba03..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/package_manager.py
+++ /dev/null
@@ -1,1787 +0,0 @@
-from abc import ABCMeta, abstractmethod
-import os
-import glob
-import subprocess
-import shutil
-import multiprocessing
-import re
-import collections
-import bb
-import tempfile
-import oe.utils
-import oe.path
-import string
-from oe.gpg_sign import get_signer
-import hashlib
-
-# this can be used by all PM backends to create the index files in parallel
-def create_index(arg):
- index_cmd = arg
-
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- if result:
- bb.note(result)
-
-def opkg_query(cmd_output):
- """
-    This method parses the output from the package manager and returns
-    a dictionary with the information of the packages. This is used
-    when the packages are in deb or ipk format.
- """
- verregex = re.compile(' \([=<>]* [^ )]*\)')
- output = dict()
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
- for line in cmd_output.splitlines():
- line = line.rstrip()
- if ':' in line:
- if line.startswith("Package: "):
- pkg = line.split(": ")[1]
- elif line.startswith("Architecture: "):
- arch = line.split(": ")[1]
- elif line.startswith("Version: "):
- ver = line.split(": ")[1]
- elif line.startswith("File: ") or line.startswith("Filename:"):
- filename = line.split(": ")[1]
- if "/" in filename:
- filename = os.path.basename(filename)
- elif line.startswith("Depends: "):
- depends = verregex.sub('', line.split(": ")[1])
- for depend in depends.split(", "):
- dep.append(depend)
- elif line.startswith("Recommends: "):
- recommends = verregex.sub('', line.split(": ")[1])
- for recommend in recommends.split(", "):
- dep.append("%s [REC]" % recommend)
- elif line.startswith("PackageArch: "):
- pkgarch = line.split(": ")[1]
-
- # When there is a blank line save the package information
- elif not line:
- # IPK doesn't include the filename
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- if pkg:
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep, "pkgarch":pkgarch }
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- pkgarch = ""
-
- if pkg:
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep }
-
- return output
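# A stand-alone illustration of opkg_query() on a small status fragment; the
# package names and versions are made up, and this assumes the module is
# importable as oe.package_manager.
import oe.package_manager

status = """\
Package: busybox
Version: 1.27.2-r0
Architecture: cortexa8hf-neon
Depends: busybox-udhcpc (>= 1.27.2), update-alternatives-opkg

Package: base-files
Version: 3.0.14-r89
Architecture: beaglebone
"""

info = oe.package_manager.opkg_query(status)
print(info["busybox"]["deps"])          # ['busybox-udhcpc', 'update-alternatives-opkg']
print(info["base-files"]["filename"])   # base-files_3.0.14-r89_beaglebone.ipk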
-
-# Note: this should be bb.fatal in the future.
-def failed_postinsts_warn(pkgs, log_path):
- bb.warn("""Intentionally failing postinstall scriptlets of %s to defer them to first boot is deprecated. Please place them into pkg_postinst_ontarget_${PN} ().
-If deferring to first boot wasn't the intent, then scriptlet failure may mean an issue in the recipe, or a regression elsewhere.
-Details of the failure are in %s.""" %(pkgs, log_path))
-
-class Indexer(object, metaclass=ABCMeta):
- def __init__(self, d, deploy_dir):
- self.d = d
- self.deploy_dir = deploy_dir
-
- @abstractmethod
- def write_index(self):
- pass
-
-
-class RpmIndexer(Indexer):
- def write_index(self):
- self.do_write_index(self.deploy_dir)
-
- def do_write_index(self, deploy_dir):
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
- result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
- if result:
- bb.fatal(result)
-
- # Sign repomd
- if signer:
- sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (sig_type.upper() != "BIN")
- signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-class RpmSubdirIndexer(RpmIndexer):
- def write_index(self):
- bb.note("Generating package index for %s" %(self.deploy_dir))
- self.do_write_index(self.deploy_dir)
- for entry in os.walk(self.deploy_dir):
- if os.path.samefile(self.deploy_dir, entry[0]):
- for dir in entry[1]:
- if dir != 'repodata':
- dir_path = oe.path.join(self.deploy_dir, dir)
- bb.note("Generating package index for %s" %(dir_path))
- self.do_write_index(dir_path)
-
-class OpkgIndexer(Indexer):
- def write_index(self):
- arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
- "SDK_PACKAGE_ARCHS",
- ]
-
- opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
- open(os.path.join(self.deploy_dir, "Packages"), "w").close()
-
- index_cmds = set()
- index_sign_files = set()
- for arch_var in arch_vars:
- archs = self.d.getVar(arch_var)
- if archs is None:
- continue
-
- for arch in archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- pkgs_file = os.path.join(pkgs_dir, "Packages")
-
- if not os.path.isdir(pkgs_dir):
- continue
-
- if not os.path.exists(pkgs_file):
- open(pkgs_file, "w").close()
-
- index_cmds.add('%s -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
-
- index_sign_files.add(pkgs_file)
-
- if len(index_cmds) == 0:
- bb.note("There are no packages in %s!" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_exec(index_cmds, create_index)
-
- if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- for f in index_sign_files:
- signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-
-class DpkgIndexer(Indexer):
- def _create_configs(self):
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
-
- with open(os.path.join(self.apt_conf_dir, "preferences"),
- "w") as prefs_file:
- pass
- with open(os.path.join(self.apt_conf_dir, "sources.list"),
- "w+") as sources_file:
- pass
-
- with open(self.apt_conf_file, "w") as apt_conf:
- with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
- "apt", "apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- def write_index(self):
- self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
- "apt-ftparchive")
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self._create_configs()
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- pkg_archs = self.d.getVar('PACKAGE_ARCHS')
- if pkg_archs is not None:
- arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
- if sdk_pkg_archs is not None:
- for a in sdk_pkg_archs.split():
- if a not in pkg_archs:
- arch_list.append(a)
-
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
-
- apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
- gzip = bb.utils.which(os.getenv('PATH'), "gzip")
-
- index_cmds = []
- deb_dirs_found = False
- for arch in arch_list:
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
-
- cmd += "%s -fcn Packages > Packages.gz;" % gzip
-
- with open(os.path.join(arch_dir, "Release"), "w+") as release:
- release.write("Label: %s\n" % arch)
-
- cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
- index_cmds.append(cmd)
-
- deb_dirs_found = True
-
- if not deb_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_exec(index_cmds, create_index)
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
-            raise NotImplementedError('Package feed signing not implemented for dpkg')
-
-
-
-class PkgsList(object, metaclass=ABCMeta):
- def __init__(self, d, rootfs_dir):
- self.d = d
- self.rootfs_dir = rootfs_dir
-
- @abstractmethod
- def list_pkgs(self):
- pass
-
-class RpmPkgsList(PkgsList):
- def list_pkgs(self):
- return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
-
-class OpkgPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, config_file):
- super(OpkgPkgsList, self).__init__(d, rootfs_dir)
-
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- def list_pkgs(self, format=None):
- cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
-
- # opkg returns success even when it printed some
- # "Collected errors:" report to stderr. Mixing stderr into
- # stdout then leads to random failures later on when
- # parsing the output. To avoid this we need to collect both
- # output streams separately and check for empty stderr.
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
- cmd_output, cmd_stderr = p.communicate()
- cmd_output = cmd_output.decode("utf-8")
- cmd_stderr = cmd_stderr.decode("utf-8")
- if p.returncode or cmd_stderr:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
-
- return opkg_query(cmd_output)
-
-
-class DpkgPkgsList(PkgsList):
-
- def list_pkgs(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
- "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
- "-W"]
-
- cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
-
- try:
- cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return opkg_query(cmd_output)
-
-
-class PackageManager(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- def __init__(self, d, target_rootfs):
- self.d = d
- self.target_rootfs = target_rootfs
- self.deploy_dir = None
- self.deploy_lock = None
- self._initialize_intercepts()
-
- def _initialize_intercepts(self):
- bb.note("Initializing intercept dir for %s" % self.target_rootfs)
- postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
- if not postinst_intercepts_dir:
- postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- # As there might be more than one instance of PackageManager operating at the same time
- # we need to isolate the intercept_scripts directories from each other,
- # hence the ugly hash digest in dir name.
- self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
- "intercept_scripts-%s" %(hashlib.sha256(self.target_rootfs.encode()).hexdigest()) )
-
- bb.utils.remove(self.intercepts_dir, True)
- shutil.copytree(postinst_intercepts_dir, self.intercepts_dir)
-
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
-
- def _postpone_to_first_boot(self, postinst_intercept_hook):
- with open(postinst_intercept_hook) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.note("If an image is being built, the postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
-
- def run_intercepts(self):
- intercepts_dir = self.intercepts_dir
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.target_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- if script == "delay_to_first_boot":
- self._postpone_to_first_boot(script_full)
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
- if output: bb.note(output.decode("utf-8"))
- except subprocess.CalledProcessError as e:
- bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
- self._postpone_to_first_boot(script_full)
-
- @abstractmethod
- def update(self):
- """
- Update the package manager package database.
- """
- pass
-
- @abstractmethod
- def install(self, pkgs, attempt_only=False):
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
- pass
-
- @abstractmethod
- def remove(self, pkgs, with_dependencies=True):
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, then any dependencies are left in place.
- """
- pass
-
- @abstractmethod
- def write_index(self):
- """
- This function creates the index files
- """
- pass
-
- @abstractmethod
- def remove_packaging_data(self):
- pass
-
- @abstractmethod
- def list_installed(self):
- pass
-
- @abstractmethod
- def extract(self, pkg):
- """
-        Returns the path to a tmpdir where the contents of a package reside.
-        Deleting the tmpdir is the responsibility of the caller.
- """
- pass
-
- @abstractmethod
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- """
- Add remote package feeds into repository manager configuration. The parameters
- for the feeds are set by feed_uris, feed_base_paths and feed_archs.
- See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
- for their description.
- """
- pass
-
- def install_glob(self, globs, sdk=False):
- """
- Install all packages that match a glob.
- """
- # TODO don't have sdk here but have a property on the superclass
- # (and respect in install_complementary)
- if sdk:
- pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
- else:
- pkgdatadir = self.d.getVar("PKGDATA_DIR")
-
- try:
- bb.note("Installing globbed packages...")
- cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
- pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- self.install(pkgs.split(), attempt_only=True)
- except subprocess.CalledProcessError as e:
- # Return code 1 means no packages matched
- if e.returncode != 1:
- bb.fatal("Could not compute globbed packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def install_complementary(self, globs=None):
- """
-        Install complementary packages based upon the list of currently installed
-        packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
-        these packages; if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
- if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
- split_linguas = set()
-
- for translation in self.d.getVar('IMAGE_LINGUAS').split():
- split_linguas.add(translation)
- split_linguas.add(translation.split('-')[0])
-
- split_linguas = sorted(split_linguas)
-
- for lang in split_linguas:
- globs += " *-locale-%s" % lang
-
- if globs is None:
- return
-
- # we need to write the list of installed packages to a file because the
- # oe-pkgdata-util reads it from a file
- with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
- pkgs = self.list_installed()
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
- installed_pkgs.flush()
-
- cmd = ["oe-pkgdata-util",
- "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
- if exclude:
- cmd.extend(['--exclude=' + '|'.join(exclude.split())])
- try:
- bb.note("Installing complementary packages ...")
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- self.install(complementary_pkgs.split(), attempt_only=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
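For reference, a minimal standalone sketch of the IMAGE_LINGUAS handling above, showing how language codes become extra locale globs (the variable values below are invented for illustration):

    def add_locale_globs(globs, image_linguas):
        # Each entry contributes both itself and its base language code,
        # e.g. "en-gb" adds "*-locale-en-gb" and "*-locale-en".
        langs = set()
        for translation in image_linguas.split():
            langs.add(translation)
            langs.add(translation.split('-')[0])
        for lang in sorted(langs):
            globs += " *-locale-%s" % lang
        return globs

    # add_locale_globs("*-dev *-dbg", "en-gb pt-br")
    # -> "*-dev *-dbg *-locale-en *-locale-en-gb *-locale-pt *-locale-pt-br"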
- def deploy_dir_lock(self):
- if self.deploy_dir is None:
- raise RuntimeError("deploy_dir is not set!")
-
- lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
-
- self.deploy_lock = bb.utils.lockfile(lock_file_name)
-
- def deploy_dir_unlock(self):
- if self.deploy_lock is None:
- return
-
- bb.utils.unlockfile(self.deploy_lock)
-
- self.deploy_lock = None
-
- def construct_uris(self, uris, base_paths):
- """
- Construct URIs based on the pattern uri/base_path, where 'uri' and
- 'base_path' are taken from each element of the corresponding list
- argument, yielding len(uris) x len(base_paths) elements in the
- returned list
- """
- def _append(arr1, arr2, sep='/'):
- res = []
- narr1 = [a.rstrip(sep) for a in arr1]
- narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
- for a1 in narr1:
- if arr2:
- for a2 in narr2:
- res.append("%s%s%s" % (a1, sep, a2))
- else:
- res.append(a1)
- return res
- return _append(uris, base_paths)
-
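A small self-contained sketch of the URI construction above; the example feed URL is hypothetical:

    def construct_uris(uris, base_paths, sep='/'):
        # Join every URI with every base path, trimming redundant separators;
        # with no base paths the URIs are returned unchanged.
        nuris = [u.rstrip(sep) for u in uris]
        npaths = [p.strip(sep) for p in base_paths]
        if not npaths:
            return nuris
        return ["%s%s%s" % (u, sep, p) for u in nuris for p in npaths]

    # construct_uris(["http://feed.example/oe/"], ["ipk/", "rpm"])
    # -> ['http://feed.example/oe/ipk', 'http://feed.example/oe/rpm']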
-def create_packages_dir(d, rpm_repo_dir, deploydir, taskname, filterbydependencies):
- """
- Go through our do_package_write_X dependencies and hardlink the packages we depend
- upon into the repo directory. This prevents us from seeing other packages
- that may have been built but that we don't depend upon, as well as packages
- for architectures we don't support.
- """
- import errno
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- mytaskname = d.getVar("BB_RUNTASK")
- pn = d.getVar("PN")
- seendirs = set()
- multilibs = {}
-
- rpm_subrepo_dir = oe.path.join(rpm_repo_dir, "rpm")
-
- bb.utils.remove(rpm_subrepo_dir, recurse=True)
- bb.utils.mkdirhier(rpm_subrepo_dir)
-
- # Detect bitbake -b usage
- nodeps = d.getVar("BB_LIMITEDDEPS") or False
- if nodeps or not filterbydependencies:
- oe.path.symlink(deploydir, rpm_subrepo_dir, True)
- return
-
- start = None
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == mytaskname and data[0] == pn:
- start = dep
- break
- if start is None:
- bb.fatal("Couldn't find ourselves in BB_TASKDEPDATA?")
- rpmdeps = set()
- start = [start]
- seen = set(start)
- # Support direct dependencies (do_rootfs -> rpms)
- # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> rpms)
- while start:
- next = []
- for dep2 in start:
- for dep in taskdepdata[dep2][3]:
- if taskdepdata[dep][0] != pn:
- if "do_" + taskname in dep:
- rpmdeps.add(dep)
- elif dep not in seen:
- next.append(dep)
- seen.add(dep)
- start = next
-
- for dep in rpmdeps:
- c = taskdepdata[dep][0]
- manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
- if not manifest:
- bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
- if not os.path.exists(manifest):
- continue
- with open(manifest, "r") as f:
- for l in f:
- l = l.strip()
- dest = l.replace(deploydir, "")
- dest = rpm_subrepo_dir + dest
- if l.endswith("/"):
- if dest not in seendirs:
- bb.utils.mkdirhier(dest)
- seendirs.add(dest)
- continue
- # Try to hardlink the file, copy if that fails
- destdir = os.path.dirname(dest)
- if destdir not in seendirs:
- bb.utils.mkdirhier(destdir)
- seendirs.add(destdir)
- try:
- os.link(l, dest)
- except OSError as err:
- if err.errno == errno.EXDEV:
- bb.utils.copyfile(l, dest)
- else:
- raise
-
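The per-file step in the loop above boils down to a hardlink with a copy fallback; a minimal sketch using only the standard library (paths are illustrative):

    import errno
    import os
    import shutil

    def link_or_copy(src, dest):
        # Hardlink for speed; fall back to a plain copy when src and dest are
        # on different filesystems (EXDEV), re-raise anything else.
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        try:
            os.link(src, dest)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.copyfile(src, dest)
            else:
                raise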
-class RpmPM(PackageManager):
- def __init__(self,
- d,
- target_rootfs,
- target_vendor,
- task_name='target',
- arch_var=None,
- os_var=None,
- rpm_repo_workdir="oe-rootfs-repo",
- filterbydependencies=True):
- super(RpmPM, self).__init__(d, target_rootfs)
- self.target_vendor = target_vendor
- self.task_name = task_name
- if arch_var is None:
- self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
- else:
- self.archs = self.d.getVar(arch_var).replace("-","_")
- if task_name == "host":
- self.primary_arch = self.d.getVar('SDK_ARCH')
- else:
- self.primary_arch = self.d.getVar('MACHINE_ARCH')
-
- self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
- create_packages_dir(self.d, self.rpm_repo_dir, d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
-
- self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
- self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
- self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
- self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- def _configure_dnf(self):
- # libsolv handles 'noarch' internally; we don't need to specify it explicitly
- archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
- # This prevents accidental matching against libsolv's built-in policies
- if len(archs) <= 1:
- archs = archs + ["bogusarch"]
- confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
- bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
- distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
-
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
-
-
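The arch filtering above can be illustrated in isolation; the architecture list here is an example only:

    def dnf_arch_var(all_archs):
        # libsolv knows 'noarch' already; the remaining archs are listed most
        # specific first and padded with a dummy entry so a single real arch
        # never matches libsolv's built-in policies by accident.
        archs = [a for a in reversed(all_archs.split()) if a not in ("any", "all", "noarch")]
        if len(archs) <= 1:
            archs = archs + ["bogusarch"]
        return ":".join(archs)

    # dnf_arch_var("all any noarch core2_64 qemux86_64") -> "qemux86_64:core2_64"
    # dnf_arch_var("all any noarch qemux86_64")          -> "qemux86_64:bogusarch"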
- def _configure_rpm(self):
- # We need to configure rpm to use our primary package architecture as the installation architecture,
- # and to make it compatible with other package architectures that we use.
- # Otherwise it will refuse to proceed with package installation.
- platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
- rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
- bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
- open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
-
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
- if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
- else:
- open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
-
- if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
- signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
- pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
- signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
- rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
- cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Importing GPG key failed. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def create_configs(self):
- self._configure_dnf()
- self._configure_rpm()
-
- def write_index(self):
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- RpmIndexer(self.d, self.rpm_repo_dir).write_index()
- bb.utils.unlockfile(lf)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- from urllib.parse import urlparse
-
- if feed_uris == "":
- return
-
- gpg_opts = ''
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- gpg_opts += 'repo_gpgcheck=1\n'
- gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
-
- if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
- gpg_opts += 'gpgcheck=0\n'
-
- bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
- remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- for uri in remote_uris:
- repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
- if feed_archs is not None:
- for arch in feed_archs.split():
- repo_uri = uri + "/" + arch
- repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
- repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
- else:
- repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
- repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
-
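A sketch of how the repo identifiers above are derived from a feed URI (the URI is hypothetical):

    from urllib.parse import urlparse

    def remote_repo_id(uri):
        # The path components of the URI are joined with '-' onto a fixed
        # prefix, giving a stable id such as "oe-remote-repo-oe-core2-64".
        return "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))

    # remote_repo_id("http://feed.example/oe/core2-64") -> "oe-remote-repo-oe-core2-64"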
- def _prepare_pkg_transaction(self):
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
-
- def install(self, pkgs, attempt_only = False):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
- package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
- exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
-
- output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
- (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
- (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
- (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
- ["install"] +
- pkgs)
-
- failed_scriptlets_pkgnames = collections.OrderedDict()
- for line in output.splitlines():
- if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
- failed_scriptlets_pkgnames[line.split()[-1]] = True
-
- if len(failed_scriptlets_pkgnames) > 0:
- failed_postinsts_warn(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
- for pkg in failed_scriptlets_pkgnames.keys():
- self.save_rpmpostinst(pkg)
-
- def remove(self, pkgs, with_dependencies = True):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- if with_dependencies:
- self._invoke_dnf(["remove"] + pkgs)
- else:
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
-
- try:
- bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
- output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
-
- def upgrade(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["upgrade"])
-
- def autoremove(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["autoremove"])
-
- def remove_packaging_data(self):
- self._invoke_dnf(["clean", "all"])
- for dir in self.packaging_data_dirs:
- bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
-
- def backup_packaging_data(self):
- # Save the packaging dirs for incremental rpm image generation
- if os.path.exists(self.saved_packaging_data):
- bb.utils.remove(self.saved_packaging_data, True)
- for i in self.packaging_data_dirs:
- source_dir = oe.path.join(self.target_rootfs, i)
- target_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir, target_dir, symlinks=True)
-
- def recovery_packaging_data(self):
- # Move the rpmlib back
- if os.path.exists(self.saved_packaging_data):
- for i in self.packaging_data_dirs:
- target_dir = oe.path.join(self.target_rootfs, i)
- if os.path.exists(target_dir):
- bb.utils.remove(target_dir, True)
- source_dir = oe.path.join(self.saved_packaging_data, i)
- shutil.copytree(source_dir,
- target_dir,
- symlinks=True)
-
- def list_installed(self):
- output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
- print_output = False)
- packages = {}
- current_package = None
- current_deps = None
- current_state = "initial"
- for line in output.splitlines():
- if line.startswith("Package:"):
- package_info = line.split(" ")[1:]
- current_package = package_info[0]
- package_arch = package_info[1]
- package_version = package_info[2]
- package_rpm = package_info[3]
- packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
- current_deps = []
- elif line.startswith("Dependencies:"):
- current_state = "dependencies"
- elif line.startswith("Recommendations"):
- current_state = "recommendations"
- elif line.startswith("DependenciesEndHere:"):
- current_state = "initial"
- packages[current_package]["deps"] = current_deps
- elif len(line) > 0:
- if current_state == "dependencies":
- current_deps.append(line)
- elif current_state == "recommendations":
- current_deps.append("%s [REC]" % line)
-
- return packages
-
- def update(self):
- self._invoke_dnf(["makecache", "--refresh"])
-
- def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
- os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
-
- dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
- standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
- "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
- "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
- "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
- "--installroot=%s" % (self.target_rootfs),
- "--setopt=logdir=%s" % (self.d.getVar('T'))
- ]
- cmd = [dnf_cmd] + standard_dnf_args + dnf_args
- bb.note('Running %s' % ' '.join(cmd))
- try:
- output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
- if print_output:
- bb.note(output)
- return output
- except subprocess.CalledProcessError as e:
- if print_output:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- else:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:" % (' '.join(cmd), e.returncode))
- return e.output.decode("utf-8")
-
- def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
- return pkgs
-
- def load_old_install_solution(self):
- if not os.path.exists(self.solution_manifest):
- return []
-
- return open(self.solution_manifest, 'r').read().split()
-
- def _script_num_prefix(self, path):
- files = os.listdir(path)
- numbers = set()
- numbers.add(99)
- for f in files:
- numbers.add(int(f.split("-")[0]))
- return max(numbers) + 1
-
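A standalone sketch of the numbering scheme used for saved postinstall scripts (the directory contents are invented):

    import os

    def script_num_prefix(path):
        # Saved scripts are named "<number>-<package>"; the next one gets a
        # number one higher than any already present, starting at 100.
        numbers = {99}
        for f in os.listdir(path):
            numbers.add(int(f.split("-")[0]))
        return max(numbers) + 1

    # With "100-busybox" and "101-base-files" in the directory, the next prefix
    # is 102; with an empty directory it is 100.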
- def save_rpmpostinst(self, pkg):
- bb.note("Saving postinstall script of %s" % (pkg))
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
-
- try:
- output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
-
- # may need to prepend #!/bin/sh to output
-
- target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
- bb.utils.mkdirhier(target_path)
- num = self._script_num_prefix(target_path)
- saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
- os.chmod(saved_script_name, 0o755)
-
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.save_rpmpostinst(pkg)
-
- def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
- pkg_name = output.splitlines()[-1]
- if not pkg_name.endswith(".rpm"):
- bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
-
- cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
- rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'. "
- "File %s doesn't exist" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
-
- try:
- cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgDpkgPM(PackageManager):
- def __init__(self, d, target_rootfs):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- super(OpkgDpkgPM, self).__init__(d, target_rootfs)
-
- def package_info(self, pkg, cmd):
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
-
- def extract(self, pkg, pkg_info):
- """
- Returns the path to a tmpdir containing the contents of a package.
-
- Deleting the tmpdir is the responsibility of the caller.
-
- This method extracts the common parts for Opkg and Dpkg
- """
-
- ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
- tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
- pkg_path = pkg_info[pkg]["filepath"]
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'. "
- "File %s doesn't exist" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
- if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
- data_tar = 'data.tar.xz'
- else:
- data_tar = 'data.tar.gz'
-
- try:
- cmd = [ar_cmd, 'x', pkg_path]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- cmd = [tar_cmd, 'xf', data_tar]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
- bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
- os.chdir(current_dir)
-
- return tmp_dir
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.mark_packages("unpacked", registered_pkgs.split())
-
-class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
- super(OpkgPM, self).__init__(d, target_rootfs)
-
- self.config_file = config_file
- self.pkg_archs = archs
- self.task_name = task_name
-
- self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
- self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
-
- self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
-
- bb.utils.mkdirhier(self.opkg_dir)
-
- self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
- if self.from_feeds:
- self._create_custom_config()
- else:
- self._create_config()
-
- self.indexer = OpkgIndexer(self.d, self.deploy_dir)
-
- def mark_packages(self, status_tag, packages=None):
- """
- This function will change a package's status in the /var/lib/opkg/status file.
- If 'packages' is None then the status_tag will be applied to all
- packages
- """
- status_file = os.path.join(self.opkg_dir, "status")
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
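The status rewriting above can be exercised on an in-memory snippet; the package entry below is invented:

    import re

    def mark_package(status, pkg, status_tag):
        # Rewrite the final word of the package's "Status:" line ("unpacked"
        # or "installed") to status_tag, leaving the other fields untouched.
        return re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                      r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                      status)

    entry = "Package: busybox\nArchitecture: core2-64\nStatus: install ok unpacked\n"
    # mark_package(entry, "busybox", "installed") turns the last line into
    # "Status: install ok installed".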
- def _create_custom_config(self):
- bb.note("Building from feeds activated!")
-
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
-
- if feed_match is not None:
- feed_name = feed_match.group(1)
- feed_uri = feed_match.group(2)
-
- bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
-
- config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
-
- """
- Allow the package deploy directory contents to be used as a quick
- devel-testing feed. This creates individual feed configs for each arch
- subdir of those specified as compatible for the current machine.
- NOTE: Development-helper feature, NOT a full-fledged feed.
- """
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
- for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir"),
- "opkg",
- "local-%s-feed.conf" % arch)
-
- with open(cfg_file_name, "w+") as cfg_file:
- cfg_file.write("src/gz local-%s %s/%s" %
- (arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
- arch))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
-
- def _create_config(self):
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- config_file.write("src oe file:%s\n" % self.deploy_dir)
-
- for arch in self.pkg_archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- if os.path.isdir(pkgs_dir):
- config_file.write("src oe-%s file:%s\n" %
- (arch, pkgs_dir))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore, we need to add
- # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
- # the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
- % self.target_rootfs)
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
-
- with open(rootfs_config, "w+") as config_file:
- uri_iterator = 0
- for uri in feed_uris:
- if archs:
- for arch in archs:
- if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
- continue
- bb.note('Adding opkg feed url-%s-%d (%s)' %
- (arch, uri_iterator, uri))
- config_file.write("src/gz uri-%s-%d %s/%s\n" %
- (arch, uri_iterator, uri, arch))
- else:
- bb.note('Adding opkg feed url-%d (%s)' %
- (uri_iterator, uri))
- config_file.write("src/gz uri-%d %s\n" %
- (uri_iterator, uri))
-
- uri_iterator += 1
-
- def update(self):
- self.deploy_dir_lock()
-
- cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- self.deploy_dir_unlock()
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if not pkgs:
- return
-
- cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- failed_pkgs = []
- for line in output.split('\n'):
- if line.endswith("configuration required on target."):
- bb.warn(line)
- failed_pkgs.append(line.split(".")[0])
- if failed_pkgs:
- failed_postinsts_warn(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
- else:
- cmd = "%s %s --force-depends remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def remove_packaging_data(self):
- bb.utils.remove(self.opkg_dir, True)
- # recreate the directory; it's needed by the PM lock
- bb.utils.mkdirhier(self.opkg_dir)
-
- def remove_lists(self):
- if not self.from_feeds:
- bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
-
- def list_installed(self):
- return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
-
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If the status file exists, the bad recommendations have already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
- def dummy_install(self, pkgs):
- """
- Dummy-install pkgs and return the log of the output.
- """
- if len(pkgs) == 0:
- return
-
- # Create a temp dir as the opkg root for the dummy installation
- temp_rootfs = self.d.expand('${T}/opkg')
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
- temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
- bb.utils.mkdirhier(temp_opkg_dir)
-
- opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS")
-
- cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- # Dummy installation
- cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
- opkg_args,
- ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- bb.utils.remove(temp_rootfs, True)
-
- return output
-
- def backup_packaging_data(self):
- # Save the opkglib for incremental ipk image generation
- if os.path.exists(self.saved_opkg_dir):
- bb.utils.remove(self.saved_opkg_dir, True)
- shutil.copytree(self.opkg_dir,
- self.saved_opkg_dir,
- symlinks=True)
-
- def recover_packaging_data(self):
- # Move the opkglib back
- if os.path.exists(self.saved_opkg_dir):
- if os.path.exists(self.opkg_dir):
- bb.utils.remove(self.opkg_dir, True)
-
- bb.note('Recover packaging data')
- shutil.copytree(self.saved_opkg_dir,
- self.opkg_dir,
- symlinks=True)
-
- def package_info(self, pkg):
- """
- Returns a dictionary with the package info.
- """
- cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
- pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["arch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- def extract(self, pkg):
- """
- Returns the path to a tmpdir containing the contents of a package.
-
- Deleting the tmpdir is the responsibility of the caller.
- """
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
-
- return tmp_dir
-
-class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
- super(DpkgPM, self).__init__(d, target_rootfs)
- self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
- if apt_conf_dir is None:
- self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
- else:
- self.apt_conf_dir = apt_conf_dir
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
- self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
-
- self.apt_args = d.getVar("APT_ARGS")
-
- self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
-
- self._create_configs(archs, base_archs)
-
- self.indexer = DpkgIndexer(self.d, self.deploy_dir)
-
- def mark_packages(self, status_tag, packages=None):
- """
- This function will change a package's status in the /var/lib/dpkg/status file.
- If 'packages' is None then the status_tag will be applied to all
- packages
- """
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- def run_pre_post_installs(self, package_name=None):
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
- info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
- control_scripts = [
- ControlScript(".preinst", "Preinstall", "install"),
- ControlScript(".postinst", "Postinstall", "configure")]
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
- installed_pkgs = []
-
- with open(status_file, "r") as status:
- for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
- if m is not None:
- installed_pkgs.append(m.group(1))
-
- if package_name is not None and not package_name in installed_pkgs:
- return
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- failed_pkgs = []
- for pkg_name in installed_pkgs:
- for control_script in control_scripts:
- p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
- if os.path.exists(p_full):
- try:
- bb.note("Executing %s for package: %s ..." %
- (control_script.name.lower(), pkg_name))
- output = subprocess.check_output([p_full, control_script.argument],
- stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.warn("%s for package %s failed with %d:\n%s" %
- (control_script.name, pkg_name, e.returncode,
- e.output.decode("utf-8")))
- failed_postinsts_warn([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
- failed_pkgs.append(pkg_name)
- break
-
- if len(failed_pkgs):
- self.mark_packages("unpacked", failed_pkgs)
-
- def update(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- self.deploy_dir_lock()
-
- cmd = "%s update" % self.apt_get_cmd
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if attempt_only and len(pkgs) == 0:
- return
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
- (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- # rename *.dpkg-new files/dirs
- for root, dirs, files in os.walk(self.target_rootfs):
- for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
- if dir != new_dir:
- os.rename(os.path.join(root, dir),
- os.path.join(root, new_dir))
-
- for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
- if file != new_file:
- os.rename(os.path.join(root, file),
- os.path.join(root, new_file))
-
-
- def remove(self, pkgs, with_dependencies=True):
- if with_dependencies:
- os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
- else:
- cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
- " -P --force-depends %s" % \
- (bb.utils.which(os.getenv('PATH'), "dpkg"),
- self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- sources_conf = os.path.join("%s/etc/apt/sources.list"
- % self.target_rootfs)
- arch_list = []
-
- if feed_archs is None:
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
- else:
- arch_list = feed_archs.split()
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
-
- with open(sources_conf, "w+") as sources_file:
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s/%s ./\n" %
- (uri, arch))
- else:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s ./\n" % uri)
-
- def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
-
- if os.path.exists(self.apt_conf_dir):
- bb.utils.remove(self.apt_conf_dir, True)
-
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
-
- arch_list = []
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
-
- with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
- priority = 801
- for arch in arch_list:
- prefs_file.write(
- "Package: *\n"
- "Pin: release l=%s\n"
- "Pin-Priority: %d\n\n" % (arch, priority))
-
- priority += 5
-
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
- for pkg in pkg_exclude.split():
- prefs_file.write(
- "Package: %s\n"
- "Pin: release *\n"
- "Pin-Priority: -1\n\n" % pkg)
-
- arch_list.reverse()
-
- with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
- for arch in arch_list:
- sources_file.write("deb file:%s/ ./\n" %
- os.path.join(self.deploy_dir, arch))
-
- base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
- for variant in multilib_variants.split():
- localdata = bb.data.createCopy(self.d)
- variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH")
- localdata.setVar("DEFAULTTUNE", variant_tune)
- variant_arch = localdata.getVar("DPKG_ARCH")
- if variant_arch not in base_arch_list:
- base_arch_list.append(variant_arch)
-
- with open(self.apt_conf_file, "w+") as apt_conf:
- with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
- architectures = ""
- if match_arch:
- for base_arch in base_arch_list:
- architectures += "\"%s\";" % base_arch
- apt_conf.write(" Architectures {%s};\n" % architectures)
- apt_conf.write(" Architecture \"%s\";\n" % base_archs)
- else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
-
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
-
- if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
- open(os.path.join(target_dpkg_dir, "status"), "w+").close()
- if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
- open(os.path.join(target_dpkg_dir, "available"), "w+").close()
-
- def remove_packaging_data(self):
- bb.utils.remove(os.path.join(self.target_rootfs,
- self.d.getVar('opkglibdir')), True)
- bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
-
- def fix_broken_dependencies(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot fix broken dependencies. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def list_installed(self):
- return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
-
- def package_info(self, pkg):
- """
- Returns a dictionary with the package info.
- """
- cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
- pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["pkgarch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- def extract(self, pkg):
- """
- Returns the path to a tmpdir containing the contents of a package.
-
- Deleting the tmpdir is the responsibility of the caller.
- """
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
-
- return tmp_dir
-
-def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
-
- indexer_map = {
- "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
- }
-
- result = None
-
- for pkg_class in classes:
- if not pkg_class in indexer_map:
- continue
-
- if os.path.exists(indexer_map[pkg_class][1]):
- result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
-
- if result is not None:
- bb.fatal(result)
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagedata.py b/import-layers/yocto-poky/meta/lib/oe/packagedata.py
deleted file mode 100644
index 32e5c82a9..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/packagedata.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import codecs
-import os
-
-def packaged(pkg, d):
- return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
-
-def read_pkgdatafile(fn):
- pkgdata = {}
-
- def decode(str):
- c = codecs.getdecoder("unicode_escape")
- return c(str)[0]
-
- if os.access(fn, os.R_OK):
- import re
- f = open(fn, 'r')
- lines = f.readlines()
- f.close()
- r = re.compile("([^:]+):\s*(.*)")
- for l in lines:
- m = r.match(l)
- if m:
- pkgdata[m.group(1)] = decode(m.group(2))
-
- return pkgdata
-
-def get_subpkgedata_fn(pkg, d):
- return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
-
-def has_subpkgdata(pkg, d):
- return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
-
-def read_subpkgdata(pkg, d):
- return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
-
-def has_pkgdata(pn, d):
- fn = d.expand('${PKGDATA_DIR}/%s' % pn)
- return os.access(fn, os.R_OK)
-
-def read_pkgdata(pn, d):
- fn = d.expand('${PKGDATA_DIR}/%s' % pn)
- return read_pkgdatafile(fn)
-
-#
-# Collapse FOO_pkg variables into FOO
-#
-def read_subpkgdata_dict(pkg, d):
- ret = {}
- subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
- for var in subd:
- newvar = var.replace("_" + pkg, "")
- if newvar == var and var + "_" + pkg in subd:
- continue
- ret[newvar] = subd[var]
- return ret
-
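A quick sketch of the suffix collapsing above, run against an invented pkgdata dictionary:

    def collapse_pkg_vars(subd, pkg):
        # "RDEPENDS_<pkg>" becomes "RDEPENDS"; unsuffixed keys are kept only
        # when no suffixed variant of the same name exists.
        ret = {}
        for var in subd:
            newvar = var.replace("_" + pkg, "")
            if newvar == var and var + "_" + pkg in subd:
                continue
            ret[newvar] = subd[var]
        return ret

    # collapse_pkg_vars({"PKG_busybox": "busybox", "RDEPENDS_busybox": "libc6",
    #                    "PKGSIZE": "1024"}, "busybox")
    # -> {"PKG": "busybox", "RDEPENDS": "libc6", "PKGSIZE": "1024"}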
-def _pkgmap(d):
- """Return a dictionary mapping package to recipe name."""
-
- pkgdatadir = d.getVar("PKGDATA_DIR")
-
- pkgmap = {}
- try:
- files = os.listdir(pkgdatadir)
- except OSError:
- bb.warn("No files in %s?" % pkgdatadir)
- files = []
-
- for pn in [f for f in files if not os.path.isdir(os.path.join(pkgdatadir, f))]:
- try:
- pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
- except OSError:
- continue
-
- packages = pkgdata.get("PACKAGES") or ""
- for pkg in packages.split():
- pkgmap[pkg] = pn
-
- return pkgmap
-
-def pkgmap(d):
- """Return a dictionary mapping package to recipe name.
- Cache the mapping in the metadata"""
-
- pkgmap_data = d.getVar("__pkgmap_data", False)
- if pkgmap_data is None:
- pkgmap_data = _pkgmap(d)
- d.setVar("__pkgmap_data", pkgmap_data)
-
- return pkgmap_data
-
-def recipename(pkg, d):
- """Return the recipe name for the given binary package name."""
-
- return pkgmap(d).get(pkg)
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagegroup.py b/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
deleted file mode 100644
index 4bc5d3e4b..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import itertools
-
-def is_optional(feature, d):
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if packages:
- return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
- else:
- return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
-
-def packages(features, d):
- for feature in features:
- packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
- if not packages:
- packages = d.getVar("PACKAGE_GROUP_%s" % feature)
- for pkg in (packages or "").split():
- yield pkg
-
-def required_packages(features, d):
- req = [feature for feature in features if not is_optional(feature, d)]
- return packages(req, d)
-
-def optional_packages(features, d):
- opt = [feature for feature in features if is_optional(feature, d)]
- return packages(opt, d)
-
-def active_packages(features, d):
- return itertools.chain(required_packages(features, d),
- optional_packages(features, d))
-
-def active_recipes(features, d):
- import oe.packagedata
-
- for pkg in active_packages(features, d):
- recipe = oe.packagedata.recipename(pkg, d)
- if recipe:
- yield recipe
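A hedged usage sketch for the helpers above, assuming 'd' is the BitBake datastore in an image recipe context (the feature names are examples only):

    # features     = d.getVar("IMAGE_FEATURES").split()   # e.g. ['ssh-server-openssh', 'tools-debug']
    # must_have    = list(required_packages(features, d))
    # nice_to_have = list(optional_packages(features, d))
    # everything   = list(active_packages(features, d))   # required followed by optional
    # recipes      = list(active_recipes(features, d))    # recipes providing those packages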
diff --git a/import-layers/yocto-poky/meta/lib/oe/patch.py b/import-layers/yocto-poky/meta/lib/oe/patch.py
deleted file mode 100644
index af7aa5235..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/patch.py
+++ /dev/null
@@ -1,895 +0,0 @@
-import oe.path
-import oe.types
-
-class NotFoundError(bb.BBHandledException):
- def __init__(self, path):
- self.path = path
-
- def __str__(self):
- return "Error: %s not found." % self.path
-
-class CmdError(bb.BBHandledException):
- def __init__(self, command, exitstatus, output):
- self.command = command
- self.status = exitstatus
- self.output = output
-
- def __str__(self):
- return "Command Error: '%s' exited with %d. Output:\n%s" % \
- (self.command, self.status, self.output)
-
-
-def runcmd(args, dir = None):
- import pipes
-
- if dir:
- olddir = os.path.abspath(os.curdir)
- if not os.path.exists(dir):
- raise NotFoundError(dir)
- os.chdir(dir)
- # print("cwd: %s -> %s" % (olddir, dir))
-
- try:
- args = [ pipes.quote(str(arg)) for arg in args ]
- cmd = " ".join(args)
- # print("cmd: %s" % cmd)
- (exitstatus, output) = oe.utils.getstatusoutput(cmd)
- if exitstatus != 0:
- raise CmdError(cmd, exitstatus >> 8, output)
- if " fuzz " in output:
- bb.warn("""
-Some of the context lines in patches were ignored. This can lead to incorrectly applied patches.
-The context lines in the patches can be updated with devtool:
-
- devtool modify <recipe>
- devtool finish --force-patch-refresh <recipe> <layer_path>
-
-Then the updated patches and the source tree (in devtool's workspace)
-should be reviewed to make sure the patches apply in the correct place
-and don't introduce duplicate lines (which can, and does happen
-when some of the context is ignored). Further information:
-http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
-https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
-Details:
-{}""".format(output))
- return output
-
- finally:
- if dir:
- os.chdir(olddir)
-
-class PatchError(Exception):
- def __init__(self, msg):
- self.msg = msg
-
- def __str__(self):
- return "Patch Error: %s" % self.msg
-
-class PatchSet(object):
- defaults = {
- "strippath": 1
- }
-
- def __init__(self, dir, d):
- self.dir = dir
- self.d = d
- self.patches = []
- self._current = None
-
- def current(self):
- return self._current
-
- def Clean(self):
- """
- Clean out the patch set. Generally includes unapplying all
- patches and wiping out all associated metadata.
- """
- raise NotImplementedError()
-
- def Import(self, patch, force):
- if not patch.get("file"):
- if not patch.get("remote"):
- raise PatchError("Patch file must be specified in patch import.")
- else:
- patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
-
- for param in PatchSet.defaults:
- if not patch.get(param):
- patch[param] = PatchSet.defaults[param]
-
- if patch.get("remote"):
- patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
-
- patch["filemd5"] = bb.utils.md5_file(patch["file"])
-
- def Push(self, force):
- raise NotImplementedError()
-
- def Pop(self, force):
- raise NotImplementedError()
-
- def Refresh(self, remote = None, all = None):
- raise NotImplementedError()
-
- @staticmethod
- def getPatchedFiles(patchfile, striplevel, srcdir=None):
- """
- Read a patch file and determine which files it will modify.
- Params:
- patchfile: the patch file to read
- striplevel: the strip level at which the patch is going to be applied
- srcdir: optional path to join onto the patched file paths
- Returns:
- A list of tuples of file path and change mode ('A' for add,
- 'D' for delete or 'M' for modify)
- """
-
- def patchedpath(patchline):
- filepth = patchline.split()[1]
- if filepth.endswith('/dev/null'):
- return '/dev/null'
- filesplit = filepth.split(os.sep)
- if striplevel > len(filesplit):
- bb.error('Patch %s has invalid strip level %d' % (patchfile, striplevel))
- return None
- return os.sep.join(filesplit[striplevel:])
-
- for encoding in ['utf-8', 'latin-1']:
- try:
- copiedmode = False
- filelist = []
- with open(patchfile) as f:
- for line in f:
- if line.startswith('--- '):
- patchpth = patchedpath(line)
- if not patchpth:
- break
- if copiedmode:
- addedfile = patchpth
- else:
- removedfile = patchpth
- elif line.startswith('+++ '):
- addedfile = patchedpath(line)
- if not addedfile:
- break
- elif line.startswith('*** '):
- copiedmode = True
- removedfile = patchedpath(line)
- if not removedfile:
- break
- else:
- removedfile = None
- addedfile = None
-
- if addedfile and removedfile:
- if removedfile == '/dev/null':
- mode = 'A'
- elif addedfile == '/dev/null':
- mode = 'D'
- else:
- mode = 'M'
- if srcdir:
- fullpath = os.path.abspath(os.path.join(srcdir, addedfile))
- else:
- fullpath = addedfile
- filelist.append((fullpath, mode))
- except UnicodeDecodeError:
- continue
- break
- else:
- raise PatchError('Unable to decode %s' % patchfile)
-
- return filelist
-
-
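The add/delete/modify classification used above is easy to show in isolation (file names are illustrative):

    def change_mode(removedfile, addedfile):
        # '--- /dev/null' marks an added file, '+++ /dev/null' a deleted one;
        # everything else is a modification.
        if removedfile == '/dev/null':
            return 'A'
        if addedfile == '/dev/null':
            return 'D'
        return 'M'

    # change_mode('/dev/null', 'b/new-file.c') -> 'A'
    # change_mode('a/old-file.c', '/dev/null') -> 'D'
    # change_mode('a/README', 'b/README')      -> 'M'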
-class PatchTree(PatchSet):
- def __init__(self, dir, d):
- PatchSet.__init__(self, dir, d)
- self.patchdir = os.path.join(self.dir, 'patches')
- self.seriespath = os.path.join(self.dir, 'patches', 'series')
- bb.utils.mkdirhier(self.patchdir)
-
- def _appendPatchFile(self, patch, strippath):
- with open(self.seriespath, 'a') as f:
- f.write(os.path.basename(patch) + "," + strippath + "\n")
- shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
- runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
-
- def _removePatch(self, p):
- patch = {}
- patch['file'] = p.split(",")[0]
- patch['strippath'] = p.split(",")[1]
- self._applypatch(patch, False, True)
-
- def _removePatchFile(self, all = False):
- if not os.path.exists(self.seriespath):
- return
- with open(self.seriespath, 'r+') as f:
- patches = f.readlines()
- if all:
- for p in reversed(patches):
- self._removePatch(os.path.join(self.patchdir, p.strip()))
- patches = []
- else:
- self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
- patches.pop()
- with open(self.seriespath, 'w') as f:
- for p in patches:
- f.write(p)
-
- def Import(self, patch, force = None):
- """"""
- PatchSet.Import(self, patch, force)
-
- if self._current is not None:
- i = self._current + 1
- else:
- i = 0
- self.patches.insert(i, patch)
-
- def _applypatch(self, patch, force = False, reverse = False, run = True):
- shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
- if reverse:
- shellcmd.append('-R')
-
- if not run:
- return "sh -c " + " ".join(shellcmd)
-
- if not force:
- shellcmd.append('--dry-run')
-
- try:
- output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
-
- if force:
- return
-
- shellcmd.pop(len(shellcmd) - 1)
- output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- except CmdError as err:
- raise bb.BBHandledException("Applying '%s' failed:\n%s" %
- (os.path.basename(patch['file']), err.output))
-
- if not reverse:
- self._appendPatchFile(patch['file'], patch['strippath'])
-
- return output
-
- def Push(self, force = False, all = False, run = True):
- bb.note("self._current is %s" % self._current)
- bb.note("patches is %s" % self.patches)
- if all:
- for i in self.patches:
- bb.note("applying patch %s" % i)
- self._applypatch(i, force)
- self._current = i
- else:
- if self._current is not None:
- next = self._current + 1
- else:
- next = 0
-
- bb.note("applying patch %s" % self.patches[next])
- ret = self._applypatch(self.patches[next], force)
-
- self._current = next
- return ret
-
- def Pop(self, force = None, all = None):
- if all:
- self._removePatchFile(True)
- self._current = None
- else:
- self._removePatchFile(False)
-
- if self._current == 0:
- self._current = None
-
- if self._current is not None:
- self._current = self._current - 1
-
- def Clean(self):
- """"""
- self.Pop(all=True)
-
-class GitApplyTree(PatchTree):
- patch_line_prefix = '%% original patch'
- ignore_commit_prefix = '%% ignore'
-
- def __init__(self, dir, d):
- PatchTree.__init__(self, dir, d)
- self.commituser = d.getVar('PATCH_GIT_USER_NAME')
- self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
-
- @staticmethod
- def extractPatchHeader(patchfile):
- """
- Extract just the header lines from the top of a patch file
- """
- for encoding in ['utf-8', 'latin-1']:
- lines = []
- try:
- with open(patchfile, 'r', encoding=encoding) as f:
- for line in f:
- if line.startswith('Index: ') or line.startswith('diff -') or line.startswith('---'):
- break
- lines.append(line)
- except UnicodeDecodeError:
- continue
- break
- else:
- raise PatchError('Unable to find a character encoding to decode %s' % patchfile)
- return lines
-
- @staticmethod
- def decodeAuthor(line):
- from email.header import decode_header
- authorval = line.split(':', 1)[1].strip().replace('"', '')
- result = decode_header(authorval)[0][0]
- if hasattr(result, 'decode'):
- result = result.decode('utf-8')
- return result
-
- @staticmethod
- def interpretPatchHeader(headerlines):
- import re
-        author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
-        from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
- outlines = []
- author = None
- date = None
- subject = None
- for line in headerlines:
- if line.startswith('Subject: '):
- subject = line.split(':', 1)[1]
- # Remove any [PATCH][oe-core] etc.
- subject = re.sub(r'\[.+?\]\s*', '', subject)
- continue
- elif line.startswith('From: ') or line.startswith('Author: '):
- authorval = GitApplyTree.decodeAuthor(line)
- # git is fussy about author formatting i.e. it must be Name <email@domain>
- if author_re.match(authorval):
- author = authorval
- continue
- elif line.startswith('Date: '):
- if date is None:
- dateval = line.split(':', 1)[1].strip()
- # Very crude check for date format, since git will blow up if it's not in the right
- # format. Without e.g. a python-dateutils dependency we can't do a whole lot more
- if len(dateval) > 12:
- date = dateval
- continue
- elif not author and line.lower().startswith('signed-off-by: '):
- authorval = GitApplyTree.decodeAuthor(line)
- # git is fussy about author formatting i.e. it must be Name <email@domain>
- if author_re.match(authorval):
- author = authorval
- elif from_commit_re.match(line):
- # We don't want the From <commit> line - if it's present it will break rebasing
- continue
- outlines.append(line)
-
- if not subject:
- firstline = None
- for line in headerlines:
- line = line.strip()
- if firstline:
- if line:
- # Second line is not blank, the first line probably isn't usable
- firstline = None
- break
- elif line:
- firstline = line
- if firstline and not firstline.startswith(('#', 'Index:', 'Upstream-Status:')) and len(firstline) < 100:
- subject = firstline
-
- return outlines, author, date, subject
-
- @staticmethod
- def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
- if d:
- commituser = d.getVar('PATCH_GIT_USER_NAME')
- commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
- if commituser:
- cmd += ['-c', 'user.name="%s"' % commituser]
- if commitemail:
- cmd += ['-c', 'user.email="%s"' % commitemail]
-
- @staticmethod
- def prepareCommit(patchfile, commituser=None, commitemail=None):
- """
- Prepare a git commit command line based on the header from a patch file
- (typically this is useful for patches that cannot be applied with "git am" due to formatting)
- """
- import tempfile
- # Process patch header and extract useful information
- lines = GitApplyTree.extractPatchHeader(patchfile)
- outlines, author, date, subject = GitApplyTree.interpretPatchHeader(lines)
- if not author or not subject or not date:
- try:
- shellcmd = ["git", "log", "--format=email", "--follow", "--diff-filter=A", "--", patchfile]
- out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.dirname(patchfile))
- except CmdError:
- out = None
- if out:
- _, newauthor, newdate, newsubject = GitApplyTree.interpretPatchHeader(out.splitlines())
- if not author:
- # If we're setting the author then the date should be set as well
- author = newauthor
- date = newdate
- elif not date:
- # If we don't do this we'll get the current date, at least this will be closer
- date = newdate
- if not subject:
- subject = newsubject
- if subject and outlines and not outlines[0].strip() == subject:
- outlines.insert(0, '%s\n\n' % subject.strip())
-
- # Write out commit message to a file
- with tempfile.NamedTemporaryFile('w', delete=False) as tf:
- tmpfile = tf.name
- for line in outlines:
- tf.write(line)
- # Prepare git command
- cmd = ["git"]
- GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
- cmd += ["commit", "-F", tmpfile]
- # git doesn't like plain email addresses as authors
- if author and '<' in author:
- cmd.append('--author="%s"' % author)
- if date:
- cmd.append('--date="%s"' % date)
- return (tmpfile, cmd)
-
- @staticmethod
- def extractPatches(tree, startcommit, outdir, paths=None):
- import tempfile
- import shutil
- import re
- tempdir = tempfile.mkdtemp(prefix='oepatch')
- try:
- shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
- if paths:
- shellcmd.append('--')
- shellcmd.extend(paths)
- out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
- if out:
- for srcfile in out.split():
- for encoding in ['utf-8', 'latin-1']:
- patchlines = []
- outfile = None
- try:
- with open(srcfile, 'r', encoding=encoding) as f:
- for line in f:
- checkline = line
- if checkline.startswith('Subject: '):
- checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
- if checkline.startswith(GitApplyTree.patch_line_prefix):
- outfile = line.split()[-1].strip()
- continue
- if checkline.startswith(GitApplyTree.ignore_commit_prefix):
- continue
- patchlines.append(line)
- except UnicodeDecodeError:
- continue
- break
- else:
- raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
-
- if not outfile:
- outfile = os.path.basename(srcfile)
- with open(os.path.join(outdir, outfile), 'w') as of:
- for line in patchlines:
- of.write(line)
- finally:
- shutil.rmtree(tempdir)
-
- def _applypatch(self, patch, force = False, reverse = False, run = True):
- import shutil
-
- def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
- if reverse:
- shellcmd.append('-R')
-
- shellcmd.append(patch['file'])
-
- if not run:
-                return "sh -c " + " ".join(shellcmd)
-
- return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
-
- # Add hooks which add a pointer to the original patch file name in the commit message
- reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
- if not reporoot:
- raise Exception("Cannot get repository root for directory %s" % self.dir)
- hooks_dir = os.path.join(reporoot, '.git', 'hooks')
- hooks_dir_backup = hooks_dir + '.devtool-orig'
- if os.path.lexists(hooks_dir_backup):
- raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
- if os.path.lexists(hooks_dir):
- shutil.move(hooks_dir, hooks_dir_backup)
- os.mkdir(hooks_dir)
- commithook = os.path.join(hooks_dir, 'commit-msg')
- applyhook = os.path.join(hooks_dir, 'applypatch-msg')
- with open(commithook, 'w') as f:
- # NOTE: the formatting here is significant; if you change it you'll also need to
- # change other places which read it back
- f.write('echo >> $1\n')
- f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
- os.chmod(commithook, 0o755)
- shutil.copy2(commithook, applyhook)
- try:
- patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
- try:
- shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
- self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
- shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
- return _applypatchhelper(shellcmd, patch, force, reverse, run)
- except CmdError:
- # Need to abort the git am, or we'll still be within it at the end
- try:
- shellcmd = ["git", "--work-tree=%s" % reporoot, "am", "--abort"]
- runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- except CmdError:
- pass
- # git am won't always clean up after itself, sadly, so...
- shellcmd = ["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"]
- runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Also need to take care of any stray untracked files
- shellcmd = ["git", "--work-tree=%s" % reporoot, "clean", "-f"]
- runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
-
- # Fall back to git apply
- shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
- try:
- output = _applypatchhelper(shellcmd, patch, force, reverse, run)
- except CmdError:
- # Fall back to patch
- output = PatchTree._applypatch(self, patch, force, reverse, run)
- # Add all files
- shellcmd = ["git", "add", "-f", "-A", "."]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Exclude the patches directory
- shellcmd = ["git", "reset", "HEAD", self.patchdir]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Commit the result
- (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
- try:
- shellcmd.insert(0, patchfilevar)
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- finally:
- os.remove(tmpfile)
- return output
- finally:
- shutil.rmtree(hooks_dir)
- if os.path.lexists(hooks_dir_backup):
- shutil.move(hooks_dir_backup, hooks_dir)
-
-
-class QuiltTree(PatchSet):
- def _runcmd(self, args, run = True):
- quiltrc = self.d.getVar('QUILTRCFILE')
- if not run:
-            return ["quilt", "--quiltrc", quiltrc] + args
-        runcmd(["quilt", "--quiltrc", quiltrc] + args, self.dir)
-
- def _quiltpatchpath(self, file):
- return os.path.join(self.dir, "patches", os.path.basename(file))
-
-
- def __init__(self, dir, d):
- PatchSet.__init__(self, dir, d)
- self.initialized = False
- p = os.path.join(self.dir, 'patches')
- if not os.path.exists(p):
- os.makedirs(p)
-
- def Clean(self):
- try:
- self._runcmd(["pop", "-a", "-f"])
- oe.path.remove(os.path.join(self.dir, "patches","series"))
- except Exception:
- pass
- self.initialized = True
-
- def InitFromDir(self):
- # read series -> self.patches
- seriespath = os.path.join(self.dir, 'patches', 'series')
- if not os.path.exists(self.dir):
- raise NotFoundError(self.dir)
- if os.path.exists(seriespath):
- with open(seriespath, 'r') as f:
- for line in f.readlines():
- patch = {}
- parts = line.strip().split()
- patch["quiltfile"] = self._quiltpatchpath(parts[0])
- patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
- if len(parts) > 1:
- patch["strippath"] = parts[1][2:]
- self.patches.append(patch)
-
- # determine which patches are applied -> self._current
- try:
- output = runcmd(["quilt", "applied"], self.dir)
-        except CmdError as err:
-            if err.output.strip() == "No patches applied":
-                return
-            else:
-                raise
- output = [val for val in output.split('\n') if not val.startswith('#')]
- for patch in self.patches:
- if os.path.basename(patch["quiltfile"]) == output[-1]:
- self._current = self.patches.index(patch)
- self.initialized = True
-
- def Import(self, patch, force = None):
- if not self.initialized:
- self.InitFromDir()
- PatchSet.Import(self, patch, force)
- oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
- with open(os.path.join(self.dir, "patches", "series"), "a") as f:
- f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
- patch["quiltfile"] = self._quiltpatchpath(patch["file"])
- patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
-
- # TODO: determine if the file being imported:
- # 1) is already imported, and is the same
- # 2) is already imported, but differs
-
- self.patches.insert(self._current or 0, patch)
-
-
- def Push(self, force = False, all = False, run = True):
- # quilt push [-f]
-
- args = ["push"]
- if force:
- args.append("-f")
- if all:
- args.append("-a")
- if not run:
- return self._runcmd(args, run)
-
- self._runcmd(args)
-
- if self._current is not None:
- self._current = self._current + 1
- else:
- self._current = 0
-
- def Pop(self, force = None, all = None):
- # quilt pop [-f]
- args = ["pop"]
- if force:
- args.append("-f")
- if all:
- args.append("-a")
-
- self._runcmd(args)
-
- if self._current == 0:
- self._current = None
-
- if self._current is not None:
- self._current = self._current - 1
-
- def Refresh(self, **kwargs):
- if kwargs.get("remote"):
- patch = self.patches[kwargs["patch"]]
- if not patch:
- raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
- (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
- if type == "file":
- import shutil
- if not patch.get("file") and patch.get("remote"):
- patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
-
- shutil.copyfile(patch["quiltfile"], patch["file"])
- else:
- raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
- else:
- # quilt refresh
- args = ["refresh"]
- if kwargs.get("quiltfile"):
- args.append(os.path.basename(kwargs["quiltfile"]))
- elif kwargs.get("patch"):
- args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
- self._runcmd(args)
-
-class Resolver(object):
- def __init__(self, patchset, terminal):
- raise NotImplementedError()
-
- def Resolve(self):
- raise NotImplementedError()
-
- def Revert(self):
- raise NotImplementedError()
-
- def Finalize(self):
- raise NotImplementedError()
-
-class NOOPResolver(Resolver):
- def __init__(self, patchset, terminal):
- self.patchset = patchset
- self.terminal = terminal
-
- def Resolve(self):
- olddir = os.path.abspath(os.curdir)
- os.chdir(self.patchset.dir)
- try:
- self.patchset.Push()
- except Exception:
- import sys
- os.chdir(olddir)
- raise
-
-# Patch resolver which relies on the user doing all the work involved in the
-# resolution, with the exception of refreshing the remote copy of the patch
-# files (the urls).
-class UserResolver(Resolver):
- def __init__(self, patchset, terminal):
- self.patchset = patchset
- self.terminal = terminal
-
- # Force a push in the patchset, then drop to a shell for the user to
- # resolve any rejected hunks
- def Resolve(self):
- olddir = os.path.abspath(os.curdir)
- os.chdir(self.patchset.dir)
- try:
- self.patchset.Push(False)
- except CmdError as v:
- # Patch application failed
- patchcmd = self.patchset.Push(True, False, False)
-
- t = self.patchset.d.getVar('T')
- if not t:
- bb.msg.fatal("Build", "T not set")
- bb.utils.mkdirhier(t)
- import random
- rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
- with open(rcfile, "w") as f:
- f.write("echo '*** Manual patch resolution mode ***'\n")
- f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
- f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
- f.write("echo ''\n")
- f.write(" ".join(patchcmd) + "\n")
- os.chmod(rcfile, 0o775)
-
- self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
-
- # Construct a new PatchSet after the user's changes, compare the
- # sets, checking patches for modifications, and doing a remote
- # refresh on each.
- oldpatchset = self.patchset
- self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
-
- for patch in self.patchset.patches:
- oldpatch = None
- for opatch in oldpatchset.patches:
- if opatch["quiltfile"] == patch["quiltfile"]:
- oldpatch = opatch
-
- if oldpatch:
- patch["remote"] = oldpatch["remote"]
- if patch["quiltfile"] == oldpatch["quiltfile"]:
- if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
- bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
- # user change? remote refresh
- self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
- else:
- # User did not fix the problem. Abort.
- raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
- except Exception:
- os.chdir(olddir)
- raise
- os.chdir(olddir)
-
-
-def patch_path(url, fetch, workdir, expand=True):
- """Return the local path of a patch, or None if this isn't a patch"""
-
- local = fetch.localpath(url)
- base, ext = os.path.splitext(os.path.basename(local))
- if ext in ('.gz', '.bz2', '.xz', '.Z'):
- if expand:
- local = os.path.join(workdir, base)
- ext = os.path.splitext(base)[1]
-
- urldata = fetch.ud[url]
- if "apply" in urldata.parm:
- apply = oe.types.boolean(urldata.parm["apply"])
- if not apply:
- return
- elif ext not in (".diff", ".patch"):
- return
-
- return local
-
-def src_patches(d, all=False, expand=True):
- workdir = d.getVar('WORKDIR')
- fetch = bb.fetch2.Fetch([], d)
- patches = []
- sources = []
- for url in fetch.urls:
- local = patch_path(url, fetch, workdir, expand)
- if not local:
- if all:
- local = fetch.localpath(url)
- sources.append(local)
- continue
-
- urldata = fetch.ud[url]
- parm = urldata.parm
- patchname = parm.get('pname') or os.path.basename(local)
-
- apply, reason = should_apply(parm, d)
- if not apply:
- if reason:
- bb.note("Patch %s %s" % (patchname, reason))
- continue
-
- patchparm = {'patchname': patchname}
- if "striplevel" in parm:
- striplevel = parm["striplevel"]
- elif "pnum" in parm:
- #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
- striplevel = parm["pnum"]
- else:
- striplevel = '1'
- patchparm['striplevel'] = striplevel
-
- patchdir = parm.get('patchdir')
- if patchdir:
- patchparm['patchdir'] = patchdir
-
- localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
- patches.append(localurl)
-
- if all:
- return sources
-
- return patches
-
-
-def should_apply(parm, d):
- if "mindate" in parm or "maxdate" in parm:
- pn = d.getVar('PN')
- srcdate = d.getVar('SRCDATE_%s' % pn)
- if not srcdate:
- srcdate = d.getVar('SRCDATE')
-
- if srcdate == "now":
- srcdate = d.getVar('DATE')
-
- if "maxdate" in parm and parm["maxdate"] < srcdate:
- return False, 'is outdated'
-
- if "mindate" in parm and parm["mindate"] > srcdate:
- return False, 'is predated'
-
-
- if "minrev" in parm:
- srcrev = d.getVar('SRCREV')
- if srcrev and srcrev < parm["minrev"]:
- return False, 'applies to later revisions'
-
- if "maxrev" in parm:
- srcrev = d.getVar('SRCREV')
- if srcrev and srcrev > parm["maxrev"]:
- return False, 'applies to earlier revisions'
-
- if "rev" in parm:
- srcrev = d.getVar('SRCREV')
- if srcrev and parm["rev"] not in srcrev:
- return False, "doesn't apply to revision"
-
- if "notrev" in parm:
- srcrev = d.getVar('SRCREV')
- if srcrev and parm["notrev"] in srcrev:
- return False, "doesn't apply to revision"
-
- return True, None
-
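
[Illustrative note] should_apply() above gates each SRC_URI patch on optional mindate/maxdate and SRCREV-related URL parameters. A minimal sketch of that gating, assuming should_apply() from the file above is importable and using a hypothetical stand-in datastore (FakeData is not part of the original code):

    # Hypothetical stand-in for the BitBake datastore, for demonstration only.
    class FakeData:
        def __init__(self, values):
            self.values = values
        def getVar(self, name):
            return self.values.get(name)

    d = FakeData({'PN': 'example', 'SRCDATE': '20170101', 'SRCREV': 'abc123'})

    # A patch whose maxdate is earlier than SRCDATE is skipped as outdated.
    print(should_apply({'maxdate': '20161231'}, d))   # (False, 'is outdated')
    # A patch pinned to a revision applies only when SRCREV contains it.
    print(should_apply({'rev': 'abc123'}, d))         # (True, None)
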
diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py
deleted file mode 100644
index 76c58fa76..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/path.py
+++ /dev/null
@@ -1,261 +0,0 @@
-import errno
-import glob
-import shutil
-import subprocess
-import os.path
-
-def join(*paths):
- """Like os.path.join but doesn't treat absolute RHS specially"""
- return os.path.normpath("/".join(paths))
-
-def relative(src, dest):
- """ Return a relative path from src to dest.
-
-    >>> relative("/usr/bin", "/tmp/foo/bar")
-    '../../tmp/foo/bar'
-
-    >>> relative("/usr/bin", "/usr/lib")
-    '../lib'
-
-    >>> relative("/tmp", "/tmp/foo/bar")
-    'foo/bar'
- """
-
- return os.path.relpath(dest, src)
-
-def make_relative_symlink(path):
- """ Convert an absolute symlink to a relative one """
- if not os.path.islink(path):
- return
- link = os.readlink(path)
- if not os.path.isabs(link):
- return
-
- # find the common ancestor directory
- ancestor = path
- depth = 0
- while ancestor and not link.startswith(ancestor):
- ancestor = ancestor.rpartition('/')[0]
- depth += 1
-
- if not ancestor:
- print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
- return
-
- base = link.partition(ancestor)[2].strip('/')
- while depth > 1:
- base = "../" + base
- depth -= 1
-
- os.remove(path)
- os.symlink(base, path)
-
-def replace_absolute_symlinks(basedir, d):
- """
- Walk basedir looking for absolute symlinks and replacing them with relative ones.
- The absolute links are assumed to be relative to basedir
- (compared to make_relative_symlink above which tries to compute common ancestors
- using pattern matching instead)
- """
- for walkroot, dirs, files in os.walk(basedir):
- for file in files + dirs:
- path = os.path.join(walkroot, file)
- if not os.path.islink(path):
- continue
- link = os.readlink(path)
- if not os.path.isabs(link):
- continue
- walkdir = os.path.dirname(path.rpartition(basedir)[2])
- base = os.path.relpath(link, walkdir)
- bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
- os.remove(path)
- os.symlink(base, path)
-
-def format_display(path, metadata):
- """ Prepare a path for display to the user. """
- rel = relative(metadata.getVar("TOPDIR"), path)
- if len(rel) > len(path):
- return path
- else:
- return rel
-
-def copytree(src, dst):
-    # We could use something like shutil.copytree here but it turns out
-    # to be slow. It takes twice as long copying to an empty directory.
-    # If dst already has contents, performance can be 15 times slower.
-    # This way we also preserve hardlinks between files in the tree.
-
- bb.utils.mkdirhier(dst)
- cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
-
-def copyhardlinktree(src, dst):
- """ Make the hard link when possible, otherwise copy. """
- bb.utils.mkdirhier(dst)
- if os.path.isdir(src) and not len(os.listdir(src)):
- return
-
- if (os.stat(src).st_dev == os.stat(dst).st_dev):
- # Need to copy directories only with tar first since cp will error if two
- # writers try and create a directory at the same time
- cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
- source = ''
- if os.path.isdir(src):
- if len(glob.glob('%s/.??*' % src)) > 0:
- source = './.??* '
- source += './*'
- s_dir = src
- else:
- source = src
- s_dir = os.getcwd()
- cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
- subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
- else:
- copytree(src, dst)
-
-def remove(path, recurse=True):
- """
- Equivalent to rm -f or rm -rf
- NOTE: be careful about passing paths that may contain filenames with
- wildcards in them (as opposed to passing an actual wildcarded path) -
- since we use glob.glob() to expand the path. Filenames containing
- square brackets are particularly problematic since the they may not
- actually expand to match the original filename.
- """
- for name in glob.glob(path):
- try:
- os.unlink(name)
- except OSError as exc:
- if recurse and exc.errno == errno.EISDIR:
- shutil.rmtree(name)
- elif exc.errno != errno.ENOENT:
- raise
-
-def symlink(source, destination, force=False):
- """Create a symbolic link"""
- try:
- if force:
- remove(destination)
- os.symlink(source, destination)
- except OSError as e:
- if e.errno != errno.EEXIST or os.readlink(destination) != source:
- raise
-
-def find(dir, **walkoptions):
- """ Given a directory, recurses into that directory,
- returning all files as absolute paths. """
-
- for root, dirs, files in os.walk(dir, **walkoptions):
- for file in files:
- yield os.path.join(root, file)
-
-
-## realpath() related functions
-def __is_path_below(file, root):
- return (file + os.path.sep).startswith(root)
-
-def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
- """Calculates real path of symlink 'start' + 'rel_path' below
- 'root'; no part of 'start' below 'root' must contain symlinks. """
- have_dir = True
-
- for d in rel_path.split(os.path.sep):
- if not have_dir and not assume_dir:
- raise OSError(errno.ENOENT, "no such directory %s" % start)
-
- if d == os.path.pardir: # '..'
- if len(start) >= len(root):
- # do not follow '..' before root
- start = os.path.dirname(start)
- else:
- # emit warning?
- pass
- else:
- (start, have_dir) = __realpath(os.path.join(start, d),
- root, loop_cnt, assume_dir)
-
- assert(__is_path_below(start, root))
-
- return start
-
-def __realpath(file, root, loop_cnt, assume_dir):
- while os.path.islink(file) and len(file) >= len(root):
- if loop_cnt == 0:
- raise OSError(errno.ELOOP, file)
-
- loop_cnt -= 1
- target = os.path.normpath(os.readlink(file))
-
- if not os.path.isabs(target):
- tdir = os.path.dirname(file)
- assert(__is_path_below(tdir, root))
- else:
- tdir = root
-
- file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
-
- try:
- is_dir = os.path.isdir(file)
-    except:
-        is_dir = False
-
- return (file, is_dir)
-
-def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
- """ Returns the canonical path of 'file' with assuming a
- toplevel 'root' directory. When 'use_physdir' is set, all
- preceding path components of 'file' will be resolved first;
- this flag should be set unless it is guaranteed that there is
- no symlink in the path. When 'assume_dir' is not set, missing
- path components will raise an ENOENT error"""
-
- root = os.path.normpath(root)
- file = os.path.normpath(file)
-
- if not root.endswith(os.path.sep):
- # letting root end with '/' makes some things easier
- root = root + os.path.sep
-
- if not __is_path_below(file, root):
- raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
-
- try:
- if use_physdir:
- file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
- else:
- file = __realpath(file, root, loop_cnt, assume_dir)[0]
- except OSError as e:
- if e.errno == errno.ELOOP:
- # make ELOOP more readable; without catching it, there will
- # be printed a backtrace with 100s of OSError exceptions
- # else
- raise OSError(errno.ELOOP,
-                          "too many recursions while resolving '%s'; loop in '%s'" %
- (file, e.strerror))
-
- raise
-
- return file
-
-def is_path_parent(possible_parent, *paths):
- """
- Return True if a path is the parent of another, False otherwise.
- Multiple paths to test can be specified in which case all
- specified test paths must be under the parent in order to
- return True.
- """
- def abs_path_trailing(pth):
- pth_abs = os.path.abspath(pth)
- if not pth_abs.endswith(os.sep):
- pth_abs += os.sep
- return pth_abs
-
- possible_parent_abs = abs_path_trailing(possible_parent)
- if not paths:
- return False
- for path in paths:
- path_abs = abs_path_trailing(path)
- if not path_abs.startswith(possible_parent_abs):
- return False
- return True
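
[Illustrative note] A short sketch of the expected behaviour of the path helpers above, assuming the module is importable as oe.path:

    import oe.path

    print(oe.path.join('/usr', '/bin'))               # '/usr/bin' (absolute RHS is not special-cased)
    print(oe.path.relative('/usr/bin', '/usr/lib'))   # '../lib'
    # is_path_parent() returns True only if every test path is below the parent.
    print(oe.path.is_path_parent('/usr', '/usr/lib', '/usr/bin'))   # True
    print(oe.path.is_path_parent('/usr', '/usr/lib', '/etc'))       # False
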
diff --git a/import-layers/yocto-poky/meta/lib/oe/prservice.py b/import-layers/yocto-poky/meta/lib/oe/prservice.py
deleted file mode 100644
index 32dfc15e8..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/prservice.py
+++ /dev/null
@@ -1,126 +0,0 @@
-
-def prserv_make_conn(d, check = False):
- import prserv.serv
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
- try:
- conn = None
- conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
- if check:
- if not conn.ping():
- raise Exception('service not available')
- d.setVar("__PRSERV_CONN",conn)
- except Exception as exc:
- bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
-
- return conn
-
-def prserv_dump_db(d):
- if not d.getVar('PRSERV_HOST'):
- bb.error("Not using network based PR service")
- return None
-
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
-            bb.error("Failed to connect to the remote PR service")
- return None
-
- #dump db
- opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
- opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
- opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
- opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
- return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
-
-def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
- if not d.getVar('PRSERV_HOST'):
- bb.error("Not using network based PR service")
- return None
-
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
-            bb.error("Failed to connect to the remote PR service")
- return None
- #get the entry values
- imported = []
- prefix = "PRAUTO$"
- for v in d.keys():
- if v.startswith(prefix):
- (remain, sep, checksum) = v.rpartition('$')
- (remain, sep, pkgarch) = remain.rpartition('$')
- (remain, sep, version) = remain.rpartition('$')
- if (remain + '$' != prefix) or \
- (filter_version and filter_version != version) or \
- (filter_pkgarch and filter_pkgarch != pkgarch) or \
- (filter_checksum and filter_checksum != checksum):
- continue
- try:
- value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
- except BaseException as exc:
-                bb.debug(1, "Not a valid value for %s: %s" % (v, str(exc)))
- continue
- ret = conn.importone(version,pkgarch,checksum,value)
- if ret != value:
- bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
- else:
- imported.append((version,pkgarch,checksum,value))
- return imported
-
-def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
- import bb.utils
-    # Initialize the output file
- bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
- df = d.getVar('PRSERV_DUMPFILE')
- #write data
- lf = bb.utils.lockfile("%s.lock" % df)
- f = open(df, "a")
- if metainfo:
- #dump column info
-        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
- f.write("#Table: %s\n" % metainfo['tbl_name'])
- f.write("#Columns:\n")
- f.write("#name \t type \t notn \t dflt \t pk\n")
- f.write("#----------\t --------\t --------\t --------\t ----\n")
- for i in range(len(metainfo['col_info'])):
- f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
- (metainfo['col_info'][i]['name'],
- metainfo['col_info'][i]['type'],
- metainfo['col_info'][i]['notnull'],
- metainfo['col_info'][i]['dflt_value'],
- metainfo['col_info'][i]['pk']))
- f.write("\n")
-
- if lockdown:
- f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
-
- if datainfo:
- idx = {}
- for i in range(len(datainfo)):
- pkgarch = datainfo[i]['pkgarch']
- value = datainfo[i]['value']
- if pkgarch not in idx:
- idx[pkgarch] = i
- elif value > datainfo[idx[pkgarch]]['value']:
- idx[pkgarch] = i
- f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
- (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
- if not nomax:
- for i in idx:
- f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
- f.close()
- bb.utils.unlockfile(lf)
-
-def prserv_check_avail(d):
- host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
- try:
- if len(host_params) != 2:
- raise TypeError
- else:
- int(host_params[1])
-    except (TypeError, ValueError):
- bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
- else:
- prserv_make_conn(d, True)
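
[Illustrative note] prserv_import_db() above reconstructs (version, pkgarch, checksum) from variable names of the form PRAUTO$version$pkgarch$checksum. A minimal sketch of that decomposition, using a made-up key value:

    key = 'PRAUTO$2.0-r0$cortexa9hf-neon$0123abcd'   # hypothetical example key
    remain, _, checksum = key.rpartition('$')
    remain, _, pkgarch = remain.rpartition('$')
    remain, _, version = remain.rpartition('$')
    assert remain + '$' == 'PRAUTO$'
    print(version, pkgarch, checksum)                 # 2.0-r0 cortexa9hf-neon 0123abcd
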
diff --git a/import-layers/yocto-poky/meta/lib/oe/qa.py b/import-layers/yocto-poky/meta/lib/oe/qa.py
deleted file mode 100644
index 3231e60ce..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/qa.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import os, struct, mmap
-
-class NotELFFileError(Exception):
- pass
-
-class ELFFile:
- EI_NIDENT = 16
-
- EI_CLASS = 4
- EI_DATA = 5
- EI_VERSION = 6
- EI_OSABI = 7
- EI_ABIVERSION = 8
-
- E_MACHINE = 0x12
-
- # possible values for EI_CLASS
- ELFCLASSNONE = 0
- ELFCLASS32 = 1
- ELFCLASS64 = 2
-
- # possible value for EI_VERSION
- EV_CURRENT = 1
-
- # possible values for EI_DATA
- EI_DATA_NONE = 0
- EI_DATA_LSB = 1
- EI_DATA_MSB = 2
-
- PT_INTERP = 3
-
- def my_assert(self, expectation, result):
- if not expectation == result:
- #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
- raise NotELFFileError("%s is not an ELF" % self.name)
-
- def __init__(self, name):
- self.name = name
- self.objdump_output = {}
-
- # Context Manager functions to close the mmap explicitly
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.data.close()
-
- def open(self):
- with open(self.name, "rb") as f:
- try:
- self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
- except ValueError:
- # This means the file is empty
- raise NotELFFileError("%s is empty" % self.name)
-
- # Check the file has the minimum number of ELF table entries
- if len(self.data) < ELFFile.EI_NIDENT + 4:
- raise NotELFFileError("%s is not an ELF" % self.name)
-
- # ELF header
- self.my_assert(self.data[0], 0x7f)
- self.my_assert(self.data[1], ord('E'))
- self.my_assert(self.data[2], ord('L'))
- self.my_assert(self.data[3], ord('F'))
- if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
- self.bits = 32
- elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
- self.bits = 64
- else:
-            # Not 32-bit or 64-bit, so raise an error
- raise NotELFFileError("ELF but not 32 or 64 bit.")
- self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)
-
- self.endian = self.data[ELFFile.EI_DATA]
- if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
- raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
-
- def osAbi(self):
- return self.data[ELFFile.EI_OSABI]
-
- def abiVersion(self):
- return self.data[ELFFile.EI_ABIVERSION]
-
- def abiSize(self):
- return self.bits
-
- def isLittleEndian(self):
- return self.endian == ELFFile.EI_DATA_LSB
-
- def isBigEndian(self):
- return self.endian == ELFFile.EI_DATA_MSB
-
- def getStructEndian(self):
- return {ELFFile.EI_DATA_LSB: "<",
- ELFFile.EI_DATA_MSB: ">"}[self.endian]
-
- def getShort(self, offset):
- return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
-
- def getWord(self, offset):
- return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
-
- def isDynamic(self):
- """
- Return True if there is a .interp segment (therefore dynamically
- linked), otherwise False (statically linked).
- """
- offset = self.getWord(self.bits == 32 and 0x1C or 0x20)
- size = self.getShort(self.bits == 32 and 0x2A or 0x36)
- count = self.getShort(self.bits == 32 and 0x2C or 0x38)
-
- for i in range(0, count):
- p_type = self.getWord(offset + i * size)
- if p_type == ELFFile.PT_INTERP:
- return True
- return False
-
- def machine(self):
- """
- We know the endian stored in self.endian and we
- know the position
- """
- return self.getShort(ELFFile.E_MACHINE)
-
- def run_objdump(self, cmd, d):
- import bb.process
- import sys
-
- if cmd in self.objdump_output:
- return self.objdump_output[cmd]
-
- objdump = d.getVar('OBJDUMP')
-
- env = os.environ.copy()
- env["LC_ALL"] = "C"
- env["PATH"] = d.getVar('PATH')
-
- try:
- bb.note("%s %s %s" % (objdump, cmd, self.name))
- self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
- return self.objdump_output[cmd]
- except Exception as e:
- bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
- return ""
-
-def elf_machine_to_string(machine):
- """
- Return the name of a given ELF e_machine field or the hex value as a string
- if it isn't recognised.
- """
- try:
- return {
- 0x02: "SPARC",
- 0x03: "x86",
- 0x08: "MIPS",
- 0x14: "PowerPC",
- 0x28: "ARM",
- 0x2A: "SuperH",
- 0x32: "IA-64",
- 0x3E: "x86-64",
- 0xB7: "AArch64"
- }[machine]
- except:
- return "Unknown (%s)" % repr(machine)
-
-if __name__ == "__main__":
- import sys
-
- with ELFFile(sys.argv[1]) as elf:
- elf.open()
- print(elf.isDynamic())
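
[Illustrative note] A standalone sketch of the ELF ident checks performed by ELFFile.open() above, reading the same header fields from a plain bytes buffer. It assumes a Linux host where /bin/true is an ELF binary; the helper name is made up for this example:

    import struct

    def describe_elf_ident(data):
        # Same checks as ELFFile.open(): magic, EI_CLASS, EI_DATA, then e_machine.
        if len(data) < 20 or data[0:4] != b'\x7fELF':
            raise ValueError('not an ELF image')
        bits = {1: 32, 2: 64}.get(data[4])        # EI_CLASS
        endian = {1: '<', 2: '>'}.get(data[5])    # EI_DATA
        if endian is None:
            raise ValueError('unexpected EI_DATA value %d' % data[5])
        machine = struct.unpack_from(endian + 'H', data, 0x12)[0]
        return bits, endian, machine

    with open('/bin/true', 'rb') as f:
        print(describe_elf_ident(f.read(64)))
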
diff --git a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
deleted file mode 100644
index aa64553c0..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
+++ /dev/null
@@ -1,971 +0,0 @@
-# Utility functions for reading and modifying recipes
-#
-# Some code borrowed from the OE layer index
-#
-# Copyright (C) 2013-2017 Intel Corporation
-#
-
-import sys
-import os
-import os.path
-import tempfile
-import textwrap
-import difflib
-from . import utils
-import shutil
-import re
-import fnmatch
-import glob
-from collections import OrderedDict, defaultdict
-
-
-# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
-# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
-list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
-meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
-
-
-def pn_to_recipe(cooker, pn, mc=''):
- """Convert a recipe name (PN) to the path to the recipe file"""
-
- best = cooker.findBestProvider(pn, mc)
- return best[3]
-
-
-def get_unavailable_reasons(cooker, pn):
- """If a recipe could not be found, find out why if possible"""
- import bb.taskdata
- taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
- return taskdata.get_reasons(pn)
-
-
-def parse_recipe(cooker, fn, appendfiles):
- """
- Parse an individual recipe file, optionally with a list of
- bbappend files.
- """
- import bb.cache
- parser = bb.cache.NoCache(cooker.databuilder)
- envdata = parser.loadDataFull(fn, appendfiles)
- return envdata
-
-
-def get_var_files(fn, varlist, d):
- """Find the file in which each of a list of variables is set.
- Note: requires variable history to be enabled when parsing.
- """
- varfiles = {}
- for v in varlist:
- history = d.varhistory.variable(v)
- files = []
- for event in history:
- if 'file' in event and not 'flag' in event:
- files.append(event['file'])
- if files:
- actualfile = files[-1]
- else:
- actualfile = None
- varfiles[v] = actualfile
-
- return varfiles
-
-
-def split_var_value(value, assignment=True):
- """
- Split a space-separated variable's value into a list of items,
- taking into account that some of the items might be made up of
- expressions containing spaces that should not be split.
- Parameters:
- value:
- The string value to split
- assignment:
- True to assume that the value represents an assignment
- statement, False otherwise. If True, and an assignment
- statement is passed in the first item in
- the returned list will be the part of the assignment
- statement up to and including the opening quote character,
- and the last item will be the closing quote.
- """
- inexpr = 0
- lastchar = None
- out = []
- buf = ''
- for char in value:
- if char == '{':
- if lastchar == '$':
- inexpr += 1
- elif char == '}':
- inexpr -= 1
- elif assignment and char in '"\'' and inexpr == 0:
- if buf:
- out.append(buf)
- out.append(char)
- char = ''
- buf = ''
- elif char.isspace() and inexpr == 0:
- char = ''
- if buf:
- out.append(buf)
- buf = ''
- buf += char
- lastchar = char
- if buf:
- out.append(buf)
-
- # Join together assignment statement and opening quote
- outlist = out
- if assignment:
- assigfound = False
- for idx, item in enumerate(out):
- if '=' in item:
- assigfound = True
- if assigfound:
- if '"' in item or "'" in item:
- outlist = [' '.join(out[:idx+1])]
- outlist.extend(out[idx+1:])
- break
- return outlist
-
-
-def patch_recipe_lines(fromlines, values, trailing_newline=True):
- """Update or insert variable values into lines from a recipe.
- Note that some manual inspection/intervention may be required
- since this cannot handle all situations.
- """
-
- import bb.utils
-
- if trailing_newline:
- newline = '\n'
- else:
- newline = ''
-
- nowrap_vars_res = []
- for item in nowrap_vars:
- nowrap_vars_res.append(re.compile('^%s$' % item))
-
- recipe_progression_res = []
- recipe_progression_restrs = []
- for item in recipe_progression:
- if item.endswith('()'):
- key = item[:-2]
- else:
- key = item
-        restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
- if item.endswith('()'):
- recipe_progression_restrs.append(restr + '()')
- else:
- recipe_progression_restrs.append(restr)
- recipe_progression_res.append(re.compile('^%s$' % restr))
-
- def get_recipe_pos(variable):
- for i, p in enumerate(recipe_progression_res):
- if p.match(variable):
- return i
- return -1
-
- remainingnames = {}
- for k in values.keys():
- remainingnames[k] = get_recipe_pos(k)
- remainingnames = OrderedDict(sorted(remainingnames.items(), key=lambda x: x[1]))
-
- modifying = False
-
- def outputvalue(name, lines, rewindcomments=False):
- if values[name] is None:
- return
- rawtext = '%s = "%s"%s' % (name, values[name], newline)
- addlines = []
- nowrap = False
- for nowrap_re in nowrap_vars_res:
- if nowrap_re.match(name):
- nowrap = True
- break
- if nowrap:
- addlines.append(rawtext)
- elif name in list_vars:
- splitvalue = split_var_value(values[name], assignment=False)
- if len(splitvalue) > 1:
- linesplit = ' \\\n' + (' ' * (len(name) + 4))
- addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
- else:
- addlines.append(rawtext)
- else:
- wrapped = textwrap.wrap(rawtext)
- for wrapline in wrapped[:-1]:
- addlines.append('%s \\%s' % (wrapline, newline))
- addlines.append('%s%s' % (wrapped[-1], newline))
-
- # Split on newlines - this isn't strictly necessary if you are only
- # going to write the output to disk, but if you want to compare it
- # (as patch_recipe_file() will do if patch=True) then it's important.
- addlines = [line for l in addlines for line in l.splitlines(True)]
- if rewindcomments:
- # Ensure we insert the lines before any leading comments
- # (that we'd want to ensure remain leading the next value)
- for i, ln in reversed(list(enumerate(lines))):
- if not ln.startswith('#'):
- lines[i+1:i+1] = addlines
- break
- else:
- lines.extend(addlines)
- else:
- lines.extend(addlines)
-
- existingnames = []
- def patch_recipe_varfunc(varname, origvalue, op, newlines):
- if modifying:
- # Insert anything that should come before this variable
- pos = get_recipe_pos(varname)
- for k in list(remainingnames):
- if remainingnames[k] > -1 and pos >= remainingnames[k] and not k in existingnames:
- outputvalue(k, newlines, rewindcomments=True)
- del remainingnames[k]
- # Now change this variable, if it needs to be changed
- if varname in existingnames and op in ['+=', '=', '=+']:
- if varname in remainingnames:
- outputvalue(varname, newlines)
- del remainingnames[varname]
- return None, None, 0, True
- else:
- if varname in values:
- existingnames.append(varname)
- return origvalue, None, 0, True
-
- # First run - establish which values we want to set are already in the file
- varlist = [re.escape(item) for item in values.keys()]
- bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc)
- # Second run - actually set everything
- modifying = True
- varlist.extend(recipe_progression_restrs)
- changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)
-
- if remainingnames:
- if tolines and tolines[-1].strip() != '':
- tolines.append('\n')
- for k in remainingnames.keys():
- outputvalue(k, tolines)
-
- return changed, tolines
-
-
-def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
- """Update or insert variable values into a recipe file (assuming you
- have already identified the exact file you want to update.)
- Note that some manual inspection/intervention may be required
- since this cannot handle all situations.
- """
-
- with open(fn, 'r') as f:
- fromlines = f.readlines()
-
- _, tolines = patch_recipe_lines(fromlines, values)
-
- if redirect_output:
- with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
- f.writelines(tolines)
- return None
- elif patch:
- relfn = os.path.relpath(fn, relpath)
- diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
- return diff
- else:
- with open(fn, 'w') as f:
- f.writelines(tolines)
- return None
-
-
-def localise_file_vars(fn, varfiles, varlist):
- """Given a list of variables and variable history (fetched with get_var_files())
- find where each variable should be set/changed. This handles for example where a
- recipe includes an inc file where variables might be changed - in most cases
- we want to update the inc file when changing the variable value rather than adding
- it to the recipe itself.
- """
- fndir = os.path.dirname(fn) + os.sep
-
- first_meta_file = None
- for v in meta_vars:
- f = varfiles.get(v, None)
- if f:
- actualdir = os.path.dirname(f) + os.sep
- if actualdir.startswith(fndir):
- first_meta_file = f
- break
-
- filevars = defaultdict(list)
- for v in varlist:
- f = varfiles[v]
- # Only return files that are in the same directory as the recipe or in some directory below there
- # (this excludes bbclass files and common inc files that wouldn't be appropriate to set the variable
- # in if we were going to set a value specific to this recipe)
- if f:
- actualfile = f
- else:
- # Variable isn't in a file, if it's one of the "meta" vars, use the first file with a meta var in it
- if first_meta_file:
- actualfile = first_meta_file
- else:
- actualfile = fn
-
- actualdir = os.path.dirname(actualfile) + os.sep
- if not actualdir.startswith(fndir):
- actualfile = fn
- filevars[actualfile].append(v)
-
- return filevars
-
-def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
- """Modify a list of variable values in the specified recipe. Handles inc files if
- used by the recipe.
- """
- varlist = varvalues.keys()
- varfiles = get_var_files(fn, varlist, d)
- locs = localise_file_vars(fn, varfiles, varlist)
- patches = []
- for f,v in locs.items():
- vals = {k: varvalues[k] for k in v}
- patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
- if patch:
- patches.append(patchdata)
-
- if patch:
- return patches
- else:
- return None
-
-
-
-def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
- """Copy (local) recipe files, including both files included via include/require,
- and files referred to in the SRC_URI variable."""
- import bb.fetch2
- import oe.path
-
- # FIXME need a warning if the unexpanded SRC_URI value contains variable references
-
- uri_values = []
- localpaths = []
- def fetch_urls(rdata):
- # Collect the local paths from SRC_URI
- srcuri = rdata.getVar('SRC_URI') or ""
- if srcuri not in uri_values:
- fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
- if download:
- fetch.download()
- for pth in fetch.localpaths():
- if pth not in localpaths:
- localpaths.append(pth)
- uri_values.append(srcuri)
-
- fetch_urls(d)
- if all_variants:
- # Get files for other variants e.g. in the case of a SRC_URI_append
- localdata = bb.data.createCopy(d)
- variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
- if variants:
- # Ensure we handle class-target if we're dealing with one of the variants
- variants.append('target')
- for variant in variants:
- localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
- fetch_urls(localdata)
-
- # Copy local files to target directory and gather any remote files
- bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
- remotes = []
- copied = []
- # Need to do this in two steps since we want to check against the absolute path
- includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
- # We also check this below, but we don't want any items in this list being considered remotes
- includes = [path for path in includes if path.startswith(bb_dir)]
- for path in localpaths + includes:
- # Only import files that are under the meta directory
- if path.startswith(bb_dir):
- if not whole_dir:
- relpath = os.path.relpath(path, bb_dir)
- subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
- if not os.path.exists(subdir):
- os.makedirs(subdir)
- shutil.copy2(path, os.path.join(tgt_dir, relpath))
- copied.append(relpath)
- else:
- remotes.append(path)
- # Simply copy whole meta dir, if requested
- if whole_dir:
- shutil.copytree(bb_dir, tgt_dir)
-
- return copied, remotes
-
-
-def get_recipe_local_files(d, patches=False, archives=False):
- """Get a list of local files in SRC_URI within a recipe."""
- import oe.patch
- uris = (d.getVar('SRC_URI') or "").split()
- fetch = bb.fetch2.Fetch(uris, d)
- # FIXME this list should be factored out somewhere else (such as the
- # fetcher) though note that this only encompasses actual container formats
- # i.e. that can contain multiple files as opposed to those that only
- # contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
- archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
- ret = {}
- for uri in uris:
- if fetch.ud[uri].type == 'file':
- if (not patches and
- oe.patch.patch_path(uri, fetch, '', expand=False)):
- continue
- # Skip files that are referenced by absolute path
- fname = fetch.ud[uri].basepath
- if os.path.isabs(fname):
- continue
- # Handle subdir=
- subdir = fetch.ud[uri].parm.get('subdir', '')
- if subdir:
- if os.path.isabs(subdir):
- continue
- fname = os.path.join(subdir, fname)
- localpath = fetch.localpath(uri)
- if not archives:
- # Ignore archives that will be unpacked
- if localpath.endswith(tuple(archive_exts)):
- unpack = fetch.ud[uri].parm.get('unpack', True)
- if unpack:
- continue
- ret[fname] = localpath
- return ret
-
-
-def get_recipe_patches(d):
- """Get a list of the patches included in SRC_URI within a recipe."""
- import oe.patch
- patches = oe.patch.src_patches(d, expand=False)
- patchfiles = []
- for patch in patches:
- _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
- patchfiles.append(local)
- return patchfiles
-
-
-def get_recipe_patched_files(d):
- """
- Get the list of patches for a recipe along with the files each patch modifies.
- Params:
- d: the datastore for the recipe
- Returns:
- a dict mapping patch file path to a list of tuples of changed files and
- change mode ('A' for add, 'D' for delete or 'M' for modify)
- """
- import oe.patch
- patches = oe.patch.src_patches(d, expand=False)
- patchedfiles = {}
- for patch in patches:
- _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
- striplevel = int(parm['striplevel'])
- patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S'), parm.get('patchdir', '')))
- return patchedfiles
-
-
-def validate_pn(pn):
- """Perform validation on a recipe name (PN) for a new recipe."""
- reserved_names = ['forcevariable', 'append', 'prepend', 'remove']
- if not re.match('^[0-9a-z-.+]+$', pn):
- return 'Recipe name "%s" is invalid: only characters 0-9, a-z, -, + and . are allowed' % pn
- elif pn in reserved_names:
- return 'Recipe name "%s" is invalid: is a reserved keyword' % pn
- elif pn.startswith('pn-'):
- return 'Recipe name "%s" is invalid: names starting with "pn-" are reserved' % pn
- elif pn.endswith(('.bb', '.bbappend', '.bbclass', '.inc', '.conf')):
- return 'Recipe name "%s" is invalid: should be just a name, not a file name' % pn
- return ''
-
-
-def get_bbfile_path(d, destdir, extrapathhint=None):
- """
- Determine the correct path for a recipe within a layer
- Parameters:
- d: Recipe-specific datastore
- destdir: destination directory. Can be the path to the base of the layer or a
- partial path somewhere within the layer.
- extrapathhint: a path relative to the base of the layer to try
- """
- import bb.cookerdata
-
- destdir = os.path.abspath(destdir)
- destlayerdir = find_layerdir(destdir)
-
- # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
- confdata = d.createCopy()
- confdata.setVar('BBFILES', '')
- confdata.setVar('LAYERDIR', destlayerdir)
- destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
- confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
- pn = d.getVar('PN')
-
- bbfilespecs = (confdata.getVar('BBFILES') or '').split()
- if destdir == destlayerdir:
- for bbfilespec in bbfilespecs:
- if not bbfilespec.endswith('.bbappend'):
- for match in glob.glob(bbfilespec):
- splitext = os.path.splitext(os.path.basename(match))
- if splitext[1] == '.bb':
- mpn = splitext[0].split('_')[0]
- if mpn == pn:
- return os.path.dirname(match)
-
- # Try to make up a path that matches BBFILES
- # this is a little crude, but better than nothing
- bpn = d.getVar('BPN')
- recipefn = os.path.basename(d.getVar('FILE'))
- pathoptions = [destdir]
- if extrapathhint:
- pathoptions.append(os.path.join(destdir, extrapathhint))
- if destdir == destlayerdir:
- pathoptions.append(os.path.join(destdir, 'recipes-%s' % bpn, bpn))
- pathoptions.append(os.path.join(destdir, 'recipes', bpn))
- pathoptions.append(os.path.join(destdir, bpn))
- elif not destdir.endswith(('/' + pn, '/' + bpn)):
- pathoptions.append(os.path.join(destdir, bpn))
- closepath = ''
- for pathoption in pathoptions:
- bbfilepath = os.path.join(pathoption, 'test.bb')
- for bbfilespec in bbfilespecs:
- if fnmatch.fnmatchcase(bbfilepath, bbfilespec):
- return pathoption
- return None
-
-def get_bbappend_path(d, destlayerdir, wildcardver=False):
- """Determine how a bbappend for a recipe should be named and located within another layer"""
-
- import bb.cookerdata
-
- destlayerdir = os.path.abspath(destlayerdir)
- recipefile = d.getVar('FILE')
- recipefn = os.path.splitext(os.path.basename(recipefile))[0]
- if wildcardver and '_' in recipefn:
- recipefn = recipefn.split('_', 1)[0] + '_%'
- appendfn = recipefn + '.bbappend'
-
- # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
- confdata = d.createCopy()
- confdata.setVar('BBFILES', '')
- confdata.setVar('LAYERDIR', destlayerdir)
- destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
- confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
-
- origlayerdir = find_layerdir(recipefile)
- if not origlayerdir:
- return (None, False)
- # Now join this to the path where the bbappend is going and check if it is covered by BBFILES
- appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
- closepath = ''
- pathok = True
- for bbfilespec in confdata.getVar('BBFILES').split():
- if fnmatch.fnmatchcase(appendpath, bbfilespec):
- # Our append path works, we're done
- break
- elif bbfilespec.startswith(destlayerdir) and fnmatch.fnmatchcase('test.bbappend', os.path.basename(bbfilespec)):
- # Try to find the longest matching path
- if len(bbfilespec) > len(closepath):
- closepath = bbfilespec
- else:
- # Unfortunately the bbappend layer and the original recipe's layer don't have the same structure
- if closepath:
- # bbappend layer's layer.conf at least has a spec that picks up .bbappend files
- # Now we just need to substitute out any wildcards
- appendsubdir = os.path.relpath(os.path.dirname(closepath), destlayerdir)
- if 'recipes-*' in appendsubdir:
- # Try to copy this part from the original recipe path
- res = re.search('/recipes-[^/]+/', recipefile)
- if res:
- appendsubdir = appendsubdir.replace('/recipes-*/', res.group(0))
- # This is crude, but we have to do something
- appendsubdir = appendsubdir.replace('*', recipefn.split('_')[0])
- appendsubdir = appendsubdir.replace('?', 'a')
- appendpath = os.path.join(destlayerdir, appendsubdir, appendfn)
- else:
- pathok = False
- return (appendpath, pathok)
-
-
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
- """
- Writes a bbappend file for a recipe
- Parameters:
- rd: data dictionary for the recipe
- destlayerdir: base directory of the layer to place the bbappend in
- (subdirectory path from there will be determined automatically)
-        srcfiles: dict of source files to add to SRC_URI, where the key
-            is the full path to the file to be added, and the value is the
- original filename as it would appear in SRC_URI or None if it
- isn't already present. You may pass None for this parameter if
- you simply want to specify your own content via the extralines
- parameter.
- install: dict mapping entries in srcfiles to a tuple of two elements:
- install path (*without* ${D} prefix) and permission value (as a
- string, e.g. '0644').
- wildcardver: True to use a % wildcard in the bbappend filename, or
- False to make the bbappend specific to the recipe version.
- machine:
- If specified, make the changes in the bbappend specific to this
- machine. This will also cause PACKAGE_ARCH = "${MACHINE_ARCH}"
- to be added to the bbappend.
- extralines:
- Extra lines to add to the bbappend. This may be a dict of name
- value pairs, or simply a list of the lines.
- removevalues:
- Variable values to remove - a dict of names/values.
- redirect_output:
- If specified, redirects writing the output file to the
- specified directory (for dry-run purposes)
- """
-
- if not removevalues:
- removevalues = {}
-
- # Determine how the bbappend should be named
- appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
- if not appendpath:
-        bb.error('Unable to determine layer directory containing %s' % rd.getVar('FILE'))
- return (None, None)
- if not pathok:
- bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
-
- appenddir = os.path.dirname(appendpath)
- if not redirect_output:
- bb.utils.mkdirhier(appenddir)
-
- # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
-
- layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
- if not os.path.abspath(destlayerdir) in layerdirs:
- bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
-
- bbappendlines = []
- if extralines:
- if isinstance(extralines, dict):
- for name, value in extralines.items():
- bbappendlines.append((name, '=', value))
- else:
- # Do our best to split it
- for line in extralines:
- if line[-1] == '\n':
- line = line[:-1]
- splitline = line.split(None, 2)
- if len(splitline) == 3:
- bbappendlines.append(tuple(splitline))
- else:
- raise Exception('Invalid extralines value passed')
-
- def popline(varname):
- for i in range(0, len(bbappendlines)):
- if bbappendlines[i][0] == varname:
- line = bbappendlines.pop(i)
- return line
- return None
-
- def appendline(varname, op, value):
- for i in range(0, len(bbappendlines)):
- item = bbappendlines[i]
- if item[0] == varname:
- bbappendlines[i] = (item[0], item[1], item[2] + ' ' + value)
- break
- else:
- bbappendlines.append((varname, op, value))
-
- destsubdir = rd.getVar('PN')
- if srcfiles:
- bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
-
- appendoverride = ''
- if machine:
- bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
- appendoverride = '_%s' % machine
- copyfiles = {}
- if srcfiles:
- instfunclines = []
- for newfile, origsrcfile in srcfiles.items():
- srcfile = origsrcfile
- srcurientry = None
- if not srcfile:
- srcfile = os.path.basename(newfile)
- srcurientry = 'file://%s' % srcfile
- # Double-check it's not there already
- # FIXME do we care if the entry is added by another bbappend that might go away?
- if not srcurientry in rd.getVar('SRC_URI').split():
- if machine:
- appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
- else:
- appendline('SRC_URI', '+=', srcurientry)
- copyfiles[newfile] = srcfile
- if install:
- institem = install.pop(newfile, None)
- if institem:
- (destpath, perms) = institem
- instdestpath = replace_dir_vars(destpath, rd)
- instdirline = 'install -d ${D}%s' % os.path.dirname(instdestpath)
- if not instdirline in instfunclines:
- instfunclines.append(instdirline)
- instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
- if instfunclines:
- bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
-
- if redirect_output:
- bb.note('Writing append file %s (dry-run)' % appendpath)
- outfile = os.path.join(redirect_output, os.path.basename(appendpath))
- # Only take a copy if the file isn't already there (this function may be called
- # multiple times per operation when we're handling overrides)
- if os.path.exists(appendpath) and not os.path.exists(outfile):
- shutil.copy2(appendpath, outfile)
- else:
- bb.note('Writing append file %s' % appendpath)
- outfile = appendpath
-
- if os.path.exists(outfile):
- # Work around lack of nonlocal in python 2
- extvars = {'destsubdir': destsubdir}
-
- def appendfile_varfunc(varname, origvalue, op, newlines):
- if varname == 'FILESEXTRAPATHS_prepend':
- if origvalue.startswith('${THISDIR}/'):
- popline('FILESEXTRAPATHS_prepend')
- extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
- elif varname == 'PACKAGE_ARCH':
- if machine:
- popline('PACKAGE_ARCH')
- return (machine, None, 4, False)
- elif varname.startswith('do_install_append'):
- func = popline(varname)
- if func:
- instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
- for line in func[2]:
- if not line in instfunclines:
- instfunclines.append(line)
- return (instfunclines, None, 4, False)
- else:
- splitval = split_var_value(origvalue, assignment=False)
- changed = False
- removevar = varname
- if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
- removevar = 'SRC_URI'
- line = popline(varname)
- if line:
- if line[2] not in splitval:
- splitval.append(line[2])
- changed = True
- else:
- line = popline(varname)
- if line:
- splitval = [line[2]]
- changed = True
-
- if removevar in removevalues:
- remove = removevalues[removevar]
- if isinstance(remove, str):
- if remove in splitval:
- splitval.remove(remove)
- changed = True
- else:
- for removeitem in remove:
- if removeitem in splitval:
- splitval.remove(removeitem)
- changed = True
-
- if changed:
- newvalue = splitval
- if len(newvalue) == 1:
- # Ensure it's written out as one line
- if '_append' in varname:
- newvalue = ' ' + newvalue[0]
- else:
- newvalue = newvalue[0]
- if not newvalue and (op in ['+=', '.='] or '_append' in varname):
- # There's no point appending nothing
- newvalue = None
- if varname.endswith('()'):
- indent = 4
- else:
- indent = -1
- return (newvalue, None, indent, True)
- return (origvalue, None, 4, False)
-
- varnames = [item[0] for item in bbappendlines]
- if removevalues:
- varnames.extend(list(removevalues.keys()))
-
- with open(outfile, 'r') as f:
- (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
-
- destsubdir = extvars['destsubdir']
- else:
- updated = False
- newlines = []
-
- if bbappendlines:
- for line in bbappendlines:
- if line[0].endswith('()'):
- newlines.append('%s {\n %s\n}\n' % (line[0], '\n '.join(line[2])))
- else:
- newlines.append('%s %s "%s"\n\n' % line)
- updated = True
-
- if updated:
- with open(outfile, 'w') as f:
- f.writelines(newlines)
-
- if copyfiles:
- if machine:
- destsubdir = os.path.join(destsubdir, machine)
- if redirect_output:
- outdir = redirect_output
- else:
- outdir = appenddir
- for newfile, srcfile in copyfiles.items():
- filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
- if os.path.abspath(newfile) != os.path.abspath(filedest):
- if newfile.startswith(tempfile.gettempdir()):
- newfiledisp = os.path.basename(newfile)
- else:
- newfiledisp = newfile
- if redirect_output:
- bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
- else:
- bb.note('Copying %s to %s' % (newfiledisp, filedest))
- bb.utils.mkdirhier(os.path.dirname(filedest))
- shutil.copyfile(newfile, filedest)
-
- return (appendpath, os.path.join(appenddir, destsubdir))
-
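A minimal usage sketch of bbappend_recipe() (illustrative only, not part of the original module): it assumes a tinfoil session, and the recipe name, layer path and file path are hypothetical.

import bb.tinfoil
import oe.recipeutils

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare()
    rd = tinfoil.parse_recipe('busybox')            # hypothetical recipe
    # key: path to the new file; value: existing SRC_URI name, or None if new
    srcfiles = {'/tmp/defconfig': None}
    appendpath, filesdir = oe.recipeutils.bbappend_recipe(
        rd, '/path/to/meta-custom', srcfiles,       # hypothetical layer path
        wildcardver=True,
        extralines={'PR': 'r1'})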
-
-def find_layerdir(fn):
- """ Figure out the path to the base of the layer containing a file (e.g. a recipe)"""
- pth = os.path.abspath(fn)
- layerdir = ''
- while pth:
- if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
- layerdir = pth
- break
- pth = os.path.dirname(pth)
- if pth == '/':
- return None
- return layerdir
-
-
-def replace_dir_vars(path, d):
- """Replace common directory paths with appropriate variable references (e.g. /etc becomes ${sysconfdir})"""
- dirvars = {}
- # Sort by length so we get the variables we're interested in first
- for var in sorted(list(d.keys()), key=len):
- if var.endswith('dir') and var.lower() == var:
- value = d.getVar(var)
- if value.startswith('/') and not '\n' in value and value not in dirvars:
- dirvars[value] = var
- for dirpath in sorted(list(dirvars.keys()), reverse=True):
- path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
- return path
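For illustration (not part of the original module), replace_dir_vars() can be exercised against the global configuration datastore; the input path is an arbitrary example.

import bb.tinfoil
import oe.recipeutils

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare(config_only=True)
    d = tinfoil.config_data
    # Typically prints '${sysconfdir}/default/keyboard'
    print(oe.recipeutils.replace_dir_vars('/etc/default/keyboard', d))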
-
-def get_recipe_pv_without_srcpv(pv, uri_type):
- """
-    Get PV without the SRCPV suffix that is common in SCMs; for now only
-    git is supported.
-
- Returns tuple with pv, prefix and suffix.
- """
- pfx = ''
- sfx = ''
-
- if uri_type == 'git':
- git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
- m = git_regex.match(pv)
-
- if m:
- pv = m.group('ver')
- pfx = m.group('pfx')
- sfx = m.group('sfx')
- else:
- regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
- m = regex.match(pv)
- if m:
- pv = m.group('ver')
- pfx = m.group('pfx')
-
- return (pv, pfx, sfx)
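A hedged example of the split performed above (assuming the oe modules are on the Python path; the version strings are made up):

from oe.recipeutils import get_recipe_pv_without_srcpv

# git-style PV carrying an AUTOINC suffix
print(get_recipe_pv_without_srcpv('1.2.3+gitAUTOINC+deadbeef', 'git'))
# -> ('1.2.3', '', '+gitAUTOINC+')

# non-git PV with a leading 'v'
print(get_recipe_pv_without_srcpv('v2.0', 'https'))
# -> ('2.0', 'v', '')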
-
-def get_recipe_upstream_version(rd):
- """
- Get upstream version of recipe using bb.fetch2 methods with support for
- http, https, ftp and git.
-
-    bb.fetch2 exceptions can be raised:
-        FetchError when there is no network access or the upstream site doesn't respond.
-        NoMethodError when the URI's latest_versionstring method isn't implemented.
-
-    Returns a dictionary with version, repository revision, current_version, type and datetime.
- Type can be A for Automatic, M for Manual and U for Unknown.
- """
- from bb.fetch2 import decodeurl
- from datetime import datetime
-
- ru = {}
- ru['current_version'] = rd.getVar('PV')
- ru['version'] = ''
- ru['type'] = 'U'
- ru['datetime'] = ''
- ru['revision'] = ''
-
-    # XXX: If there is no SRC_URI there are no upstream sources, so return
-    # the current recipe version so that the upstream version check
-    # declares a match.
- src_uris = rd.getVar('SRC_URI')
- if not src_uris:
- ru['version'] = ru['current_version']
- ru['type'] = 'M'
- ru['datetime'] = datetime.now()
- return ru
-
- # XXX: we suppose that the first entry points to the upstream sources
- src_uri = src_uris.split()[0]
- uri_type, _, _, _, _, _ = decodeurl(src_uri)
-
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
- ru['current_version'] = pv
-
- manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
- if manual_upstream_version:
- # manual tracking of upstream version.
- ru['version'] = manual_upstream_version
- ru['type'] = 'M'
-
- manual_upstream_date = rd.getVar("CHECK_DATE")
- if manual_upstream_date:
- date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
- else:
- date = datetime.now()
- ru['datetime'] = date
-
- elif uri_type == "file":
- # files are always up-to-date
- ru['version'] = pv
- ru['type'] = 'A'
- ru['datetime'] = datetime.now()
- else:
- ud = bb.fetch2.FetchData(src_uri, rd)
- if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
- revision = ud.method.latest_revision(ud, rd, 'default')
- upversion = pv
- if revision != rd.getVar("SRCREV"):
- upversion = upversion + "-new-commits-available"
- else:
- pupver = ud.method.latest_versionstring(ud, rd)
- (upversion, revision) = pupver
-
- if upversion:
- ru['version'] = upversion
- ru['type'] = 'A'
-
- if revision:
- ru['revision'] = revision
-
- ru['datetime'] = datetime.now()
-
- return ru
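A sketch of driving the upstream check from tinfoil (illustrative; the recipe name is an arbitrary example and bb.fetch2 exceptions may propagate as noted in the docstring):

import bb.tinfoil
import oe.recipeutils

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare()
    rd = tinfoil.parse_recipe('curl')               # hypothetical recipe
    ru = oe.recipeutils.get_recipe_upstream_version(rd)
    print('%s -> %s (%s)' % (ru['current_version'], ru['version'], ru['type']))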
diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
deleted file mode 100644
index f8f717c05..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/rootfs.py
+++ /dev/null
@@ -1,973 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from oe.utils import execute_pre_post_process
-from oe.package_manager import *
-from oe.manifest import *
-import oe.path
-import filecmp
-import shutil
-import os
-import subprocess
-import re
-
-
-class Rootfs(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- def __init__(self, d, progress_reporter=None, logcatcher=None):
- self.d = d
- self.pm = None
- self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
- self.deploydir = self.d.getVar('IMGDEPLOYDIR')
- self.progress_reporter = progress_reporter
- self.logcatcher = logcatcher
-
- self.install_order = Manifest.INSTALL_ORDER
-
- @abstractmethod
- def _create(self):
- pass
-
- @abstractmethod
- def _get_delayed_postinsts(self):
- pass
-
- @abstractmethod
- def _save_postinsts(self):
- pass
-
- @abstractmethod
- def _log_check(self):
- pass
-
- def _log_check_common(self, type, match):
- # Ignore any lines containing log_check to avoid recursion, and ignore
- # lines beginning with a + since sh -x may emit code which isn't
- # actually executed, but may contain error messages
- excludes = [ 'log_check', r'^\+' ]
- if hasattr(self, 'log_check_expected_regexes'):
- excludes.extend(self.log_check_expected_regexes)
- excludes = [re.compile(x) for x in excludes]
- r = re.compile(match)
- log_path = self.d.expand("${T}/log.do_rootfs")
- messages = []
- with open(log_path, 'r') as log:
- for line in log:
- if self.logcatcher and self.logcatcher.contains(line.rstrip()):
- continue
- for ee in excludes:
- m = ee.search(line)
- if m:
- break
- if m:
- continue
-
- m = r.search(line)
- if m:
- messages.append('[log_check] %s' % line)
- if messages:
- if len(messages) == 1:
- msg = '1 %s message' % type
- else:
- msg = '%d %s messages' % (len(messages), type)
- msg = '[log_check] %s: found %s in the logfile:\n%s' % \
- (self.d.getVar('PN'), msg, ''.join(messages))
- if type == 'error':
- bb.fatal(msg)
- else:
- bb.warn(msg)
-
- def _log_check_warn(self):
- self._log_check_common('warning', '^(warn|Warn|WARNING:)')
-
- def _log_check_error(self):
- self._log_check_common('error', self.log_check_regex)
-
- def _insert_feed_uris(self):
- if bb.utils.contains("IMAGE_FEATURES", "package-management",
- True, False, self.d):
- self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
- self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
- self.d.getVar('PACKAGE_FEED_ARCHS'))
-
-
- """
-    The _cleanup() method should be used to clean up anything that we don't
-    really want to end up on the target. For example, in the case of RPM, the DB locks.
-    The method is called once, at the end of the create() method.
- """
- @abstractmethod
- def _cleanup(self):
- pass
-
- def _setup_dbg_rootfs(self, dirs):
- gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
- if gen_debugfs != '1':
- return
-
- bb.note(" Renaming the original rootfs...")
- try:
- shutil.rmtree(self.image_rootfs + '-orig')
- except:
- pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
-
- bb.note(" Creating debug rootfs...")
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.note(" Copying back package database...")
- for dir in dirs:
- if not os.path.isdir(self.image_rootfs + '-orig' + dir):
- continue
- bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
- shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
-
- cpath = oe.cachedpath.CachedPath()
- # Copy files located in /usr/lib/debug or /usr/src/debug
- for dir in ["/usr/lib/debug", "/usr/src/debug"]:
- src = self.image_rootfs + '-orig' + dir
- if cpath.exists(src):
- dst = self.image_rootfs + dir
- bb.utils.mkdirhier(os.path.dirname(dst))
- shutil.copytree(src, dst)
-
- # Copy files with suffix '.debug' or located in '.debug' dir.
- for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
- relative_dir = root[len(self.image_rootfs + '-orig'):]
- for f in files:
- if f.endswith('.debug') or '/.debug' in relative_dir:
- bb.utils.mkdirhier(self.image_rootfs + relative_dir)
- shutil.copy(os.path.join(root, f),
- self.image_rootfs + relative_dir)
-
- bb.note(" Install complementary '*-dbg' packages...")
- self.pm.install_complementary('*-dbg')
-
- bb.note(" Rename debug rootfs...")
- try:
- shutil.rmtree(self.image_rootfs + '-dbg')
- except:
- pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
-
-        bb.note(" Restoring original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
-
- def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT')
- if fakerootcmd is not None:
- exec_cmd = [fakerootcmd, cmd]
- else:
- exec_cmd = cmd
-
- try:
- subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
-
- return None
-
- def create(self):
- bb.note("###### Generate rootfs #######")
- pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
- post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
- rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
-
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.utils.mkdirhier(self.deploydir)
-
- execute_pre_post_process(self.d, pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- # call the package manager dependent create method
- self._create()
-
- sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
- bb.utils.mkdirhier(sysconfdir)
- with open(sysconfdir + "/version", "w+") as ver:
- ver.write(self.d.getVar('BUILDNAME') + "\n")
-
- execute_pre_post_process(self.d, rootfs_post_install_cmds)
-
- self.pm.run_intercepts()
-
- execute_pre_post_process(self.d, post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
- True, False, self.d):
- delayed_postinsts = self._get_delayed_postinsts()
- if delayed_postinsts is not None:
- bb.fatal("The following packages could not be configured "
- "offline and rootfs is read-only: %s" %
- delayed_postinsts)
-
- if self.d.getVar('USE_DEVFS') != "1":
- self._create_devfs()
-
- self._uninstall_unneeded()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._insert_feed_uris()
-
- self._run_ldconfig()
-
- if self.d.getVar('USE_DEPMOD') != "0":
- self._generate_kernel_module_deps()
-
- self._cleanup()
- self._log_check()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- def _uninstall_unneeded(self):
- # Remove unneeded init script symlinks
- delayed_postinsts = self._get_delayed_postinsts()
- if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
- self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS'),
- "run-postinsts", "remove"])
-
- image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
- True, False, self.d)
- image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
-
- if image_rorfs or image_rorfs_force == "1":
- # Remove components that we don't need if it's a read-only rootfs
- unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
- pkgs_installed = image_list_installed_packages(self.d)
- # Make sure update-alternatives is removed last. This is
-            # because its database has to be available while uninstalling
- # other packages, allowing alternative symlinks of packages
- # to be uninstalled or to be managed correctly otherwise.
- provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
- pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
-
- # update-alternatives provider is removed in its own remove()
-            # call because not all package managers guarantee that the packages
-            # are removed in the order they are given in the list (which is
- # passed to the command line). The sorting done earlier is
- # utilized to implement the 2-stage removal.
- if len(pkgs_to_remove) > 1:
- self.pm.remove(pkgs_to_remove[:-1], False)
- if len(pkgs_to_remove) > 0:
- self.pm.remove([pkgs_to_remove[-1]], False)
-
- if delayed_postinsts:
- self._save_postinsts()
- if image_rorfs:
- bb.warn("There are post install scripts "
- "in a read-only rootfs")
-
- post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
- execute_pre_post_process(self.d, post_uninstall_cmds)
-
- runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
- True, False, self.d)
- if not runtime_pkgmanage:
- # Remove the package manager data files
- self.pm.remove_packaging_data()
-
- def _run_ldconfig(self):
- if self.d.getVar('LDCONFIGDEPEND'):
-            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v")
- self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
- 'new', '-v'])
-
- def _check_for_kernel_modules(self, modules_dir):
- for root, dirs, files in os.walk(modules_dir, topdown=True):
- for name in files:
- found_ko = name.endswith(".ko")
- if found_ko:
- return found_ko
- return False
-
- def _generate_kernel_module_deps(self):
- modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
- # if we don't have any modules don't bother to do the depmod
- if not self._check_for_kernel_modules(modules_dir):
- bb.note("No Kernel Modules found, not running depmod")
- return
-
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
- 'kernel-abiversion')
- if not os.path.exists(kernel_abi_ver_file):
- bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
-
-        with open(kernel_abi_ver_file) as f:
-            kernel_ver = f.read().strip(' \n')
- versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
-
- bb.utils.mkdirhier(versioned_modules_dir)
-
- self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
-
- """
- Create devfs:
-    * IMAGE_DEVICE_TABLE is the old name for an absolute path to a device table file
-    * IMAGE_DEVICE_TABLES is the new name for a file, or list of files, searched
-      for in the BBPATH
-    If neither is specified then the default name of files/device_table-minimal.txt
-    is searched for in the BBPATH (the same as the old version).
- """
- def _create_devfs(self):
- devtable_list = []
- devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
- if devtable is not None:
- devtable_list.append(devtable)
- else:
- devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
- if devtables is None:
- devtables = 'files/device_table-minimal.txt'
- for devtable in devtables.split():
- devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))
-
- for devtable in devtable_list:
- self._exec_shell_cmd(["makedevs", "-r",
- self.image_rootfs, "-D", devtable])
-
-
-class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
- '|exit 1|ERROR: |Error: |Error |ERROR '\
- '|Failed |Failed: |Failed$|Failed\(\d+\):)'
- self.manifest = RpmManifest(d, manifest_dir)
-
- self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS'),
- self.d.getVar('TARGET_VENDOR')
- )
-
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
- if self.inc_rpm_image_gen != "1":
- bb.utils.remove(self.image_rootfs, True)
- else:
- self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- self.pm.create_configs()
-
- '''
-    When rpm incremental image generation is enabled, remove the unneeded
-    packages by comparing the new install solution manifest with the
-    previously installed manifest.
- '''
- def _create_incremental(self, pkgs_initial_install):
- if self.inc_rpm_image_gen == "1":
-
- pkgs_to_install = list()
- for pkg_type in pkgs_initial_install:
- pkgs_to_install += pkgs_initial_install[pkg_type]
-
- installed_manifest = self.pm.load_old_install_solution()
- solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
-
- pkg_to_remove = list()
- for pkg in installed_manifest:
- if pkg not in solution_manifest:
- pkg_to_remove.append(pkg)
-
- self.pm.update()
-
- bb.note('incremental update -- upgrade packages in place ')
- self.pm.upgrade()
- if pkg_to_remove != []:
- bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- self.pm.autoremove()
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, rpm_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_rpm_image_gen == "1":
- self._create_incremental(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs_attempt, True)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
-
- execute_pre_post_process(self.d, rpm_post_process_cmds)
-
- if self.inc_rpm_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
-
- def _get_delayed_postinsts(self):
- postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
- if os.path.isdir(postinst_dir):
- files = os.listdir(postinst_dir)
- for f in files:
- bb.note('Delayed package scriptlet: %s' % f)
- return files
-
- return None
-
- def _save_postinsts(self):
- # this is just a stub. For RPM, the failed postinstalls are
- # already saved in /etc/rpm-postinsts
- pass
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm._invoke_dnf(["clean", "all"])
-
-
-class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None, logcatcher=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
-
- def _get_pkgs_postinsts(self, status_file):
- def _get_pkg_depends_list(pkg_depends):
- pkg_depends_list = []
- # filter version requirements like libc (>= 1.1)
- for dep in pkg_depends.split(', '):
- m_dep = re.match("^(.*) \(.*\)$", dep)
- if m_dep:
- dep = m_dep.group(1)
- pkg_depends_list.append(dep)
-
- return pkg_depends_list
-
- pkgs = {}
- pkg_name = ""
- pkg_status_match = False
- pkg_depends = ""
-
- with open(status_file) as status:
- data = status.read()
- for line in data.split('\n'):
- m_pkg = re.match("^Package: (.*)", line)
- m_status = re.match("^Status:.*unpacked", line)
- m_depends = re.match("^Depends: (.*)", line)
-
- if m_pkg is not None:
- if pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
-
- pkg_name = m_pkg.group(1)
- pkg_status_match = False
- pkg_depends = ""
- elif m_status is not None:
- pkg_status_match = True
- elif m_depends is not None:
- pkg_depends = m_depends.group(1)
-
- # remove package dependencies not in postinsts
- pkg_names = list(pkgs.keys())
- for pkg_name in pkg_names:
- deps = pkgs[pkg_name][:]
-
- for d in deps:
- if d not in pkg_names:
- pkgs[pkg_name].remove(d)
-
- return pkgs
-
- def _get_delayed_postinsts_common(self, status_file):
- def _dep_resolve(graph, node, resolved, seen):
- seen.append(node)
-
- for edge in graph[node]:
- if edge not in resolved:
- if edge in seen:
- raise RuntimeError("Packages %s and %s have " \
- "a circular dependency in postinsts scripts." \
- % (node, edge))
- _dep_resolve(graph, edge, resolved, seen)
-
- resolved.append(node)
-
- pkg_list = []
-
- pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL').strip():
- bb.note("Building empty image")
- else:
- pkgs = self._get_pkgs_postinsts(status_file)
- if pkgs:
- root = "__packagegroup_postinst__"
- pkgs[root] = list(pkgs.keys())
- _dep_resolve(pkgs, root, pkg_list, [])
- pkg_list.remove(root)
-
- if len(pkg_list) == 0:
- return None
-
- return pkg_list
-
- def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
- num = 0
- for p in self._get_delayed_postinsts():
- bb.utils.mkdirhier(dst_postinst_dir)
-
- if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
- shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
- os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
-
- num += 1
-
-class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '^E:'
- self.log_check_expected_regexes = \
- [
- "^E: Unmet dependencies."
- ]
-
- bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
- self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
- d.getVar('PACKAGE_ARCHS'),
- d.getVar('DPKG_ARCH'))
-
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
-
- alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
- bb.utils.mkdirhier(alt_dir)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, deb_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Don't support incremental, so skip that
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- # Don't support attemptonly, so skip that
- self.progress_reporter.next_stage()
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/dpkg'])
-
- self.pm.fix_broken_dependencies()
-
- self.pm.mark_packages("installed")
-
- self.pm.run_pre_post_installs()
-
- execute_pre_post_process(self.d, deb_post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = self.image_rootfs + "/var/lib/dpkg/status"
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- pass
-
-
-class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(exit 1|Collected errors)'
-
- self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
-
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
- if self._remove_old_rootfs():
- bb.utils.remove(self.image_rootfs, True)
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- else:
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- self.pm.recover_packaging_data()
-
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
- '''
- Compare two files with the same key twice to see if they are equal.
- If they are not equal, it means they are duplicated and come from
- different packages.
-    1st: Compare them directly;
-    2nd: When incremental image creation is enabled, one of the
-         files could have been prelinked in the previous image
-         creation and thus changed, so we need to prelink the
-         other one and compare them again.
- '''
- def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- # Not equal
- return False
-
- """
- This function was reused from the old implementation.
- See commit: "image.bbclass: Added variables for multilib support." by
- Lianhao Lu.
- """
- def _multilib_sanity_test(self, dirs):
-
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
- if allow_replace is None:
- allow_replace = ""
-
- allow_rep = re.compile(re.sub("\|$", "", allow_replace))
- error_prompt = "Multilib check error:"
-
- files = {}
- for dir in dirs:
- for root, subfolders, subfiles in os.walk(dir):
- for file in subfiles:
- item = os.path.join(root, file)
- key = str(os.path.join("/", os.path.relpath(item, dir)))
-
- valid = True
- if key in files:
-                        # check whether the file is allowed to be replaced
- if allow_rep.match(key):
- valid = True
- else:
- if os.path.exists(files[key]) and \
- os.path.exists(item) and \
- not self._file_equal(key, files[key], item):
- valid = False
-                                bb.fatal("%s duplicate files %s and %s are not the same\n" %
- (error_prompt, item, files[key]))
-
-                    # passed the check, add to the list
- if valid:
- files[key] = item
-
- def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
- bb.utils.mkdirhier(ml_temp)
-
- dirs = [self.image_rootfs]
-
- for variant in self.d.getVar("MULTILIB_VARIANTS").split():
- ml_target_rootfs = os.path.join(ml_temp, variant)
-
- bb.utils.remove(ml_target_rootfs, True)
-
- ml_opkg_conf = os.path.join(ml_temp,
- variant + "-" + os.path.basename(self.opkg_conf))
-
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
-
- ml_pm.update()
- ml_pm.install(pkgs)
-
- dirs.append(ml_target_rootfs)
-
- self._multilib_sanity_test(dirs)
-
- '''
-    When ipk incremental image generation is enabled, remove the unneeded
-    packages by comparing the old full manifest from the previous image
-    with the new full manifest for the current image.
- '''
- def _remove_extra_packages(self, pkgs_initial_install):
- if self.inc_opkg_image_gen == "1":
- # Parse full manifest in previous existing image creation session
- old_full_manifest = self.manifest.parse_full_manifest()
-
- # Create full manifest for the current image session, the old one
- # will be replaced by the new one.
- self.manifest.create_full(self.pm)
-
- # Parse full manifest in current image creation session
- new_full_manifest = self.manifest.parse_full_manifest()
-
- pkg_to_remove = list()
- for pkg in old_full_manifest:
- if pkg not in new_full_manifest:
- pkg_to_remove.append(pkg)
-
- if pkg_to_remove != []:
- bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- '''
-    Compare with the previous image creation session; if certain conditions
-    are triggered, the previous image should be removed.
-    The conditions are that any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
-    or BAD_RECOMMENDATIONS has been changed.
- '''
- def _remove_old_rootfs(self):
- if self.inc_opkg_image_gen != "1":
- return True
-
- vars_list_file = self.d.expand('${T}/vars_list')
-
- old_vars_list = ""
- if os.path.exists(vars_list_file):
-            with open(vars_list_file, 'r') as f:
-                old_vars_list = f.read()
-
- new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
-        with open(vars_list_file, 'w') as f:
-            f.write(new_vars_list)
-
- if old_vars_list != new_vars_list:
- return True
-
- return False
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
-
- # update PM index files, unless users provide their own feeds
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- self.pm.write_index()
-
- execute_pre_post_process(self.d, opkg_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Steps are a bit different in order, skip next
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- self.pm.handle_bad_recommendations()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_opkg_image_gen == "1":
- self._remove_extra_packages(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- # For multilib, we perform a sanity test before final install
- # If sanity test fails, it will automatically do a bb.fatal()
- # and the installation will stop
- if pkg_type == Manifest.PKG_TYPE_MULTILIB:
- self._multilib_test_install(pkgs_to_install[pkg_type])
-
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
- self._setup_dbg_rootfs([opkg_dir])
-
- execute_pre_post_process(self.d, opkg_post_process_cmds)
-
- if self.inc_opkg_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
-        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR').strip('/'),
- "opkg", "status")
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm.remove_lists()
-
-def get_class_for_type(imgtype):
- return {"rpm": RpmRootfs,
- "ipk": OpkgRootfs,
- "deb": DpkgRootfs}[imgtype]
-
-def variable_depends(d, manifest_dir=None):
- img_type = d.getVar('IMAGE_PKGTYPE')
- cls = get_class_for_type(img_type)
- return cls._depends_list()
-
-def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
- env_bkp = os.environ.copy()
-
- img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
-
- os.environ.clear()
- os.environ.update(env_bkp)
-
-
-def image_list_installed_packages(d, rootfs_dir=None):
- if not rootfs_dir:
- rootfs_dir = d.getVar('IMAGE_ROOTFS')
-
- img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
-
-if __name__ == "__main__":
- """
-    We should be able to run this as a standalone script, from outside the
-    bitbake environment.
- """
- """
- TBD
- """
diff --git a/import-layers/yocto-poky/meta/lib/oe/sdk.py b/import-layers/yocto-poky/meta/lib/oe/sdk.py
deleted file mode 100644
index d6a503372..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/sdk.py
+++ /dev/null
@@ -1,473 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from oe.utils import execute_pre_post_process
-from oe.manifest import *
-from oe.package_manager import *
-import os
-import shutil
-import glob
-import traceback
-
-def generate_locale_archive(d, rootfs):
- # Pretty sure we don't need this for SDK archive generation but
- # keeping it to be safe...
- target_arch = d.getVar('SDK_ARCH')
- locale_arch_options = { \
- "arm": ["--uint32-align=4", "--little-endian"],
- "armeb": ["--uint32-align=4", "--big-endian"],
- "aarch64": ["--uint32-align=4", "--little-endian"],
- "aarch64_be": ["--uint32-align=4", "--big-endian"],
- "sh4": ["--uint32-align=4", "--big-endian"],
- "powerpc": ["--uint32-align=4", "--big-endian"],
- "powerpc64": ["--uint32-align=4", "--big-endian"],
- "mips": ["--uint32-align=4", "--big-endian"],
- "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
- "mips64": ["--uint32-align=4", "--big-endian"],
- "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
- "mipsel": ["--uint32-align=4", "--little-endian"],
- "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
- "mips64el": ["--uint32-align=4", "--little-endian"],
- "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
- "i586": ["--uint32-align=4", "--little-endian"],
- "i686": ["--uint32-align=4", "--little-endian"],
- "x86_64": ["--uint32-align=4", "--little-endian"]
- }
- if target_arch in locale_arch_options:
- arch_options = locale_arch_options[target_arch]
- else:
- bb.error("locale_arch_options not found for target_arch=" + target_arch)
- bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
-
- localedir = oe.path.join(rootfs, d.getVar("libdir_nativesdk"), "locale")
- # Need to set this so cross-localedef knows where the archive is
- env = dict(os.environ)
- env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
-
- for name in os.listdir(localedir):
- path = os.path.join(localedir, name)
- if os.path.isdir(path):
- try:
- cmd = ["cross-localedef", "--verbose"]
- cmd += arch_options
- cmd += ["--add-to-archive", path]
- subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
- except Exception as e:
- bb.fatal("Cannot create locale archive: %s" % e.output)
-
-class Sdk(object, metaclass=ABCMeta):
- def __init__(self, d, manifest_dir):
- self.d = d
- self.sdk_output = self.d.getVar('SDK_OUTPUT')
- self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
- self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
- self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
-
- self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
- self.sdk_host_sysroot = self.sdk_output
-
- if manifest_dir is None:
- self.manifest_dir = self.d.getVar("SDK_DIR")
- else:
- self.manifest_dir = manifest_dir
-
- self.remove(self.sdk_output, True)
-
- self.install_order = Manifest.INSTALL_ORDER
-
- @abstractmethod
- def _populate(self):
- pass
-
- def populate(self):
- self.mkdirhier(self.sdk_output)
-
- # call backend dependent implementation
- self._populate()
-
- # Don't ship any libGL in the SDK
- self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk').strip('/'),
- "libGL*"))
-
- # Fix or remove broken .la files
- self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk').strip('/'),
- "*.la"))
-
- # Link the ld.so.cache file into the hosts filesystem
- link_name = os.path.join(self.sdk_output, self.sdk_native_path,
- self.sysconfdir, "ld.so.cache")
- self.mkdirhier(os.path.dirname(link_name))
- os.symlink("/etc/ld.so.cache", link_name)
-
- execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
-
- def movefile(self, sourcefile, destdir):
- try:
- # FIXME: this check of movefile's return code to None should be
- # fixed within the function to use only exceptions to signal when
- # something goes wrong
-            if bb.utils.movefile(sourcefile, destdir) is None:
- raise OSError("moving %s to %s failed"
- %(sourcefile, destdir))
- #FIXME: using umbrella exc catching because bb.utils method raises it
- except Exception as e:
- bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.error("unable to place %s in final SDK location" % sourcefile)
-
- def mkdirhier(self, dirpath):
- try:
- bb.utils.mkdirhier(dirpath)
- except OSError as e:
- bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.fatal("cannot make dir for SDK: %s" % dirpath)
-
- def remove(self, path, recurse=False):
- try:
- bb.utils.remove(path, recurse)
- #FIXME: using umbrella exc catching because bb.utils method raises it
- except Exception as e:
- bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.warn("cannot remove SDK dir: %s" % path)
-
- def install_locales(self, pm):
- # This is only relevant for glibc
- if self.d.getVar("TCLIBC") != "glibc":
- return
-
- linguas = self.d.getVar("SDKIMAGE_LINGUAS")
- if linguas:
- import fnmatch
- # Install the binary locales
- if linguas == "all":
- pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
- else:
- for lang in linguas.split():
- pm.install("nativesdk-glibc-binary-localedata-%s.utf-8" % lang)
- # Generate a locale archive of them
- generate_locale_archive(self.d, oe.path.join(self.sdk_host_sysroot, self.sdk_native_path))
- # And now delete the binary locales
- pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
- pm.remove(pkgs)
- else:
- # No linguas so do nothing
- pass
-
-
-class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
- super(RpmSdk, self).__init__(d, manifest_dir)
-
- self.target_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- rpm_repo_workdir = "oe-sdk-repo"
- if "sdk_ext" in d.getVar("BB_RUNTASK"):
- rpm_repo_workdir = "oe-sdk-ext-repo"
-
- self.target_pm = RpmPM(d,
- self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR'),
- 'target',
- rpm_repo_workdir=rpm_repo_workdir
- )
-
- self.host_pm = RpmPM(d,
- self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR'),
- 'host',
- "SDK_PACKAGE_ARCHS",
- "SDK_OS",
- rpm_repo_workdir=rpm_repo_workdir
- )
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.create_configs()
- pm.write_index()
- pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- pm.install(pkgs)
-
- pm.install(pkgs_attempt, True)
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- # Move host RPM library data
- native_rpm_state_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib",
- "rpm"
- )
- self.mkdirhier(native_rpm_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output,
- "var",
- "lib",
- "rpm",
- "*")):
- self.movefile(f, native_rpm_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
- # Move host sysconfig data
- native_sysconf_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('sysconfdir',
- True).strip('/'),
- )
- self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
- self.movefile(f, native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
- self.remove(os.path.join(self.sdk_output, "etc"), True)
-
-
-class OpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(OpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf = self.d.getVar("IPKGCONF_TARGET")
- self.host_conf = self.d.getVar("IPKGCONF_SDK")
-
- self.target_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
-
- self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS"))
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- pm.write_index()
-
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
- host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
-
- self.mkdirhier(target_sysconfdir)
- shutil.copy(self.target_conf, target_sysconfdir)
- os.chmod(os.path.join(target_sysconfdir,
- os.path.basename(self.target_conf)), 0o644)
-
- self.mkdirhier(host_sysconfdir)
- shutil.copy(self.host_conf, host_sysconfdir)
- os.chmod(os.path.join(host_sysconfdir,
- os.path.basename(self.host_conf)), 0o644)
-
- native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib", "opkg")
- self.mkdirhier(native_opkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
- self.movefile(f, native_opkg_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-class DpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(DpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
-
- self.target_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS"),
- self.d.getVar("DPKG_ARCH"),
- self.target_conf_dir)
-
- self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS"),
- self.d.getVar("DEB_SDK_ARCH"),
- self.host_conf_dir)
-
- def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
-
- self.remove(dst_dir, True)
-
- shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.write_index()
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts()
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
- "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- "var", "lib", "dpkg")
- self.mkdirhier(native_dpkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
- self.movefile(f, native_dpkg_state_dir)
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-
-def sdk_list_installed_packages(d, target, rootfs_dir=None):
- if rootfs_dir is None:
- sdk_output = d.getVar('SDK_OUTPUT')
- target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
-
- rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
-
- img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
- os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
-
-def populate_sdk(d, manifest_dir=None):
- env_bkp = os.environ.copy()
-
- img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmSdk(d, manifest_dir).populate()
- elif img_type == "ipk":
- OpkgSdk(d, manifest_dir).populate()
- elif img_type == "deb":
- DpkgSdk(d, manifest_dir).populate()
-
- os.environ.clear()
- os.environ.update(env_bkp)
-
-def get_extra_sdkinfo(sstate_dir):
- """
- Collect per-task and per-file size information from the sstate cache; used when generating the target and host manifest files of the eSDK.
- """
- import math
-
- extra_info = {}
- extra_info['tasksizes'] = {}
- extra_info['filesizes'] = {}
- for root, _, files in os.walk(sstate_dir):
- for fn in files:
- if fn.endswith('.tgz'):
- fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
- task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
- origtotal = extra_info['tasksizes'].get(task, 0)
- extra_info['tasksizes'][task] = origtotal + fsize
- extra_info['filesizes'][fn] = fsize
- return extra_info
-
-if __name__ == "__main__":
- pass
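To make the size accounting in get_extra_sdkinfo() above easier to follow, here is a minimal standalone sketch of the same aggregation, usable outside of BitBake. The directory path in the usage example is hypothetical, and the task-name extraction simply mirrors the expression used in the original function.

import math
import os

def summarize_sstate_sizes(sstate_dir):
    tasksizes = {}   # cumulative size per task, in KiB
    filesizes = {}   # size of each individual .tgz archive, in KiB
    for root, _, files in os.walk(sstate_dir):
        for fn in files:
            if not fn.endswith('.tgz'):
                continue
            fsize = int(math.ceil(os.path.getsize(os.path.join(root, fn)) / 1024))
            # Derive the task name the same way get_extra_sdkinfo() does:
            # take the chunk after the last ':' and drop the hash prefix.
            task = fn.rsplit(':', 1)[1].split('_', 1)[1].split(',')[0]
            tasksizes[task] = tasksizes.get(task, 0) + fsize
            filesizes[fn] = fsize
    return tasksizes, filesizes

if __name__ == "__main__":
    # Hypothetical cache location, for illustration only.
    tasks, files = summarize_sstate_sizes("/path/to/sstate-cache")
    for task, size in sorted(tasks.items(), key=lambda kv: -kv[1]):
        print("%-40s %8d KiB" % (task, size))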
diff --git a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
deleted file mode 100644
index b82e0f422..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
+++ /dev/null
@@ -1,404 +0,0 @@
-import bb.siggen
-import oe
-
-def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
- # Return True if we should keep the dependency, False to drop it
- def isNative(x):
- return x.endswith("-native")
- def isCross(x):
- return "-cross-" in x
- def isNativeSDK(x):
- return x.startswith("nativesdk-")
- def isKernel(fn):
- inherits = " ".join(dataCache.inherits[fn])
- return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
- def isPackageGroup(fn):
- inherits = " ".join(dataCache.inherits[fn])
- return "/packagegroup.bbclass" in inherits
- def isAllArch(fn):
- inherits = " ".join(dataCache.inherits[fn])
- return "/allarch.bbclass" in inherits
- def isImage(fn):
- return "/image.bbclass" in " ".join(dataCache.inherits[fn])
-
- # (Almost) always include our own inter-task dependencies.
- # The exception is the special do_kernel_configme->do_unpack_and_patch
- # dependency from archiver.bbclass.
- if recipename == depname:
- if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
- return False
- return True
-
- # Exclude well defined recipe->dependency
- if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
- return False
-
- # Check for special wildcard
- if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
- return False
-
- # Don't change native/cross/nativesdk recipe dependencies any further
- if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
- return True
-
- # Only target packages beyond here
-
- # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
- if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
- return False
-
- # Exclude well defined machine specific configurations which don't change ABI
- if depname in siggen.abisaferecipes and not isImage(fn):
- return False
-
- # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
- # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
- # is machine specific.
- # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
- # and we recommend a kernel-module, we exclude the dependency.
- depfn = dep.rsplit(".", 1)[0]
- if dataCache and isKernel(depfn) and not isKernel(fn):
- for pkg in dataCache.runrecs[fn]:
- if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
- return False
-
- # Default to keep dependencies
- return True
-
-def sstate_lockedsigs(d):
- sigs = {}
- types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
- for t in types:
- siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
- lockedsigs = (d.getVar(siggen_lockedsigs_var) or "").split()
- for ls in lockedsigs:
- pn, task, h = ls.split(":", 2)
- if pn not in sigs:
- sigs[pn] = {}
- sigs[pn][task] = [h, siggen_lockedsigs_var]
- return sigs
-
-class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
- name = "OEBasic"
- def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
- pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
-
-class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
- name = "OEBasicHash"
- def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
- self.lockedsigs = sstate_lockedsigs(data)
- self.lockedhashes = {}
- self.lockedpnmap = {}
- self.lockedhashfn = {}
- self.machine = data.getVar("MACHINE")
- self.mismatch_msgs = []
- self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
- "").split()
- self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
- pass
-
- def tasks_resolved(self, virtmap, virtpnmap, dataCache):
- # Translate virtual/xxx entries to PN values
- newabisafe = []
- for a in self.abisaferecipes:
- if a in virtpnmap:
- newabisafe.append(virtpnmap[a])
- else:
- newabisafe.append(a)
- self.abisaferecipes = newabisafe
- newsafedeps = []
- for a in self.saferecipedeps:
- a1, a2 = a.split("->")
- if a1 in virtpnmap:
- a1 = virtpnmap[a1]
- if a2 in virtpnmap:
- a2 = virtpnmap[a2]
- newsafedeps.append(a1 + "->" + a2)
- self.saferecipedeps = newsafedeps
-
- def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
-
- def get_taskdata(self):
- data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
- return (data, self.lockedpnmap, self.lockedhashfn)
-
- def set_taskdata(self, data):
- coredata, self.lockedpnmap, self.lockedhashfn = data
- super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
-
- def dump_sigs(self, dataCache, options):
- sigfile = os.getcwd() + "/locked-sigs.inc"
- bb.plain("Writing locked sigs to %s" % sigfile)
- self.dump_lockedsigs(sigfile)
- return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
-
- def get_taskhash(self, fn, task, deps, dataCache):
- h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
-
- recipename = dataCache.pkg_fn[fn]
- self.lockedpnmap[fn] = recipename
- self.lockedhashfn[fn] = dataCache.hashfn[fn]
-
- unlocked = False
- if recipename in self.unlockedrecipes:
- unlocked = True
- else:
- def recipename_from_dep(dep):
- # The dep entry will look something like
- # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
- # ...
- fn = dep.rsplit('.', 1)[0]
- return dataCache.pkg_fn[fn]
-
- # If any unlocked recipe is in the direct dependencies then the
- # current recipe should be unlocked as well.
- depnames = [ recipename_from_dep(x) for x in deps ]
- if any(x in y for y in depnames for x in self.unlockedrecipes):
- self.unlockedrecipes[recipename] = ''
- unlocked = True
-
- if not unlocked and recipename in self.lockedsigs:
- if task in self.lockedsigs[recipename]:
- k = fn + "." + task
- h_locked = self.lockedsigs[recipename][task][0]
- var = self.lockedsigs[recipename][task][1]
- self.lockedhashes[k] = h_locked
- self.taskhash[k] = h_locked
- #bb.warn("Using %s %s %s" % (recipename, task, h))
-
- if h != h_locked:
- self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
- % (recipename, task, h, h_locked, var))
-
- return h_locked
- #bb.warn("%s %s %s" % (recipename, task, h))
- return h
-
- def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
- if k in self.lockedhashes:
- return
- super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
-
- def dump_lockedsigs(self, sigfile, taskfilter=None):
- types = {}
- for k in self.runtaskdeps:
- if taskfilter:
- if not k in taskfilter:
- continue
- fn = k.rsplit(".",1)[0]
- t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
- t = 't-' + t.replace('_', '-')
- if t not in types:
- types[t] = []
- types[t].append(k)
-
- with open(sigfile, "w") as f:
- l = sorted(types)
- for t in l:
- f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
- types[t].sort()
- sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
- for k in sortedk:
- fn = k.rsplit(".",1)[0]
- task = k.rsplit(".",1)[1]
- if k not in self.taskhash:
- continue
- f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
- f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
-
- def dump_siglist(self, sigfile):
- with open(sigfile, "w") as f:
- tasks = []
- for taskitem in self.taskhash:
- (fn, task) = taskitem.rsplit(".", 1)
- pn = self.lockedpnmap[fn]
- tasks.append((pn, task, fn, self.taskhash[taskitem]))
- for (pn, task, fn, taskhash) in sorted(tasks):
- f.write('%s.%s %s %s\n' % (pn, task, fn, taskhash))
-
- def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
- warn_msgs = []
- error_msgs = []
- sstate_missing_msgs = []
-
- for task in range(len(sq_fn)):
- if task not in ret:
- for pn in self.lockedsigs:
- if sq_hash[task] in iter(self.lockedsigs[pn].values()):
- if sq_task[task] == 'do_shared_workdir':
- continue
- sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
- % (pn, sq_task[task], sq_hash[task]))
-
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
- if checklevel == 'warn':
- warn_msgs += self.mismatch_msgs
- elif checklevel == 'error':
- error_msgs += self.mismatch_msgs
-
- checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
- if checklevel == 'warn':
- warn_msgs += sstate_missing_msgs
- elif checklevel == 'error':
- error_msgs += sstate_missing_msgs
-
- if warn_msgs:
- bb.warn("\n".join(warn_msgs))
- if error_msgs:
- bb.fatal("\n".join(error_msgs))
-
-
-# Insert these classes into siggen's namespace so it can see and select them
-bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
-bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
-
-
-def find_siginfo(pn, taskname, taskhashlist, d):
- """ Find signature data files for comparison purposes """
-
- import fnmatch
- import glob
-
- if not taskname:
- # We have to derive pn and taskname
- key = pn
- splitit = key.split('.bb.')
- taskname = splitit[1]
- pn = os.path.basename(splitit[0]).split('_')[0]
- if key.startswith('virtual:native:'):
- pn = pn + '-native'
-
- hashfiles = {}
- filedates = {}
-
- def get_hashval(siginfo):
- if siginfo.endswith('.siginfo'):
- return siginfo.rpartition(':')[2].partition('_')[0]
- else:
- return siginfo.rpartition('.')[2]
-
- # First search in stamps dir
- localdata = d.createCopy()
- localdata.setVar('MULTIMACH_TARGET_SYS', '*')
- localdata.setVar('PN', pn)
- localdata.setVar('PV', '*')
- localdata.setVar('PR', '*')
- localdata.setVar('EXTENDPE', '')
- stamp = localdata.getVar('STAMP')
- if pn.startswith("gcc-source"):
- # gcc-source shared workdir is a special case :(
- stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
-
- filespec = '%s.%s.sigdata.*' % (stamp, taskname)
- foundall = False
- for fullpath in glob.glob(filespec):
- match = False
- if taskhashlist:
- for taskhash in taskhashlist:
- if fullpath.endswith('.%s' % taskhash):
- hashfiles[taskhash] = fullpath
- if len(hashfiles) == len(taskhashlist):
- foundall = True
- break
- else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except OSError:
- continue
- hashval = get_hashval(fullpath)
- hashfiles[hashval] = fullpath
-
- if not taskhashlist or (len(filedates) < 2 and not foundall):
- # That didn't work, look in sstate-cache
- hashes = taskhashlist or ['?' * 32]
- localdata = bb.data.createCopy(d)
- for hashval in hashes:
- localdata.setVar('PACKAGE_ARCH', '*')
- localdata.setVar('TARGET_VENDOR', '*')
- localdata.setVar('TARGET_OS', '*')
- localdata.setVar('PN', pn)
- localdata.setVar('PV', '*')
- localdata.setVar('PR', '*')
- localdata.setVar('BB_TASKHASH', hashval)
- swspec = localdata.getVar('SSTATE_SWSPEC')
- if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
- localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
- elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
- localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
- sstatename = taskname[3:]
- filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
-
- matchedfiles = glob.glob(filespec)
- for fullpath in matchedfiles:
- actual_hashval = get_hashval(fullpath)
- if actual_hashval in hashfiles:
- continue
- hashfiles[hashval] = fullpath
- if not taskhashlist:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
-
- if taskhashlist:
- return hashfiles
- else:
- return filedates
-
-bb.siggen.find_siginfo = find_siginfo
-
-
-def sstate_get_manifest_filename(task, d):
- """
- Return the sstate manifest file path for a particular task.
- Also returns the datastore that can be used to query related variables.
- """
- d2 = d.createCopy()
- extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
- if extrainf:
- d2.setVar("SSTATE_MANMACH", extrainf)
- return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
-
-def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
- d2 = d
- variant = ''
- if taskdata2.startswith("virtual:multilib"):
- variant = taskdata2.split(":")[2]
- if variant not in multilibcache:
- multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
- d2 = multilibcache[variant]
-
- if taskdata.endswith("-native"):
- pkgarchs = ["${BUILD_ARCH}"]
- elif taskdata.startswith("nativesdk-"):
- pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
- elif "-cross-canadian" in taskdata:
- pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
- elif "-cross-" in taskdata:
- pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
- elif "-crosssdk" in taskdata:
- pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
- else:
- pkgarchs = ['${MACHINE_ARCH}']
- pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
- pkgarchs.append('allarch')
- pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
-
- for pkgarch in pkgarchs:
- manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
- if os.path.exists(manifest):
- return manifest, d2
- bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
- return None, d2
-
-
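The SIGGEN_LOCKEDSIGS parsing performed by sstate_lockedsigs() above can be illustrated with a small standalone sketch driven by a plain dictionary instead of the BitBake datastore; the sample variable values below are made up for illustration.

def parse_lockedsigs(variables):
    sigs = {}
    types = (variables.get("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
    for t in types:
        varname = "SIGGEN_LOCKEDSIGS_%s" % t
        for entry in (variables.get(varname) or "").split():
            # Each entry is "<pn>:<task>:<hash>"; split at most twice,
            # as sstate_lockedsigs() above does.
            pn, task, h = entry.split(":", 2)
            sigs.setdefault(pn, {})[task] = [h, varname]
    return sigs

if __name__ == "__main__":
    sample = {
        "SIGGEN_LOCKEDSIGS_TYPES": "t-core2-64",
        "SIGGEN_LOCKEDSIGS_t-core2-64":
            "zlib:do_configure:0123abcd zlib:do_compile:4567ef01",
    }
    print(parse_lockedsigs(sample))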
diff --git a/import-layers/yocto-poky/meta/lib/oe/terminal.py b/import-layers/yocto-poky/meta/lib/oe/terminal.py
deleted file mode 100644
index 94afe394e..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/terminal.py
+++ /dev/null
@@ -1,308 +0,0 @@
-import logging
-import oe.classutils
-import shlex
-from bb.process import Popen, ExecutionError
-from distutils.version import LooseVersion
-
-logger = logging.getLogger('BitBake.OE.Terminal')
-
-
-class UnsupportedTerminal(Exception):
- pass
-
-class NoSupportedTerminals(Exception):
- def __init__(self, terms):
- self.terms = terms
-
-
-class Registry(oe.classutils.ClassRegistry):
- command = None
-
- def __init__(cls, name, bases, attrs):
- super(Registry, cls).__init__(name.lower(), bases, attrs)
-
- @property
- def implemented(cls):
- return bool(cls.command)
-
-
-class Terminal(Popen, metaclass=Registry):
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- fmt_sh_cmd = self.format_command(sh_cmd, title)
- try:
- Popen.__init__(self, fmt_sh_cmd, env=env)
- except OSError as exc:
- import errno
- if exc.errno == errno.ENOENT:
- raise UnsupportedTerminal(self.name)
- else:
- raise
-
- def format_command(self, sh_cmd, title):
- fmt = {'title': title or 'Terminal', 'command': sh_cmd}
- if isinstance(self.command, str):
- return shlex.split(self.command.format(**fmt))
- else:
- return [element.format(**fmt) for element in self.command]
-
-class XTerminal(Terminal):
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- Terminal.__init__(self, sh_cmd, title, env, d)
- if not os.environ.get('DISPLAY'):
- raise UnsupportedTerminal(self.name)
-
-class Gnome(XTerminal):
- command = 'gnome-terminal -t "{title}" -x {command}'
- priority = 2
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- # Recent versions of gnome-terminal do not support non-UTF-8 charsets:
- # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
- # clear the LC_ALL environment variable so the default locale is used.
- # Once this is fixed in the gnome-terminal project, the workaround should be removed.
- if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
-
- XTerminal.__init__(self, sh_cmd, title, env, d)
-
-class Mate(XTerminal):
- command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
- priority = 2
-
-class Xfce(XTerminal):
- command = 'xfce4-terminal -T "{title}" -e "{command}"'
- priority = 2
-
-class Terminology(XTerminal):
- command = 'terminology -T="{title}" -e {command}'
- priority = 2
-
-class Konsole(XTerminal):
- command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
- priority = 2
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- # Check version
- vernum = check_terminal_version("konsole")
- if vernum and LooseVersion(vernum) < '2.0.0':
- # Konsole from KDE 3.x
- self.command = 'konsole -T "{title}" -e {command}'
- elif vernum and LooseVersion(vernum) < '16.08.1':
- # Konsole before 16.08.1 has --nofork
- self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
- XTerminal.__init__(self, sh_cmd, title, env, d)
-
-class XTerm(XTerminal):
- command = 'xterm -T "{title}" -e {command}'
- priority = 1
-
-class Rxvt(XTerminal):
- command = 'rxvt -T "{title}" -e {command}'
- priority = 1
-
-class Screen(Terminal):
- command = 'screen -D -m -t "{title}" -S devshell {command}'
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- s_id = "devshell_%i" % os.getpid()
- self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
- Terminal.__init__(self, sh_cmd, title, env, d)
- msg = 'Screen started. Please connect in another terminal with ' \
- '"screen -r %s"' % s_id
- if (d):
- bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
- 0.5, 10), d)
- else:
- logger.warn(msg)
-
-class TmuxRunning(Terminal):
- """Open a new pane in the current running tmux window"""
- name = 'tmux-running'
- command = 'tmux split-window "{command}"'
- priority = 2.75
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- if not bb.utils.which(os.getenv('PATH'), 'tmux'):
- raise UnsupportedTerminal('tmux is not installed')
-
- if not os.getenv('TMUX'):
- raise UnsupportedTerminal('tmux is not running')
-
- if not check_tmux_pane_size('tmux'):
- raise UnsupportedTerminal('tmux pane too small or tmux < 1.9 version is being used')
-
- Terminal.__init__(self, sh_cmd, title, env, d)
-
-class TmuxNewWindow(Terminal):
- """Open a new window in the current running tmux session"""
- name = 'tmux-new-window'
- command = 'tmux new-window -n "{title}" "{command}"'
- priority = 2.70
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- if not bb.utils.which(os.getenv('PATH'), 'tmux'):
- raise UnsupportedTerminal('tmux is not installed')
-
- if not os.getenv('TMUX'):
- raise UnsupportedTerminal('tmux is not running')
-
- Terminal.__init__(self, sh_cmd, title, env, d)
-
-class Tmux(Terminal):
- """Start a new tmux session and window"""
- command = 'tmux new -d -s devshell -n devshell "{command}"'
- priority = 0.75
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- if not bb.utils.which(os.getenv('PATH'), 'tmux'):
- raise UnsupportedTerminal('tmux is not installed')
-
- # TODO: consider using a 'devshell' session shared amongst all
- # devshells, if it's already there, add a new window to it.
- window_name = 'devshell-%i' % os.getpid()
-
- self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
- Terminal.__init__(self, sh_cmd, title, env, d)
-
- attach_cmd = 'tmux att -t {0}'.format(window_name)
- msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
- if d:
- bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
- else:
- logger.warn(msg)
-
-class Custom(Terminal):
- command = 'false' # This is a placeholder
- priority = 3
-
- def __init__(self, sh_cmd, title=None, env=None, d=None):
- self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
- if self.command:
- if not '{command}' in self.command:
- self.command += ' {command}'
- Terminal.__init__(self, sh_cmd, title, env, d)
- logger.warn('Custom terminal was started.')
- else:
- logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
- raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
-
-
-def prioritized():
- return Registry.prioritized()
-
-def get_cmd_list():
- terms = Registry.prioritized()
- cmds = []
- for term in terms:
- if term.command:
- cmds.append(term.command)
- return cmds
-
-def spawn_preferred(sh_cmd, title=None, env=None, d=None):
- """Spawn the first supported terminal, by priority"""
- for terminal in prioritized():
- try:
- spawn(terminal.name, sh_cmd, title, env, d)
- break
- except UnsupportedTerminal:
- continue
- else:
- raise NoSupportedTerminals(get_cmd_list())
-
-def spawn(name, sh_cmd, title=None, env=None, d=None):
- """Spawn the specified terminal, by name"""
- logger.debug(1, 'Attempting to spawn terminal "%s"', name)
- try:
- terminal = Registry.registry[name]
- except KeyError:
- raise UnsupportedTerminal(name)
-
- # We need to know when the command completes but some terminals (at least
- # gnome and tmux) give us no way to do this. We therefore write the pid
- # to a file using a "phonehome" wrapper script, then monitor the pid
- # until it exits.
- import tempfile
- import time
- pidfile = tempfile.NamedTemporaryFile(delete = False).name
- try:
- sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
- pipe = terminal(sh_cmd, title, env, d)
- output = pipe.communicate()[0]
- if output:
- output = output.decode("utf-8")
- if pipe.returncode != 0:
- raise ExecutionError(sh_cmd, pipe.returncode, output)
-
- while os.stat(pidfile).st_size <= 0:
- time.sleep(0.01)
- continue
- with open(pidfile, "r") as f:
- pid = int(f.readline())
- finally:
- os.unlink(pidfile)
-
- while True:
- try:
- os.kill(pid, 0)
- time.sleep(0.1)
- except OSError:
- return
-
-def check_tmux_pane_size(tmux):
- import subprocess as sub
- # On older tmux versions (< 1.9), return False: there is no easy way to get
- # the height of the active pane in the current window without nested
- # formats (which are only available from version 1.9).
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
- return False
- try:
- p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
- shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
- out, err = p.communicate()
- size = int(out.strip())
- except OSError as exc:
- import errno
- if exc.errno == errno.ENOENT:
- return None
- else:
- raise
-
- return size/2 >= 19
-
-def check_terminal_version(terminalName):
- import subprocess as sub
- try:
- cmdversion = '%s --version' % terminalName
- if terminalName.startswith('tmux'):
- cmdversion = '%s -V' % terminalName
- newenv = os.environ.copy()
- newenv["LANG"] = "C"
- p = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=newenv)
- out, err = p.communicate()
- ver_info = out.decode().rstrip().split('\n')
- except OSError as exc:
- import errno
- if exc.errno == errno.ENOENT:
- return None
- else:
- raise
- vernum = None
- for ver in ver_info:
- if ver.startswith('Konsole'):
- vernum = ver.split(' ')[-1]
- if ver.startswith('GNOME Terminal'):
- vernum = ver.split(' ')[-1]
- if ver.startswith('MATE Terminal'):
- vernum = ver.split(' ')[-1]
- if ver.startswith('tmux'):
- vernum = ver.split()[-1]
- return vernum
-
-def distro_name():
- try:
- p = Popen(['lsb_release', '-i'])
- out, err = p.communicate()
- distro = out.split(':')[1].strip().lower()
- except:
- distro = "unknown"
- return distro
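As a rough standalone illustration of the version probing that check_terminal_version() and check_tmux_pane_size() perform above, the following sketch runs a terminal's version command, takes the last whitespace-separated field of the first output line, and compares it with LooseVersion. The probe_version() helper and the tmux usage example are assumptions for illustration only.

import os
import subprocess
from distutils.version import LooseVersion

def probe_version(terminal):
    # tmux reports its version via '-V', most others via '--version'.
    cmd = [terminal, '-V'] if terminal.startswith('tmux') else [terminal, '--version']
    env = dict(os.environ, LANG='C')   # keep the output parseable
    try:
        out = subprocess.run(cmd, capture_output=True, env=env, text=True).stdout
    except FileNotFoundError:
        return None                    # terminal not installed
    first_line = out.strip().splitlines()[0] if out.strip() else ''
    return first_line.split()[-1] if first_line else None

if __name__ == "__main__":
    ver = probe_version('tmux')
    if ver and LooseVersion(ver) < LooseVersion('1.9'):
        print("tmux %s is too old for pane-size checks" % ver)
    else:
        print("tmux version: %s" % ver)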
diff --git a/import-layers/yocto-poky/meta/lib/oe/types.py b/import-layers/yocto-poky/meta/lib/oe/types.py
deleted file mode 100644
index 4ae58acfa..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/types.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import errno
-import re
-import os
-
-
-class OEList(list):
- """OpenEmbedded 'list' type
-
- Acts as an ordinary list, but is constructed from a string value and a
- separator (optional), and re-joins itself when converted to a string with
- str(). Set the variable type flag to 'list' to use this type, and the
- 'separator' flag may be specified (defaulting to whitespace)."""
-
- name = "list"
-
- def __init__(self, value, separator = None):
- if value is not None:
- list.__init__(self, value.split(separator))
- else:
- list.__init__(self)
-
- if separator is None:
- self.separator = " "
- else:
- self.separator = separator
-
- def __str__(self):
- return self.separator.join(self)
-
-def choice(value, choices):
- """OpenEmbedded 'choice' type
-
- Acts as a multiple choice for the user. To use this, set the variable
- type flag to 'choice', and set the 'choices' flag to a space separated
- list of valid values."""
- if not isinstance(value, str):
- raise TypeError("choice accepts a string, not '%s'" % type(value))
-
- value = value.lower()
- choices = choices.lower()
- if value not in choices.split():
- raise ValueError("Invalid choice '%s'. Valid choices: %s" %
- (value, choices))
- return value
-
-class NoMatch(object):
- """Stub python regex pattern object which never matches anything"""
- def findall(self, string, flags=0):
- return None
-
- def finditer(self, string, flags=0):
- return None
-
- def match(self, flags=0):
- return None
-
- def search(self, string, flags=0):
- return None
-
- def split(self, string, maxsplit=0):
- return None
-
- def sub(pattern, repl, string, count=0):
- return None
-
- def subn(pattern, repl, string, count=0):
- return None
-
-NoMatch = NoMatch()
-
-def regex(value, regexflags=None):
- """OpenEmbedded 'regex' type
-
- Acts as a regular expression, returning the pre-compiled regular
- expression pattern object. To use this type, set the variable type flag
- to 'regex', and optionally, set the 'regexflags' type to a space separated
- list of the flags to control the regular expression matching (e.g.
- FOO[regexflags] += 'ignorecase'). See the python documentation on the
- 're' module for a list of valid flags."""
-
- flagval = 0
- if regexflags:
- for flag in regexflags.split():
- flag = flag.upper()
- try:
- flagval |= getattr(re, flag)
- except AttributeError:
- raise ValueError("Invalid regex flag '%s'" % flag)
-
- if not value:
- # Let's ensure that the default behavior for an undefined or empty
- # variable is to match nothing. If the user explicitly wants to match
- # anything, they can match '.*' instead.
- return NoMatch
-
- try:
- return re.compile(value, flagval)
- except re.error as exc:
- raise ValueError("Invalid regex value '%s': %s" %
- (value, exc.args[0]))
-
-def boolean(value):
- """OpenEmbedded 'boolean' type
-
- Valid values for true: 'yes', 'y', 'true', 't', '1'
- Valid values for false: 'no', 'n', 'false', 'f', '0'
- """
-
- if not isinstance(value, str):
- raise TypeError("boolean accepts a string, not '%s'" % type(value))
-
- value = value.lower()
- if value in ('yes', 'y', 'true', 't', '1'):
- return True
- elif value in ('no', 'n', 'false', 'f', '0'):
- return False
- raise ValueError("Invalid boolean value '%s'" % value)
-
-def integer(value, numberbase=10):
- """OpenEmbedded 'integer' type
-
- Defaults to base 10, but this can be specified using the optional
- 'numberbase' flag."""
-
- return int(value, int(numberbase))
-
-_float = float
-def float(value, fromhex='false'):
- """OpenEmbedded floating point type
-
- To use this type, set the type flag to 'float', and optionally set the
- 'fromhex' flag to a true value (obeying the same rules as for the
- 'boolean' type) if the value is in base 16 rather than base 10."""
-
- if boolean(fromhex):
- return _float.fromhex(value)
- else:
- return _float(value)
-
-def path(value, relativeto='', normalize='true', mustexist='false'):
- value = os.path.join(relativeto, value)
-
- if boolean(normalize):
- value = os.path.normpath(value)
-
- if boolean(mustexist):
- try:
- open(value, 'r')
- except IOError as exc:
- if exc.errno == errno.ENOENT:
- raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
-
- return value
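The converters above are normally driven by variable type flags (FOO[type] = "boolean", FOO[choices] = "gzip xz", and so on), with oe.maketype dispatching on the 'type' flag. A minimal self-contained sketch of that pattern, using trimmed copies of boolean() and choice() and a hypothetical convert() helper that only roughly mirrors the real dispatch:

def boolean(value):
    # Trimmed copy of the boolean() converter above.
    value = value.lower()
    if value in ('yes', 'y', 'true', 't', '1'):
        return True
    if value in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % value)

def choice(value, choices):
    # Trimmed copy of the choice() converter above.
    value = value.lower()
    if value not in choices.lower().split():
        raise ValueError("Invalid choice '%s'. Valid choices: %s" % (value, choices))
    return value

def convert(value, flags):
    # Dispatch on the 'type' flag and pass the remaining flags as keyword
    # arguments; this is roughly what oe.maketype does for real variables.
    handlers = {'boolean': boolean, 'choice': choice}
    kwargs = {k: v for k, v in flags.items() if k != 'type'}
    return handlers[flags['type']](value, **kwargs)

if __name__ == "__main__":
    print(convert("Yes", {'type': 'boolean'}))                       # -> True
    print(convert("gzip", {'type': 'choice', 'choices': 'gzip xz'})) # -> 'gzip'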
diff --git a/import-layers/yocto-poky/meta/lib/oe/useradd.py b/import-layers/yocto-poky/meta/lib/oe/useradd.py
deleted file mode 100644
index 179ac76b5..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/useradd.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import argparse
-import re
-
-class myArgumentParser(argparse.ArgumentParser):
- def _print_message(self, message, file=None):
- bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
-
- # This should never be called...
- def exit(self, status=0, message=None):
- message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
- error(message)
-
- def error(self, message):
- raise bb.build.FuncFailed(message)
-
-def split_commands(params):
- params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
- # Remove any empty items
- return [x for x in params if x]
-
-def split_args(params):
- params = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
- # Remove any empty items
- return [x for x in params if x]
-
-def build_useradd_parser():
- # The following comes from --help on useradd from shadow
- parser = myArgumentParser(prog='useradd')
- parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
- parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
- parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
- parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
- parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
- parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
- parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
- parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
- parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
- parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
- parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
- parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
- parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
- parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
- parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
- parser.add_argument("LOGIN", help="Login name of the new user")
-
- return parser
-
-def build_groupadd_parser():
- # The following comes from --help on groupadd from shadow
- parser = myArgumentParser(prog='groupadd')
- parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
- parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
- parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
- parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
- parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
- parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
- parser.add_argument("-r", "--system", help="create a system account", action="store_true")
- parser.add_argument("GROUP", help="Group name of the new group")
-
- return parser
diff --git a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py
deleted file mode 100644
index 80f0442d0..000000000
--- a/import-layers/yocto-poky/meta/lib/oe/utils.py
+++ /dev/null
@@ -1,421 +0,0 @@
-import subprocess
-
-def read_file(filename):
- try:
- f = open( filename, "r" )
- except IOError as reason:
- return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
- else:
- data = f.read().strip()
- f.close()
- return data
- return None
-
-def ifelse(condition, iftrue = True, iffalse = False):
- if condition:
- return iftrue
- else:
- return iffalse
-
-def conditional(variable, checkvalue, truevalue, falsevalue, d):
- if d.getVar(variable) == checkvalue:
- return truevalue
- else:
- return falsevalue
-
-def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- if float(d.getVar(variable)) <= float(checkvalue):
- return truevalue
- else:
- return falsevalue
-
-def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
- if result <= 0:
- return truevalue
- else:
- return falsevalue
-
-def both_contain(variable1, variable2, checkvalue, d):
- val1 = d.getVar(variable1)
- val2 = d.getVar(variable2)
- val1 = set(val1.split())
- val2 = set(val2.split())
- if isinstance(checkvalue, str):
- checkvalue = set(checkvalue.split())
- else:
- checkvalue = set(checkvalue)
- if checkvalue.issubset(val1) and checkvalue.issubset(val2):
- return " ".join(checkvalue)
- else:
- return ""
-
-def set_intersect(variable1, variable2, d):
- """
- Expand both variables, interpret them as lists of strings, and return the
- intersection as a flattened string.
-
- For example:
- s1 = "a b c"
- s2 = "b c d"
- s3 = set_intersect(s1, s2)
- => s3 = "b c"
- """
- val1 = set(d.getVar(variable1).split())
- val2 = set(d.getVar(variable2).split())
- return " ".join(val1 & val2)
-
-def prune_suffix(var, suffixes, d):
- # See if var ends with any of the suffixes listed and
- # remove it if found
- for suffix in suffixes:
- if var.endswith(suffix):
- var = var.replace(suffix, "")
-
- prefix = d.getVar("MLPREFIX")
- if prefix and var.startswith(prefix):
- var = var.replace(prefix, "")
-
- return var
-
-def str_filter(f, str, d):
- from re import match
- return " ".join([x for x in str.split() if match(f, x, 0)])
-
-def str_filter_out(f, str, d):
- from re import match
- return " ".join([x for x in str.split() if not match(f, x, 0)])
-
-def build_depends_string(depends, task):
- """Append a taskname to a string of dependencies as used by the [depends] flag"""
- return " ".join(dep + ":" + task for dep in depends.split())
-
-def inherits(d, *classes):
- """Return True if the metadata inherits any of the specified classes"""
- return any(bb.data.inherits_class(cls, d) for cls in classes)
-
-def features_backfill(var,d):
- # This construct allows new features to be added to the variable named by
- # 'var'. For example, with var = "DISTRO_FEATURES" it allows new features
- # to be added to DISTRO_FEATURES that, if absent, would disable existing
- # functionality, without disturbing distributions that have already set
- # DISTRO_FEATURES.
- # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
- # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED.
- features = (d.getVar(var) or "").split()
- backfill = (d.getVar(var+"_BACKFILL") or "").split()
- considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
-
- addfeatures = []
- for feature in backfill:
- if feature not in features and feature not in considered:
- addfeatures.append(feature)
-
- if addfeatures:
- d.appendVar(var, " " + " ".join(addfeatures))
-
-def all_distro_features(d, features, truevalue="1", falsevalue=""):
- """
- Returns truevalue if *all* given features are set in DISTRO_FEATURES,
- else falsevalue. The features can be given as single string or anything
- that can be turned into a set.
-
- This is a shorter, more flexible version of
- bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
-
- Without explicit true/false values it can be used directly where
- Python expects a boolean:
- if oe.utils.all_distro_features(d, "foo bar"):
- bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
-
- With just a truevalue, it can be used to include files that are meant to be
- used only when requested via DISTRO_FEATURES:
- require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc") }
- """
- return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
-
-def any_distro_features(d, features, truevalue="1", falsevalue=""):
- """
- Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
- else falsevalue. The features can be given as single string or anything
- that can be turned into a set.
-
- This is a shorter, more flexible version of
- bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
-
- Without explicit true/false values it can be used directly where
- Python expects a boolean:
- if not oe.utils.any_distro_features(d, "foo bar"):
- bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
-
- With just a truevalue, it can be used to include files that are meant to be
- used only when requested via DISTRO_FEATURES:
- require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc") }
-
- """
- return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
-
-def parallel_make(d):
- """
- Return the integer value for the number of parallel threads to use when
- building, scraped out of PARALLEL_MAKE. If no parallelization option is
- found, returns None
-
- e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
- """
- pm = (d.getVar('PARALLEL_MAKE') or '').split()
- # look for '-j' and throw other options (e.g. '-l') away
- while pm:
- opt = pm.pop(0)
- if opt == '-j':
- v = pm.pop(0)
- elif opt.startswith('-j'):
- v = opt[2:].strip()
- else:
- continue
-
- return int(v)
-
- return None
-
-def parallel_make_argument(d, fmt, limit=None):
- """
- Helper utility to construct a parallel make argument from the number of
- parallel threads specified in PARALLEL_MAKE.
-
- Returns the input format string `fmt` where a single '%d' will be expanded
- with the number of parallel threads to use. If `limit` is specified, the
- number of parallel threads will be no larger than it. If no parallelization
- option is found in PARALLEL_MAKE, returns an empty string
-
- e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
- "-n 10"
- """
- v = parallel_make(d)
- if v:
- if limit:
- v = min(limit, v)
- return fmt % v
- return ''
-
-def packages_filter_out_system(d):
- """
- Return a list of packages from PACKAGES with the "system" packages such as
- PN-dbg PN-doc PN-locale-en-gb removed.
- """
- pn = d.getVar('PN')
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
- localepkg = pn + "-locale-"
- pkgs = []
-
- for pkg in d.getVar('PACKAGES').split():
- if pkg not in blacklist and localepkg not in pkg:
- pkgs.append(pkg)
- return pkgs
-
-def getstatusoutput(cmd):
- return subprocess.getstatusoutput(cmd)
-
-
-def trim_version(version, num_parts=2):
- """
- Return just the first <num_parts> of <version>, split by periods. For
- example, trim_version("1.2.3", 2) will return "1.2".
- """
- if type(version) is not str:
- raise TypeError("Version should be a string")
- if num_parts < 1:
- raise ValueError("Cannot split to parts < 1")
-
- parts = version.split(".")
- trimmed = ".".join(parts[:num_parts])
- return trimmed
-
-def cpu_count():
- import multiprocessing
- return multiprocessing.cpu_count()
-
-def execute_pre_post_process(d, cmds):
- if cmds is None:
- return
-
- for cmd in cmds.strip().split(';'):
- cmd = cmd.strip()
- if cmd != '':
- bb.note("Executing %s ..." % cmd)
- bb.build.exec_func(cmd, d)
-
-def multiprocess_exec(commands, function):
- import signal
- import multiprocessing
-
- if not commands:
- return []
-
- def init_worker():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
-
- fails = []
-
- def failures(res):
- fails.append(res)
-
- nproc = min(multiprocessing.cpu_count(), len(commands))
- pool = bb.utils.multiprocessingpool(nproc, init_worker)
-
- try:
- mapresult = pool.map_async(function, commands, error_callback=failures)
-
- pool.close()
- pool.join()
- results = mapresult.get()
- except KeyboardInterrupt:
- pool.terminate()
- pool.join()
- raise
-
- if fails:
- raise fails[0]
-
- return results
-
-def squashspaces(string):
- import re
- return re.sub(r"\s+", " ", string).strip()
-
-def format_pkg_list(pkg_dict, ret_format=None):
- output = []
-
- if ret_format == "arch":
- for pkg in sorted(pkg_dict):
- output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
- elif ret_format == "file":
- for pkg in sorted(pkg_dict):
- output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
- elif ret_format == "ver":
- for pkg in sorted(pkg_dict):
- output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
- elif ret_format == "deps":
- for pkg in sorted(pkg_dict):
- for dep in pkg_dict[pkg]["deps"]:
- output.append("%s|%s" % (pkg, dep))
- else:
- for pkg in sorted(pkg_dict):
- output.append(pkg)
-
- return '\n'.join(output)
-
-def host_gcc_version(d):
- import re, subprocess
-
- compiler = d.getVar("BUILD_CC")
- try:
- env = os.environ.copy()
- env["PATH"] = d.getVar("PATH")
- output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
-
- match = re.match(r".* (\d\.\d)\.\d.*", output.split('\n')[0])
- if not match:
- bb.fatal("Can't get compiler version from %s --version output" % compiler)
-
- version = match.group(1)
- return "-%s" % version if version in ("4.8", "4.9") else ""
-
-
-def get_multilib_datastore(variant, d):
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
- localdata.setVar("OVERRIDES", overrides)
- localdata.setVar("MLPREFIX", variant + "-")
- return localdata
-
-#
-# Python 2.7 doesn't have threaded pools (just multiprocessing)
-# so implement a version here
-#
-
-from queue import Queue
-from threading import Thread
-
-class ThreadedWorker(Thread):
- """Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
- self.tasks = tasks
- self.daemon = True
-
- self.worker_init = worker_init
- self.worker_end = worker_end
-
- def run(self):
- from queue import Empty
-
- if self.worker_init is not None:
- self.worker_init(self)
-
- while True:
- try:
- func, args, kargs = self.tasks.get(block=False)
- except Empty:
- if self.worker_end is not None:
- self.worker_end(self)
- break
-
- try:
- func(self, *args, **kargs)
- except Exception as e:
- print(e)
- finally:
- self.tasks.task_done()
-
-class ThreadedPool:
- """Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
- self.tasks = Queue(num_tasks)
- self.workers = []
-
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
- self.workers.append(worker)
-
- def start(self):
- for worker in self.workers:
- worker.start()
-
- def add_task(self, func, *args, **kargs):
- """Add a task to the queue"""
- self.tasks.put((func, args, kargs))
-
- def wait_completion(self):
- """Wait for completion of all the tasks in the queue"""
- self.tasks.join()
- for worker in self.workers:
- worker.join()
-
-def write_ld_so_conf(d):
- # Some utils like prelink may not have the correct target library paths
- # so write an ld.so.conf to help them
- ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
- if os.path.exists(ldsoconf):
- bb.utils.remove(ldsoconf)
- bb.utils.mkdirhier(os.path.dirname(ldsoconf))
- with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir") + '\n')
- f.write(d.getVar("libdir") + '\n')
-
-class ImageQAFailed(bb.build.FuncFailed):
- def __init__(self, description, name=None, logfile=None):
- self.description = description
- self.name = name
- self.logfile=logfile
-
- def __str__(self):
- msg = 'Function failed: %s' % self.name
- if self.description:
- msg = msg + ' (%s)' % self.description
-
- return msg
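The PARALLEL_MAKE handling in parallel_make() and parallel_make_argument() above can be exercised on a plain string; a minimal standalone sketch with illustrative sample values:

def parse_parallel_make(parallel_make):
    """Return the -j value from a PARALLEL_MAKE-style string, or None."""
    words = (parallel_make or '').split()
    while words:
        opt = words.pop(0)
        if opt == '-j' and words:
            # "-j 10" form: the thread count is the next word.
            return int(words.pop(0))
        if opt.startswith('-j') and len(opt) > 2:
            # "-j10" form: the thread count is attached to the option.
            return int(opt[2:])
    return None

def make_argument(parallel_make, fmt, limit=None):
    # Standalone counterpart of parallel_make_argument() above.
    threads = parse_parallel_make(parallel_make)
    if not threads:
        return ''
    if limit:
        threads = min(limit, threads)
    return fmt % threads

if __name__ == "__main__":
    print(parse_parallel_make("-j 10 -l 8"))            # -> 10
    print(make_argument("-j 10", "-n %d", limit=4))      # -> "-n 4"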