Diffstat (limited to 'poky/bitbake/lib')
-rw-r--r--  poky/bitbake/lib/bb/__init__.py | 2
-rw-r--r--  poky/bitbake/lib/bb/build.py | 57
-rw-r--r--  poky/bitbake/lib/bb/cache.py | 83
-rw-r--r--  poky/bitbake/lib/bb/cooker.py | 60
-rw-r--r--  poky/bitbake/lib/bb/cookerdata.py | 10
-rw-r--r--  poky/bitbake/lib/bb/data.py | 4
-rw-r--r--  poky/bitbake/lib/bb/event.py | 19
-rw-r--r--  poky/bitbake/lib/bb/fetch2/__init__.py | 3
-rw-r--r--  poky/bitbake/lib/bb/fetch2/clearcase.py | 4
-rw-r--r--  poky/bitbake/lib/bb/fetch2/git.py | 19
-rw-r--r--  poky/bitbake/lib/bb/fetch2/npm.py | 45
-rw-r--r--  poky/bitbake/lib/bb/fetch2/svn.py | 2
-rw-r--r--  poky/bitbake/lib/bb/fetch2/wget.py | 16
-rwxr-xr-x  poky/bitbake/lib/bb/main.py | 18
-rw-r--r--  poky/bitbake/lib/bb/monitordisk.py | 1
-rw-r--r--  poky/bitbake/lib/bb/namedtuple_with_abc.py | 1
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/BBHandler.py | 1
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py | 1
-rw-r--r--  poky/bitbake/lib/bb/parse/parse_py/__init__.py | 1
-rw-r--r--  poky/bitbake/lib/bb/runqueue.py | 1447
-rw-r--r--  poky/bitbake/lib/bb/server/process.py | 5
-rw-r--r--  poky/bitbake/lib/bb/siggen.py | 318
-rw-r--r--  poky/bitbake/lib/bb/taskdata.py | 1
-rw-r--r--  poky/bitbake/lib/bb/tests/data.py | 15
-rw-r--r--  poky/bitbake/lib/bb/tests/event.py | 8
-rw-r--r--  poky/bitbake/lib/bb/tests/fetch.py | 89
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass | 262
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass | 5
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass | 2
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf | 16
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc1.conf | 1
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc2.conf | 1
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb | 0
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb | 1
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb | 0
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb | 3
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb | 1
-rw-r--r--  poky/bitbake/lib/bb/tests/runqueue.py | 323
-rw-r--r--  poky/bitbake/lib/bb/ui/buildinfohelper.py | 10
-rw-r--r--  poky/bitbake/lib/bb/ui/knotty.py | 19
-rw-r--r--  poky/bitbake/lib/bb/ui/uihelper.py | 2
-rw-r--r--  poky/bitbake/lib/bb/utils.py | 15
-rw-r--r--  poky/bitbake/lib/bblayers/query.py | 39
-rw-r--r--  poky/bitbake/lib/hashserv/__init__.py | 187
-rw-r--r--  poky/bitbake/lib/hashserv/client.py | 157
-rw-r--r--  poky/bitbake/lib/hashserv/server.py | 414
-rw-r--r--  poky/bitbake/lib/hashserv/tests.py | 163
-rw-r--r--  poky/bitbake/lib/layerindexlib/__init__.py | 21
-rw-r--r--  poky/bitbake/lib/prserv/db.py | 2
-rw-r--r--  poky/bitbake/lib/pyinotify.py | 3
-rw-r--r--  poky/bitbake/lib/toaster/orm/models.py | 10
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/selenium_helpers.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/selenium_helpers_base.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_all_builds_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_all_projects_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_artifacts.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_recipes.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_tasks.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_js_unit_tests.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_landing_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_layerdetails_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_most_recent_builds_states.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_new_custom_image_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_new_project_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_project_config_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_project_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_sample.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_task_page.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/browser/test_toastertable_ui.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/builds/buildtest.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/builds/test_core_image_min.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/commands/test_loaddata.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/commands/test_runbuilds.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/eventreplay/__init__.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/functional/functional_helpers.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/tests/views/test_views.py | 2
-rw-r--r--  poky/bitbake/lib/toaster/toastergui/static/js/importlayer.js | 12
-rw-r--r--  poky/bitbake/lib/toaster/toastermain/management/commands/checksocket.py | 2
82 files changed, 2826 insertions(+), 1133 deletions(-)
diff --git a/poky/bitbake/lib/bb/__init__.py b/poky/bitbake/lib/bb/__init__.py
index 7579a893b..f89969174 100644
--- a/poky/bitbake/lib/bb/__init__.py
+++ b/poky/bitbake/lib/bb/__init__.py
@@ -9,7 +9,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-__version__ = "1.43.0"
+__version__ = "1.43.2"
import sys
if sys.version_info < (3, 4, 0):
diff --git a/poky/bitbake/lib/bb/build.py b/poky/bitbake/lib/bb/build.py
index e2f91fa3b..30a2ba236 100644
--- a/poky/bitbake/lib/bb/build.py
+++ b/poky/bitbake/lib/bb/build.py
@@ -54,23 +54,6 @@ else:
builtins['bb'] = bb
builtins['os'] = os
-class FuncFailed(Exception):
- def __init__(self, name = None, logfile = None):
- self.logfile = logfile
- self.name = name
- if name:
- self.msg = 'Function failed: %s' % name
- else:
- self.msg = "Function failed"
-
- def __str__(self):
- if self.logfile and os.path.exists(self.logfile):
- msg = ("%s (log file is located at %s)" %
- (self.msg, self.logfile))
- else:
- msg = self.msg
- return msg
-
class TaskBase(event.Event):
"""Base class for task events"""
@@ -189,12 +172,7 @@ class StdoutNoopContextManager:
return sys.stdout.name
-#
-# pythonexception allows the python exceptions generated to be raised
-# as the real exceptions (not FuncFailed) and without a backtrace at the
-# origin of the failure.
-#
-def exec_func(func, d, dirs = None, pythonexception=False):
+def exec_func(func, d, dirs = None):
"""Execute a BB 'function'"""
try:
@@ -266,7 +244,7 @@ def exec_func(func, d, dirs = None, pythonexception=False):
with bb.utils.fileslocked(lockfiles):
if ispython:
- exec_func_python(func, d, runfile, cwd=adir, pythonexception=pythonexception)
+ exec_func_python(func, d, runfile, cwd=adir)
else:
exec_func_shell(func, d, runfile, cwd=adir)
@@ -286,7 +264,7 @@ _functionfmt = """
{function}(d)
"""
logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
-def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
+def exec_func_python(func, d, runfile, cwd=None):
"""Execute a python BB 'function'"""
code = _functionfmt.format(function=func)
@@ -311,14 +289,7 @@ def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
bb.methodpool.insert_method(func, text, fn, lineno - 1)
comp = utils.better_compile(code, func, "exec_python_func() autogenerated")
- utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated", pythonexception=pythonexception)
- except (bb.parse.SkipRecipe, bb.build.FuncFailed):
- raise
- except Exception as e:
- if pythonexception:
- raise
- logger.error(str(e))
- raise FuncFailed(func, None)
+ utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated")
finally:
bb.debug(2, "Python function %s finished" % func)
@@ -475,13 +446,8 @@ exit $ret
with open(fifopath, 'r+b', buffering=0) as fifo:
try:
bb.debug(2, "Executing shell function %s" % func)
-
- try:
- with open(os.devnull, 'r+') as stdin, logfile:
- bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
- except bb.process.CmdError:
- logfn = d.getVar('BB_LOGFILE')
- raise FuncFailed(func, logfn)
+ with open(os.devnull, 'r+') as stdin, logfile:
+ bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
finally:
os.unlink(fifopath)
@@ -609,9 +575,6 @@ def _exec_task(fn, task, d, quieterr):
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
except (bb.BBHandledException, SystemExit):
return 1
- except FuncFailed as exc:
- logger.error(str(exc))
- return 1
try:
for func in (prefuncs or '').split():
@@ -619,7 +582,10 @@ def _exec_task(fn, task, d, quieterr):
exec_func(task, localdata)
for func in (postfuncs or '').split():
exec_func(func, localdata)
- except FuncFailed as exc:
+ except bb.BBHandledException:
+ event.fire(TaskFailed(task, logfn, localdata, True), localdata)
+ return 1
+ except Exception as exc:
if quieterr:
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
else:
@@ -627,9 +593,6 @@ def _exec_task(fn, task, d, quieterr):
logger.error(str(exc))
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
return 1
- except bb.BBHandledException:
- event.fire(TaskFailed(task, logfn, localdata, True), localdata)
- return 1
finally:
sys.stdout.flush()
sys.stderr.flush()
diff --git a/poky/bitbake/lib/bb/cache.py b/poky/bitbake/lib/bb/cache.py
index 5fb2f17cd..b6f7da592 100644
--- a/poky/bitbake/lib/bb/cache.py
+++ b/poky/bitbake/lib/bb/cache.py
@@ -83,21 +83,21 @@ class CoreRecipeInfo(RecipeInfoCommon):
self.appends = self.listvar('__BBAPPEND', metadata)
self.nocache = self.getvar('BB_DONT_CACHE', metadata)
+ self.provides = self.depvar('PROVIDES', metadata)
+ self.rprovides = self.depvar('RPROVIDES', metadata)
+ self.pn = self.getvar('PN', metadata) or bb.parse.vars_from_file(filename,metadata)[0]
+ self.packages = self.listvar('PACKAGES', metadata)
+ if not self.packages:
+ self.packages.append(self.pn)
+ self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
+
self.skipreason = self.getvar('__SKIPPED', metadata)
if self.skipreason:
- self.pn = self.getvar('PN', metadata) or bb.parse.vars_from_file(filename,metadata)[0]
self.skipped = True
- self.provides = self.depvar('PROVIDES', metadata)
- self.rprovides = self.depvar('RPROVIDES', metadata)
return
self.tasks = metadata.getVar('__BBTASKS', False)
- self.pn = self.getvar('PN', metadata)
- self.packages = self.listvar('PACKAGES', metadata)
- if not self.packages:
- self.packages.append(self.pn)
-
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
@@ -113,11 +113,8 @@ class CoreRecipeInfo(RecipeInfoCommon):
self.stampclean = self.getvar('STAMPCLEAN', metadata)
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
- self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
self.depends = self.depvar('DEPENDS', metadata)
- self.provides = self.depvar('PROVIDES', metadata)
self.rdepends = self.depvar('RDEPENDS', metadata)
- self.rprovides = self.depvar('RPROVIDES', metadata)
self.rrecommends = self.depvar('RRECOMMENDS', metadata)
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
@@ -223,7 +220,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
cachedata.hashfn[fn] = self.hashfilename
for task, taskhash in self.basetaskhashes.items():
- identifier = '%s.%s' % (fn, task)
+ identifier = '%s:%s' % (fn, task)
cachedata.basetaskhash[identifier] = taskhash
cachedata.inherits[fn] = self.inherits
@@ -399,6 +396,15 @@ class Cache(NoCache):
else:
logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
+ # We don't use the symlink, it's just for debugging convenience
+ symlink = os.path.join(self.cachedir, "bb_cache.dat")
+ if os.path.exists(symlink):
+ bb.utils.remove(symlink)
+ try:
+ os.symlink(os.path.basename(self.cachefile), symlink)
+ except OSError:
+ pass
+
def load_cachefile(self):
cachesize = 0
previous_progress = 0
@@ -877,3 +883,56 @@ class MultiProcessCache(object):
p.dump([data, self.__class__.CACHE_VERSION])
bb.utils.unlockfile(glf)
+
+
+class SimpleCache(object):
+ """
+ BitBake simple cache implementation
+
+ Used by the codeparser & file checksum caches
+ """
+
+ def __init__(self, version):
+ self.cachefile = None
+ self.cachedata = None
+ self.cacheversion = version
+
+ def init_cache(self, d, cache_file_name=None, defaultdata=None):
+ cachedir = (d.getVar("PERSISTENT_DIR") or
+ d.getVar("CACHE"))
+ if not cachedir:
+ return defaultdata
+
+ bb.utils.mkdirhier(cachedir)
+ self.cachefile = os.path.join(cachedir,
+ cache_file_name or self.__class__.cache_file_name)
+ logger.debug(1, "Using cache in '%s'", self.cachefile)
+
+ glf = bb.utils.lockfile(self.cachefile + ".lock")
+
+ try:
+ with open(self.cachefile, "rb") as f:
+ p = pickle.Unpickler(f)
+ data, version = p.load()
+ except:
+ bb.utils.unlockfile(glf)
+ return defaultdata
+
+ bb.utils.unlockfile(glf)
+
+ if version != self.cacheversion:
+ return defaultdata
+
+ return data
+
+ def save(self, data):
+ if not self.cachefile:
+ return
+
+ glf = bb.utils.lockfile(self.cachefile + ".lock")
+
+ with open(self.cachefile, "wb") as f:
+ p = pickle.Pickler(f, -1)
+ p.dump([data, self.cacheversion])
+
+ bb.utils.unlockfile(glf)
diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py
index 0008c2fde..20ef04d3f 100644
--- a/poky/bitbake/lib/bb/cooker.py
+++ b/poky/bitbake/lib/bb/cooker.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
@@ -32,6 +31,7 @@ import pyinotify
import json
import pickle
import codecs
+import hashserv
logger = logging.getLogger("BitBake")
collectlog = logging.getLogger("BitBake.Collection")
@@ -193,6 +193,8 @@ class BBCooker:
bb.parse.BBHandler.cached_statements = {}
self.ui_cmdline = None
+ self.hashserv = None
+ self.hashservaddr = None
self.initConfigurationData()
@@ -373,8 +375,6 @@ class BBCooker:
# Copy of the data store which has been expanded.
# Used for firing events and accessing variables where expansion needs to be accounted for
#
- bb.parse.init_parser(self.data)
-
if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
self.disableDataTracking()
@@ -392,6 +392,22 @@ class BBCooker:
except prserv.serv.PRServiceConfigError as e:
bb.fatal("Unable to start PR Server, exitting")
+ if self.data.getVar("BB_HASHSERVE") == "auto":
+ # Create a new hash server bound to a unix domain socket
+ if not self.hashserv:
+ dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
+ self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
+ self.hashserv = hashserv.create_server(self.hashservaddr, dbfile, sync=False)
+ self.hashserv.process = multiprocessing.Process(target=self.hashserv.serve_forever)
+ self.hashserv.process.start()
+ self.data.setVar("BB_HASHSERVE", self.hashservaddr)
+ self.databuilder.origdata.setVar("BB_HASHSERVE", self.hashservaddr)
+ self.databuilder.data.setVar("BB_HASHSERVE", self.hashservaddr)
+ for mc in self.databuilder.mcdata:
+ self.databuilder.mcdata[mc].setVar("BB_HASHSERVE", self.hashservaddr)
+
+ bb.parse.init_parser(self.data)
+
def enableDataTracking(self):
self.configuration.tracking = True
if hasattr(self, "data"):
@@ -903,6 +919,10 @@ class BBCooker:
os.unlink('package-depends.dot')
except FileNotFoundError:
pass
+ try:
+ os.unlink('recipe-depends.dot')
+ except FileNotFoundError:
+ pass
with open('task-depends.dot', 'w') as f:
f.write("digraph depends {\n")
@@ -916,27 +936,6 @@ class BBCooker:
f.write("}\n")
logger.info("Task dependencies saved to 'task-depends.dot'")
- with open('recipe-depends.dot', 'w') as f:
- f.write("digraph depends {\n")
- pndeps = {}
- for task in sorted(depgraph["tdepends"]):
- (pn, taskname) = task.rsplit(".", 1)
- if pn not in pndeps:
- pndeps[pn] = set()
- for dep in sorted(depgraph["tdepends"][task]):
- (deppn, deptaskname) = dep.rsplit(".", 1)
- pndeps[pn].add(deppn)
- for pn in sorted(pndeps):
- fn = depgraph["pn"][pn]["filename"]
- version = depgraph["pn"][pn]["version"]
- f.write('"%s" [label="%s\\n%s\\n%s"]\n' % (pn, pn, version, fn))
- for dep in sorted(pndeps[pn]):
- if dep == pn:
- continue
- f.write('"%s" -> "%s"\n' % (pn, dep))
- f.write("}\n")
- logger.info("Flattened recipe dependencies saved to 'recipe-depends.dot'")
-
def show_appends_with_no_recipes(self):
# Determine which bbappends haven't been applied
@@ -1646,9 +1645,11 @@ class BBCooker:
def post_serve(self):
prserv.serv.auto_shutdown()
+ if self.hashserv:
+ self.hashserv.process.terminate()
+ self.hashserv.process.join()
bb.event.fire(CookerExit(), self.data)
-
def shutdown(self, force = False):
if force:
self.state = state.forceshutdown
@@ -1663,6 +1664,7 @@ class BBCooker:
def reset(self):
self.initConfigurationData()
+ self.handlePRServ()
def clientComplete(self):
"""Called when the client is done using the server"""
@@ -2063,6 +2065,14 @@ class CookerParser(object):
for process in self.processes:
self.parser_quit.put(None)
+ # Clean up the queue before calling process.join(), otherwise there might be
+ # deadlocks.
+ while True:
+ try:
+ self.result_queue.get(timeout=0.25)
+ except queue.Empty:
+ break
+
for process in self.processes:
if force:
process.join(.1)
diff --git a/poky/bitbake/lib/bb/cookerdata.py b/poky/bitbake/lib/bb/cookerdata.py
index 842275d53..472423fdc 100644
--- a/poky/bitbake/lib/bb/cookerdata.py
+++ b/poky/bitbake/lib/bb/cookerdata.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
@@ -14,6 +13,7 @@ import logging
import os
import re
import sys
+import hashlib
from functools import wraps
import bb
from bb import data
@@ -122,6 +122,7 @@ class CookerConfiguration(object):
self.profile = False
self.nosetscene = False
self.setsceneonly = False
+ self.skipsetscene = False
self.invalidate_stamp = False
self.dump_signatures = []
self.dry_run = False
@@ -267,12 +268,13 @@ class CookerDataBuilder(object):
self.mcdata = {}
def parseBaseConfiguration(self):
+ data_hash = hashlib.sha256()
try:
- bb.parse.init_parser(self.basedata)
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
if self.data.getVar("BB_WORKERCONTEXT", False) is None:
bb.fetch.fetcher_init(self.data)
+ bb.parse.init_parser(self.data)
bb.codeparser.parser_cache_init(self.data)
bb.event.fire(bb.event.ConfigParsed(), self.data)
@@ -290,7 +292,7 @@ class CookerDataBuilder(object):
bb.event.fire(bb.event.ConfigParsed(), self.data)
bb.parse.init_parser(self.data)
- self.data_hash = self.data.get_hash()
+ data_hash.update(self.data.get_hash().encode('utf-8'))
self.mcdata[''] = self.data
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
@@ -298,9 +300,11 @@ class CookerDataBuilder(object):
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
bb.event.fire(bb.event.ConfigParsed(), mcdata)
self.mcdata[config] = mcdata
+ data_hash.update(mcdata.get_hash().encode('utf-8'))
if multiconfig:
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
+ self.data_hash = data_hash.hexdigest()
except (SyntaxError, bb.BBHandledException):
raise bb.BBHandledException
except bb.data_smart.ExpansionError as e:
diff --git a/poky/bitbake/lib/bb/data.py b/poky/bitbake/lib/bb/data.py
index 92ef40530..0d75d0c1a 100644
--- a/poky/bitbake/lib/bb/data.py
+++ b/poky/bitbake/lib/bb/data.py
@@ -130,7 +130,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
if all:
oval = d.getVar(var, False)
val = d.getVar(var)
- except (KeyboardInterrupt, bb.build.FuncFailed):
+ except (KeyboardInterrupt):
raise
except Exception as exc:
o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
@@ -422,7 +422,7 @@ def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
var = lookupcache[dep]
if var is not None:
data = data + str(var)
- k = fn + "." + task
+ k = fn + ":" + task
basehash[k] = hashlib.sha256(data.encode("utf-8")).hexdigest()
taskdeps[task] = alldeps
diff --git a/poky/bitbake/lib/bb/event.py b/poky/bitbake/lib/bb/event.py
index 7cbb5ca47..d44621edf 100644
--- a/poky/bitbake/lib/bb/event.py
+++ b/poky/bitbake/lib/bb/event.py
@@ -124,6 +124,7 @@ def fire_class_handlers(event, d):
ui_queue = []
@atexit.register
def print_ui_queue():
+ global ui_queue
"""If we're exiting before a UI has been spawned, display any queued
LogRecords to the console."""
logger = logging.getLogger("BitBake")
@@ -168,6 +169,7 @@ def print_ui_queue():
logger.removeHandler(stderr)
else:
logger.removeHandler(stdout)
+ ui_queue = []
def fire_ui_handlers(event, d):
global _thread_lock
@@ -402,23 +404,6 @@ class RecipeTaskPreProcess(RecipeEvent):
class RecipeParsed(RecipeEvent):
""" Recipe Parsing Complete """
-class StampUpdate(Event):
- """Trigger for any adjustment of the stamp files to happen"""
-
- def __init__(self, targets, stampfns):
- self._targets = targets
- self._stampfns = stampfns
- Event.__init__(self)
-
- def getStampPrefix(self):
- return self._stampfns
-
- def getTargets(self):
- return self._targets
-
- stampPrefix = property(getStampPrefix)
- targets = property(getTargets)
-
class BuildBase(Event):
"""Base class for bitbake build events"""
diff --git a/poky/bitbake/lib/bb/fetch2/__init__.py b/poky/bitbake/lib/bb/fetch2/__init__.py
index f6b5529bb..1f5f8f1f1 100644
--- a/poky/bitbake/lib/bb/fetch2/__init__.py
+++ b/poky/bitbake/lib/bb/fetch2/__init__.py
@@ -962,7 +962,8 @@ def rename_bad_checksum(ud, suffix):
new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix)
bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath))
- bb.utils.movefile(ud.localpath, new_localpath)
+ if not bb.utils.movefile(ud.localpath, new_localpath):
+ bb.warn("Renaming %s to %s failed, grep movefile in log.do_fetch to see why" % (ud.localpath, new_localpath))
def try_mirror_url(fetch, origud, ud, ld, check = False):
diff --git a/poky/bitbake/lib/bb/fetch2/clearcase.py b/poky/bitbake/lib/bb/fetch2/clearcase.py
index 9ed0d9bea..3dd93ad6b 100644
--- a/poky/bitbake/lib/bb/fetch2/clearcase.py
+++ b/poky/bitbake/lib/bb/fetch2/clearcase.py
@@ -54,6 +54,8 @@ import shutil
import bb
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
+from bb.fetch2 import MissingParameterError
+from bb.fetch2 import ParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
@@ -79,7 +81,7 @@ class ClearCase(FetchMethod):
if 'protocol' in ud.parm:
ud.proto = ud.parm['protocol']
if not ud.proto in ('http', 'https'):
- raise fetch2.ParameterError("Invalid protocol type", ud.url)
+ raise ParameterError("Invalid protocol type", ud.url)
ud.vob = ''
if 'vob' in ud.parm:
diff --git a/poky/bitbake/lib/bb/fetch2/git.py b/poky/bitbake/lib/bb/fetch2/git.py
index e171aa7eb..2d1d2cabd 100644
--- a/poky/bitbake/lib/bb/fetch2/git.py
+++ b/poky/bitbake/lib/bb/fetch2/git.py
@@ -464,6 +464,8 @@ class Git(FetchMethod):
if os.path.exists(destdir):
bb.utils.prunedir(destdir)
+ need_lfs = ud.parm.get("lfs", "1") == "1"
+
source_found = False
source_error = []
@@ -493,14 +495,10 @@ class Git(FetchMethod):
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
if self._contains_lfs(ud, d, destdir):
- path = d.getVar('PATH')
- if path:
- gitlfstool = bb.utils.which(path, "git-lfs", executable=True)
- if not gitlfstool:
- raise bb.fetch2.FetchError("Repository %s has lfs content, install git-lfs plugin on host to download" % (repourl))
+ if need_lfs and not self._find_git_lfs(d):
+ raise bb.fetch2.FetchError("Repository %s has LFS content, install git-lfs on host to download (or set lfs=0 to ignore it)" % (repourl))
else:
- bb.note("Could not find 'PATH'")
-
+ bb.note("Repository %s has LFS content but it is not being fetched" % (repourl))
if not ud.nocheckout:
if subdir != "":
@@ -566,6 +564,13 @@ class Git(FetchMethod):
pass
return False
+ def _find_git_lfs(self, d):
+ """
+ Return True if git-lfs can be found, False otherwise.
+ """
+ import shutil
+ return shutil.which("git-lfs", path=d.getVar('PATH')) is not None
+
def _get_repo_url(self, ud):
"""
Return the repository URL
diff --git a/poky/bitbake/lib/bb/fetch2/npm.py b/poky/bitbake/lib/bb/fetch2/npm.py
index 4427b1bb8..9700e6102 100644
--- a/poky/bitbake/lib/bb/fetch2/npm.py
+++ b/poky/bitbake/lib/bb/fetch2/npm.py
@@ -101,11 +101,19 @@ class Npm(FetchMethod):
return False
return True
- def _runwget(self, ud, d, command, quiet):
- logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
- bb.fetch2.check_network_access(d, command, ud.url)
+ def _runpack(self, ud, d, pkgfullname: str, quiet=False) -> str:
+ """
+ Runs npm pack on a full package name.
+ Returns the filename of the downloaded package
+ """
+ bb.fetch2.check_network_access(d, pkgfullname, ud.registry)
dldir = d.getVar("DL_DIR")
- runfetchcmd(command, d, quiet, workdir=dldir)
+ dldir = os.path.join(dldir, ud.prefixdir)
+
+ command = "npm pack {} --registry {}".format(pkgfullname, ud.registry)
+ logger.debug(2, "Fetching {} using command '{}' in {}".format(pkgfullname, command, dldir))
+ filename = runfetchcmd(command, d, quiet, workdir=dldir)
+ return filename.rstrip()
def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
file = data[pkg]['tgz']
@@ -163,6 +171,9 @@ class Npm(FetchMethod):
pkgfullname = pkg
if version != '*' and not '/' in version:
pkgfullname += "@'%s'" % version
+ if pkgfullname in fetchedlist:
+ return
+
logger.debug(2, "Calling getdeps on %s" % pkg)
fetchcmd = "npm view %s --json --registry %s" % (pkgfullname, ud.registry)
output = runfetchcmd(fetchcmd, d, True)
@@ -182,15 +193,10 @@ class Npm(FetchMethod):
if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
return
- #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
- outputurl = pdata['dist']['tarball']
+ filename = self._runpack(ud, d, pkgfullname)
data[pkg] = {}
- data[pkg]['tgz'] = os.path.basename(outputurl)
- if outputurl in fetchedlist:
- return
-
- self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
- fetchedlist.append(outputurl)
+ data[pkg]['tgz'] = filename
+ fetchedlist.append(pkgfullname)
dependencies = pdata.get('dependencies', {})
optionalDependencies = pdata.get('optionalDependencies', {})
@@ -217,17 +223,12 @@ class Npm(FetchMethod):
if obj == pkg:
self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest, False)
return
- outputurl = "invalid"
- if ('resolved' not in data) or (not data['resolved'].startswith('http://') and not data['resolved'].startswith('https://')):
- # will be the case for ${PN}
- fetchcmd = "npm view %s@%s dist.tarball --registry %s" % (pkg, version, ud.registry)
- logger.debug(2, "Found this matching URL: %s" % str(fetchcmd))
- outputurl = runfetchcmd(fetchcmd, d, True)
- else:
- outputurl = data['resolved']
- self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
+
+ pkgnameWithVersion = "{}@{}".format(pkg, version)
+ logger.debug(2, "Get dependencies for {}".format(pkgnameWithVersion))
+ filename = self._runpack(ud, d, pkgnameWithVersion)
manifest[pkg] = {}
- manifest[pkg]['tgz'] = os.path.basename(outputurl).rstrip()
+ manifest[pkg]['tgz'] = filename
manifest[pkg]['deps'] = {}
if pkg in lockdown:
diff --git a/poky/bitbake/lib/bb/fetch2/svn.py b/poky/bitbake/lib/bb/fetch2/svn.py
index 59ce93160..96d666ba3 100644
--- a/poky/bitbake/lib/bb/fetch2/svn.py
+++ b/poky/bitbake/lib/bb/fetch2/svn.py
@@ -145,7 +145,7 @@ class Svn(FetchMethod):
if not ("externals" in ud.parm and ud.parm["externals"] == "nowarn"):
# Warn the user if this had externals (won't catch them all)
- output = runfetchcmd("svn propget svn:externals", d, workdir=ud.moddir)
+ output = runfetchcmd("svn propget svn:externals || true", d, workdir=ud.moddir)
if output:
if "--ignore-externals" in svnfetchcmd.split():
bb.warn("%s contains svn:externals." % ud.url)
diff --git a/poky/bitbake/lib/bb/fetch2/wget.py b/poky/bitbake/lib/bb/fetch2/wget.py
index 0f71ee4ea..725586d2b 100644
--- a/poky/bitbake/lib/bb/fetch2/wget.py
+++ b/poky/bitbake/lib/bb/fetch2/wget.py
@@ -257,13 +257,15 @@ class Wget(FetchMethod):
fp.read()
fp.close()
- newheaders = dict((k, v) for k, v in list(req.headers.items())
- if k.lower() not in ("content-length", "content-type"))
- return self.parent.open(urllib.request.Request(req.get_full_url(),
- headers=newheaders,
- origin_req_host=req.origin_req_host,
- unverifiable=True))
-
+ if req.get_method() != 'GET':
+ newheaders = dict((k, v) for k, v in list(req.headers.items())
+ if k.lower() not in ("content-length", "content-type"))
+ return self.parent.open(urllib.request.Request(req.get_full_url(),
+ headers=newheaders,
+ origin_req_host=req.origin_req_host,
+ unverifiable=True))
+
+ raise urllib.request.HTTPError(req, code, msg, headers, None)
# Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
# Forbidden when they actually mean 405 Method Not Allowed.
diff --git a/poky/bitbake/lib/bb/main.py b/poky/bitbake/lib/bb/main.py
index ca59eb9af..af2880f8d 100755
--- a/poky/bitbake/lib/bb/main.py
+++ b/poky/bitbake/lib/bb/main.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
@@ -255,6 +254,11 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Do not run any setscene tasks. sstate will be ignored and "
"everything needed, built.")
+ parser.add_option("", "--skip-setscene", action="store_true",
+ dest="skipsetscene", default=False,
+ help="Skip setscene tasks if they would be executed. Tasks previously "
+ "restored from sstate will be kept, unlike --no-setscene")
+
parser.add_option("", "--setscene-only", action="store_true",
dest="setsceneonly", default=False,
help="Only run setscene tasks, don't run any real tasks.")
@@ -448,12 +452,7 @@ def setup_bitbake(configParams, configuration, extrafeatures=None):
bb.utils.unlockfile(lock)
raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
if not configParams.server_only:
- try:
- server_connection = bb.server.process.connectProcessServer(sockname, featureset)
- except EOFError:
- # The server may have been shutting down but not closed the socket yet. If that happened,
- # ignore it.
- pass
+ server_connection = bb.server.process.connectProcessServer(sockname, featureset)
if server_connection or configParams.server_only:
break
@@ -464,12 +463,13 @@ def setup_bitbake(configParams, configuration, extrafeatures=None):
raise
retries -= 1
tryno = 8 - retries
- if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
+ if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError, EOFError)):
logger.info("Retrying server connection (#%d)..." % tryno)
else:
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
if not retries:
- bb.fatal("Unable to connect to bitbake server, or start one")
+ bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).")
+ bb.event.print_ui_queue()
if retries < 5:
time.sleep(5)
diff --git a/poky/bitbake/lib/bb/monitordisk.py b/poky/bitbake/lib/bb/monitordisk.py
index 69b25c772..1a25b0041 100644
--- a/poky/bitbake/lib/bb/monitordisk.py
+++ b/poky/bitbake/lib/bb/monitordisk.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# Copyright (C) 2012 Robert Yang
#
diff --git a/poky/bitbake/lib/bb/namedtuple_with_abc.py b/poky/bitbake/lib/bb/namedtuple_with_abc.py
index c8e1d55c1..646aed6ff 100644
--- a/poky/bitbake/lib/bb/namedtuple_with_abc.py
+++ b/poky/bitbake/lib/bb/namedtuple_with_abc.py
@@ -1,5 +1,4 @@
# http://code.activestate.com/recipes/577629-namedtupleabc-abstract-base-class-mix-in-for-named/
-#!/usr/bin/env python
# Copyright (c) 2011 Jan Kaliszewski (zuo). Available under the MIT License.
#
# SPDX-License-Identifier: MIT
diff --git a/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
index 889f230f7..6f7cf82b2 100644
--- a/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
class for handling .bb files
diff --git a/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
index 71bf61b76..2e84b913d 100644
--- a/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
+++ b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
class for handling configuration data files
diff --git a/poky/bitbake/lib/bb/parse/parse_py/__init__.py b/poky/bitbake/lib/bb/parse/parse_py/__init__.py
index cdebe440d..f508afa14 100644
--- a/poky/bitbake/lib/bb/parse/parse_py/__init__.py
+++ b/poky/bitbake/lib/bb/parse/parse_py/__init__.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
BitBake Parsers
diff --git a/poky/bitbake/lib/bb/runqueue.py b/poky/bitbake/lib/bb/runqueue.py
index 010b08501..18049436f 100644
--- a/poky/bitbake/lib/bb/runqueue.py
+++ b/poky/bitbake/lib/bb/runqueue.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
BitBake 'RunQueue' implementation
@@ -26,6 +25,7 @@ import subprocess
import pickle
from multiprocessing import Process
import shlex
+import pprint
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
@@ -68,6 +68,14 @@ def build_tid(mc, fn, taskname):
return "mc:" + mc + ":" + fn + ":" + taskname
return fn + ":" + taskname
+# Index used to pair up potentially matching multiconfig tasks
+# We match on PN, taskname and hash being equal
+def pending_hash_index(tid, rqdata):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
+ h = rqdata.runtaskentries[tid].unihash
+ return pn + ":" + "taskname" + h
+
class RunQueueStats:
"""
Holds statistics on the tasks handled by the associated runQueue
@@ -103,8 +111,6 @@ class RunQueueStats:
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
-runQueueSceneRun = 4
-runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
@@ -127,7 +133,7 @@ class RunQueueScheduler(object):
self.prio_map = [self.rqdata.runtaskentries.keys()]
- self.buildable = []
+ self.buildable = set()
self.skip_maxthread = {}
self.stamps = {}
for tid in self.rqdata.runtaskentries:
@@ -142,8 +148,11 @@ class RunQueueScheduler(object):
"""
Return the id of the first task we find that is buildable
"""
- self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
- if not self.buildable:
+ buildable = set(self.buildable)
+ buildable.difference_update(self.rq.runq_running)
+ buildable.difference_update(self.rq.holdoff_tasks)
+ buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
+ if not buildable:
return None
# Filter out tasks that have a max number of threads that have been exceeded
@@ -159,8 +168,8 @@ class RunQueueScheduler(object):
else:
skip_buildable[rtaskname] = 1
- if len(self.buildable) == 1:
- tid = self.buildable[0]
+ if len(buildable) == 1:
+ tid = buildable.pop()
taskname = taskname_from_tid(tid)
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
return None
@@ -175,7 +184,7 @@ class RunQueueScheduler(object):
best = None
bestprio = None
- for tid in self.buildable:
+ for tid in buildable:
taskname = taskname_from_tid(tid)
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
continue
@@ -197,7 +206,12 @@ class RunQueueScheduler(object):
return self.next_buildable_task()
def newbuildable(self, task):
- self.buildable.append(task)
+ self.buildable.add(task)
+ # Once tasks are running we don't need to worry about them again
+ self.buildable.difference_update(self.rq.runq_running)
+
+ def removebuildable(self, task):
+ self.buildable.remove(task)
def describe_task(self, taskid):
result = 'ID %s' % taskid
@@ -837,6 +851,20 @@ class RunQueueData:
for depend in depends:
mark_active(depend, depth+1)
+ def invalidate_task(tid, error_nostamp):
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ taskdep = self.dataCaches[mc].task_deps[taskfn]
+ if fn + ":" + taskname not in taskData[mc].taskentries:
+ logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
+ if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
+ if error_nostamp:
+ bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
+ else:
+ bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
+ else:
+ logger.verbose("Invalidate task %s, %s", taskname, fn)
+ bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
+
self.target_tids = []
for (mc, target, task, fn) in self.targets:
@@ -905,6 +933,8 @@ class RunQueueData:
for tid in list(runall_tids):
mark_active(tid,1)
+ if self.cooker.configuration.force:
+ invalidate_task(tid, False)
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
@@ -926,6 +956,8 @@ class RunQueueData:
for tid in list(runonly_tids):
mark_active(tid,1)
+ if self.cooker.configuration.force:
+ invalidate_task(tid, False)
for tid in list(self.runtaskentries.keys()):
if tid not in runq_build:
@@ -1102,20 +1134,6 @@ class RunQueueData:
continue
self.runq_setscene_tids.append(tid)
- def invalidate_task(tid, error_nostamp):
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- taskdep = self.dataCaches[mc].task_deps[taskfn]
- if fn + ":" + taskname not in taskData[mc].taskentries:
- logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
- if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
- if error_nostamp:
- bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
- else:
- bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
- else:
- logger.verbose("Invalidate task %s, %s", taskname, fn)
- bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
-
self.init_progress_reporter.next_stage()
# Invalidate task if force mode active
@@ -1146,6 +1164,8 @@ class RunQueueData:
self.init_progress_reporter.next_stage()
+ bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
+
# Iterate over the task list and call into the siggen code
dealtwith = set()
todeal = set(self.runtaskentries)
@@ -1164,10 +1184,9 @@ class RunQueueData:
def prepare_task_hash(self, tid):
procdep = []
for dep in self.runtaskentries[tid].depends:
- procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
- self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(taskfn + "." + taskname)
+ procdep.append(dep)
+ self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
+ self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
def dump_data(self):
"""
@@ -1194,7 +1213,6 @@ class RunQueue:
self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
- self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
self.state = runQueuePrepare
@@ -1203,7 +1221,7 @@ class RunQueue:
# Invoked at regular time intervals via the bitbake heartbeat event
# while the build is running. We generate a unique name for the handler
# here, just in case that there ever is more than one RunQueue instance,
- # start the handler when reaching runQueueSceneRun, and stop it when
+ # start the handler when reaching runQueueSceneInit, and stop it when
# done with the build.
self.dm = monitordisk.diskMonitor(cfgData)
self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
@@ -1245,6 +1263,7 @@ class RunQueue:
"buildname" : self.cfgData.getVar("BUILDNAME"),
"date" : self.cfgData.getVar("DATE"),
"time" : self.cfgData.getVar("TIME"),
+ "hashservaddr" : self.cooker.hashservaddr,
}
worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
@@ -1378,24 +1397,29 @@ class RunQueue:
cache[tid] = iscurrent
return iscurrent
- def validate_hash(self, *, sq_fn, sq_task, sq_hash, sq_hashfn, siginfo, sq_unihash, d):
- locs = {"sq_fn" : sq_fn, "sq_task" : sq_task, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn,
- "sq_unihash" : sq_unihash, "siginfo" : siginfo, "d" : d}
+ def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False):
+ valid = set()
+ if self.hashvalidate:
+ sq_data = {}
+ sq_data['hash'] = {}
+ sq_data['hashfn'] = {}
+ sq_data['unihash'] = {}
+ for tid in tocheck:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
+ sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
+ sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
- hashvalidate_args = ("(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo, sq_unihash=sq_unihash)",
- "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo)",
- "(sq_fn, sq_task, sq_hash, sq_hashfn, d)")
+ valid = self.validate_hash(sq_data, data, siginfo, currentcount)
- for args in hashvalidate_args[:-1]:
- try:
- call = self.hashvalidate + args
- return bb.utils.better_eval(call, locs)
- except TypeError:
- continue
+ return valid
+
+ def validate_hash(self, sq_data, d, siginfo, currentcount):
+ locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
+
+ # Metadata has **kwargs so args can be added, sq_data can also gain new fields
+ call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
- # Call the last entry without a try...catch to propagate any thrown
- # TypeError
- call = self.hashvalidate + hashvalidate_args[-1]
return bb.utils.better_eval(call, locs)
def _execute_runqueue(self):
@@ -1408,7 +1432,6 @@ class RunQueue:
retval = True
if self.state is runQueuePrepare:
- self.rqexe = RunQueueExecuteDummy(self)
# NOTE: if you add, remove or significantly refactor the stages of this
# process then you should recalculate the weightings here. This is quite
# easy to do - just change the next line temporarily to pass debug=True as
@@ -1422,18 +1445,20 @@ class RunQueue:
self.state = runQueueComplete
else:
self.state = runQueueSceneInit
- self.rqdata.init_progress_reporter.next_stage()
-
- # we are ready to run, emit dependency info to any UI or class which
- # needs it
- depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
- self.rqdata.init_progress_reporter.next_stage()
- bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
+ bb.parse.siggen.save_unitaskhashes()
if self.state is runQueueSceneInit:
+ self.rqdata.init_progress_reporter.next_stage()
+
+ # we are ready to run, emit dependency info to any UI or class which
+ # needs it
+ depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
+ self.rqdata.init_progress_reporter.next_stage()
+ bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
+
if not self.dm_event_handler_registered:
res = bb.event.register(self.dm_event_handler_name,
- lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
+ lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
('bb.event.HeartbeatEvent',))
self.dm_event_handler_registered = True
@@ -1446,24 +1471,23 @@ class RunQueue:
if 'printdiff' in dump:
self.write_diffscenetasks(invalidtasks)
self.state = runQueueComplete
- else:
- self.rqdata.init_progress_reporter.next_stage()
- self.start_worker()
- self.rqdata.init_progress_reporter.next_stage()
- self.rqexe = RunQueueExecuteScenequeue(self)
-
- if self.state is runQueueSceneRun:
- retval = self.rqexe.execute()
- if self.state is runQueueRunInit:
- if self.cooker.configuration.setsceneonly:
- self.state = runQueueComplete
- else:
- # Just in case we didn't setscene
- self.rqdata.init_progress_reporter.finish()
- logger.info("Executing RunQueue Tasks")
- self.rqexe = RunQueueExecuteTasks(self)
- self.state = runQueueRunning
+ if self.state is runQueueSceneInit:
+ self.rqdata.init_progress_reporter.next_stage()
+ self.start_worker()
+ self.rqdata.init_progress_reporter.next_stage()
+ self.rqexe = RunQueueExecute(self)
+
+ # If we don't have any setscene functions, skip execution
+ if len(self.rqdata.runq_setscene_tids) == 0:
+ logger.info('No setscene tasks')
+ for tid in self.rqdata.runtaskentries:
+ if len(self.rqdata.runtaskentries[tid].depends) == 0:
+ self.rqexe.setbuildable(tid)
+ self.rqexe.tasks_notcovered.add(tid)
+ self.rqexe.sqdone = True
+ logger.info('Executing Tasks')
+ self.state = runQueueRunning
if self.state is runQueueRunning:
retval = self.rqexe.execute()
@@ -1478,12 +1502,14 @@ class RunQueue:
self.dm_event_handler_registered = False
if build_done and self.rqexe:
+ bb.parse.siggen.save_unitaskhashes()
self.teardown_workers()
- if self.rqexe.stats.failed:
- logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
- else:
- # Let's avoid the word "failed" if nothing actually did
- logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
+ if self.rqexe:
+ if self.rqexe.stats.failed:
+ logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
+ else:
+ # Let's avoid the word "failed" if nothing actually did
+ logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
if self.state is runQueueFailed:
raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
@@ -1566,16 +1592,8 @@ class RunQueue:
def print_diffscenetasks(self):
- valid = []
- sq_hash = []
- sq_hashfn = []
- sq_unihash = []
- sq_fn = []
- sq_taskname = []
- sq_task = []
noexec = []
- stamppresent = []
- valid_new = set()
+ tocheck = set()
for tid in self.rqdata.runtaskentries:
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
@@ -1585,18 +1603,9 @@ class RunQueue:
noexec.append(tid)
continue
- sq_fn.append(fn)
- sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
- sq_hash.append(self.rqdata.runtaskentries[tid].hash)
- sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
- sq_taskname.append(taskname)
- sq_task.append(tid)
-
- valid = self.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
- siginfo=True, sq_unihash=sq_unihash, d=self.cooker.data)
+ tocheck.add(tid)
- for v in valid:
- valid_new.add(sq_task[v])
+ valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True)
# Tasks which are both setscene and noexec never care about dependencies
# We therefore find tasks which are setscene and noexec and mark their
@@ -1680,6 +1689,7 @@ class RunQueue:
output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
+
class RunQueueExecute:
def __init__(self, rq):
@@ -1691,6 +1701,13 @@ class RunQueueExecute:
self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
+ self.sq_buildable = set()
+ self.sq_running = set()
+ self.sq_live = set()
+
+ self.updated_taskhash_queue = []
+ self.pending_migrations = set()
+
self.runq_buildable = set()
self.runq_running = set()
self.runq_complete = set()
@@ -1698,9 +1715,17 @@ class RunQueueExecute:
self.build_stamps = {}
self.build_stamps2 = []
self.failed_tids = []
+ self.sq_deferred = {}
self.stampcache = {}
+ self.holdoff_tasks = set()
+ self.holdoff_need_update = True
+ self.sqdone = False
+
+ self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
+ self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
+
for mc in rq.worker:
rq.worker[mc].pipe.setrunqueueexec(self)
for mc in rq.fakeworker:
@@ -1709,6 +1734,34 @@ class RunQueueExecute:
if self.number_tasks <= 0:
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
+ # List of setscene tasks which we've covered
+ self.scenequeue_covered = set()
+ # List of tasks which are covered (including setscene ones)
+ self.tasks_covered = set()
+ self.tasks_scenequeue_done = set()
+ self.scenequeue_notcovered = set()
+ self.tasks_notcovered = set()
+ self.scenequeue_notneeded = set()
+
+ # We can't skip specified target tasks which aren't setscene tasks
+ self.cantskip = set(self.rqdata.target_tids)
+ self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
+ self.cantskip.intersection_update(self.rqdata.runtaskentries)
+
+ schedulers = self.get_schedulers()
+ for scheduler in schedulers:
+ if self.scheduler == scheduler.name:
+ self.sched = scheduler(self, self.rqdata)
+ logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
+ break
+ else:
+ bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
+ (self.scheduler, ", ".join(obj.name for obj in schedulers)))
+
+ #if len(self.rqdata.runq_setscene_tids) > 0:
+ self.sqdata = SQData()
+ build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
+
def runqueue_process_waitpid(self, task, status):
# self.build_stamps[pid] may not exist when use shared work directory.
@@ -1716,10 +1769,17 @@ class RunQueueExecute:
self.build_stamps2.remove(self.build_stamps[task])
del self.build_stamps[task]
- if status != 0:
- self.task_fail(task, status)
+ if task in self.sq_live:
+ if status != 0:
+ self.sq_task_fail(task, status)
+ else:
+ self.sq_task_complete(task)
+ self.sq_live.remove(task)
else:
- self.task_complete(task)
+ if status != 0:
+ self.task_fail(task, status)
+ else:
+ self.task_complete(task)
return True
def finish_now(self):
@@ -1748,8 +1808,9 @@ class RunQueueExecute:
def finish(self):
self.rq.state = runQueueCleanUp
- if self.stats.active > 0:
- bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
+ active = self.stats.active + self.sq_stats.active
+ if active > 0:
+ bb.event.fire(runQueueExitWait(active), self.cfgData)
self.rq.read_workers()
return self.rq.active_fds()
@@ -1760,10 +1821,14 @@ class RunQueueExecute:
self.rq.state = runQueueComplete
return True
- def check_dependencies(self, task, taskdeps, setscene = False):
+ # Used by setscene only
+ def check_dependencies(self, task, taskdeps):
if not self.rq.depvalidate:
return False
+ # Must not edit parent data
+ taskdeps = set(taskdeps)
+
taskdata = {}
taskdeps.add(task)
for dep in taskdeps:
@@ -1776,121 +1841,10 @@ class RunQueueExecute:
return valid
def can_start_task(self):
- can_start = self.stats.active < self.number_tasks
+ active = self.stats.active + self.sq_stats.active
+ can_start = active < self.number_tasks
return can_start
-class RunQueueExecuteDummy(RunQueueExecute):
- def __init__(self, rq):
- self.rq = rq
- self.stats = RunQueueStats(0)
-
- def finish(self):
- self.rq.state = runQueueComplete
- return
-
-class RunQueueExecuteTasks(RunQueueExecute):
- def __init__(self, rq):
- RunQueueExecute.__init__(self, rq)
-
- self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
-
- self.stampcache = {}
-
- initial_covered = self.rq.scenequeue_covered.copy()
-
- # Mark initial buildable tasks
- for tid in self.rqdata.runtaskentries:
- if len(self.rqdata.runtaskentries[tid].depends) == 0:
- self.runq_buildable.add(tid)
- if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
- self.rq.scenequeue_covered.add(tid)
-
- found = True
- while found:
- found = False
- for tid in self.rqdata.runtaskentries:
- if tid in self.rq.scenequeue_covered:
- continue
- logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))
-
- if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
- if tid in self.rq.scenequeue_notcovered:
- continue
- found = True
- self.rq.scenequeue_covered.add(tid)
-
- logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
-
- # Allow the metadata to elect for setscene tasks to run anyway
- covered_remove = set()
- if self.rq.setsceneverify:
- invalidtasks = []
- tasknames = {}
- fns = {}
- for tid in self.rqdata.runtaskentries:
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
- fns[tid] = taskfn
- tasknames[tid] = taskname
- if 'noexec' in taskdep and taskname in taskdep['noexec']:
- continue
- if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
- logger.debug(2, 'Setscene stamp current for task %s', tid)
- continue
- if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
- logger.debug(2, 'Normal stamp current for task %s', tid)
- continue
- invalidtasks.append(tid)
-
- call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
- locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
- covered_remove = bb.utils.better_eval(call, locs)
-
- def removecoveredtask(tid):
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- taskname = taskname + '_setscene'
- bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
- self.rq.scenequeue_covered.remove(tid)
-
- toremove = covered_remove | self.rq.scenequeue_notcovered
- for task in toremove:
- logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
- while toremove:
- covered_remove = []
- for task in toremove:
- if task in self.rq.scenequeue_covered:
- removecoveredtask(task)
- for deptask in self.rqdata.runtaskentries[task].depends:
- if deptask not in self.rq.scenequeue_covered:
- continue
- if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
- continue
- logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
- covered_remove.append(deptask)
- toremove = covered_remove
-
- logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
-
-
- for mc in self.rqdata.dataCaches:
- target_pairs = []
- for tid in self.rqdata.target_tids:
- (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
- if tidmc == mc:
- target_pairs.append((fn, taskname))
-
- event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
-
- schedulers = self.get_schedulers()
- for scheduler in schedulers:
- if self.scheduler == scheduler.name:
- self.sched = scheduler(self, self.rqdata)
- logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
- break
- else:
- bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
- (self.scheduler, ", ".join(obj.name for obj in schedulers)))
-
def get_schedulers(self):
schedulers = set(obj for obj in globals().values()
if type(obj) is type and
@@ -1962,67 +1916,172 @@ class RunQueueExecuteTasks(RunQueueExecute):
self.stats.taskSkipped()
self.stats.taskCompleted()
+ def summarise_scenequeue_errors(self):
+ err = False
+ if not self.sqdone:
+ logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
+ completeevent = sceneQueueComplete(self.sq_stats, self.rq)
+ bb.event.fire(completeevent, self.cfgData)
+ if self.sq_deferred:
+ logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
+ err = True
+ if self.updated_taskhash_queue:
+ logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
+ err = True
+ if self.holdoff_tasks:
+ logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
+ err = True
+
+ for tid in self.rqdata.runq_setscene_tids:
+ if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
+ err = True
+ logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
+ if tid not in self.sq_buildable:
+ err = True
+ logger.error("Setscene Task %s was never marked as buildable" % tid)
+ if tid not in self.sq_running:
+ err = True
+ logger.error("Setscene Task %s was never marked as running" % tid)
+
+ for x in self.rqdata.runtaskentries:
+ if x not in self.tasks_covered and x not in self.tasks_notcovered:
+ logger.error("Task %s was never moved from the setscene queue" % x)
+ err = True
+ if x not in self.tasks_scenequeue_done:
+ logger.error("Task %s was never processed by the setscene code" % x)
+ err = True
+ if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
+ logger.error("Task %s was never marked as buildable by the setscene code" % x)
+ err = True
+ return err
+
+
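A minimal sketch of the invariant summarise_scenequeue_errors enforces (task names here are invented): every setscene tid must end up in exactly one of the covered or not-covered sets, otherwise the run is reported as failed.

    setscene_tids = {"a:do_populate_sysroot", "b:do_package_qa"}
    covered = {"a:do_populate_sysroot"}
    notcovered = set()

    def summarise(tids, covered, notcovered):
        err = False
        for tid in sorted(tids):
            if tid not in covered and tid not in notcovered:
                # Mirrors the "never marked as covered or not covered" error
                print("Setscene task %s fell through the scenequeue" % tid)
                err = True
        return err

    assert summarise(setscene_tids, covered, notcovered)  # b:do_package_qa is missing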
def execute(self):
"""
- Run the tasks in a queue prepared by rqdata.prepare()
+ Run the tasks in a queue prepared by prepare_runqueue
"""
- if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
- self.rqdata.setscenewhitelist_checked = True
-
- # Check tasks that are going to run against the whitelist
- def check_norun_task(tid, showerror=False):
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- # Ignore covered tasks
- if tid in self.rq.scenequeue_covered:
- return False
- # Ignore stamped tasks
- if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
- return False
- # Ignore noexec tasks
- taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
- if 'noexec' in taskdep and taskname in taskdep['noexec']:
- return False
+ self.rq.read_workers()
+ self.process_possible_migrations()
- pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
- if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
- if showerror:
- if tid in self.rqdata.runq_setscene_tids:
- logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
+ task = None
+ if not self.sqdone and self.can_start_task():
+ # Find the next setscene to run
+ for nexttask in sorted(self.rqdata.runq_setscene_tids):
+ if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
+ if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
+ if nexttask not in self.rqdata.target_tids:
+ logger.debug(2, "Skipping setscene for task %s" % nexttask)
+ self.sq_task_skip(nexttask)
+ self.scenequeue_notneeded.add(nexttask)
+ if nexttask in self.sq_deferred:
+ del self.sq_deferred[nexttask]
+ return True
+ # If covered tasks are running, need to wait for them to complete
+ for t in self.sqdata.sq_covered_tasks[nexttask]:
+ if t in self.runq_running and t not in self.runq_complete:
+ continue
+ if nexttask in self.sq_deferred:
+ if self.sq_deferred[nexttask] not in self.runq_complete:
+ continue
+ logger.debug(1, "Task %s no longer deferred" % nexttask)
+ del self.sq_deferred[nexttask]
+ valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False)
+ if not valid:
+ logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
+ self.sq_task_failoutright(nexttask)
+ return True
else:
- logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
- return True
- return False
- # Look to see if any tasks that we think shouldn't run are going to
- unexpected = False
- for tid in self.rqdata.runtaskentries:
- if check_norun_task(tid):
- unexpected = True
+ self.sqdata.outrightfail.remove(nexttask)
+ if nexttask in self.sqdata.outrightfail:
+ logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
+ self.sq_task_failoutright(nexttask)
+ return True
+ if nexttask in self.sqdata.unskippable:
+ logger.debug(2, "Setscene task %s is unskippable" % nexttask)
+ task = nexttask
break
- if unexpected:
- # Run through the tasks in the rough order they'd have executed and print errors
- # (since the order can be useful - usually missing sstate for the last few tasks
- # is the cause of the problem)
- task = self.sched.next()
- while task is not None:
- check_norun_task(task, showerror=True)
- self.task_skip(task, 'Setscene enforcement check')
- task = self.sched.next()
+ if task is not None:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+ taskname = taskname + "_setscene"
+ if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
+ logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
+ self.sq_task_failoutright(task)
+ return True
- self.rq.state = runQueueCleanUp
+ if self.cooker.configuration.force:
+ if task in self.rqdata.target_tids:
+ self.sq_task_failoutright(task)
+ return True
+
+ if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
+ logger.debug(2, 'Setscene stamp current for task %s, so skipping it and its dependencies', task)
+ self.sq_task_skip(task)
return True
- self.rq.read_workers()
+ if self.cooker.configuration.skipsetscene:
+ logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
+ self.sq_task_failoutright(task)
+ return True
- if self.stats.total == 0:
- # nothing to do
- self.rq.state = runQueueCleanUp
+ startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
+ bb.event.fire(startevent, self.cfgData)
+
+ taskdepdata = self.sq_build_taskdepdata(task)
+
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ taskhash = self.rqdata.get_task_hash(task)
+ unihash = self.rqdata.get_task_unihash(task)
+ if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
+ if not mc in self.rq.fakeworker:
+ self.rq.start_fakeworker(self, mc)
+ self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
+ self.rq.fakeworker[mc].process.stdin.flush()
+ else:
+ self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
+ self.rq.worker[mc].process.stdin.flush()
+
+ self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+ self.build_stamps2.append(self.build_stamps[task])
+ self.sq_running.add(task)
+ self.sq_live.add(task)
+ self.sq_stats.taskActive()
+ if self.can_start_task():
+ return True
+
+ self.update_holdofftasks()
+
+ if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
+ logger.info("Setscene tasks completed")
+
+ err = self.summarise_scenequeue_errors()
+ if err:
+ self.rq.state = runQueueFailed
+ return True
+
+ if self.cooker.configuration.setsceneonly:
+ self.rq.state = runQueueComplete
+ return True
+ self.sqdone = True
+
+ if self.stats.total == 0:
+ # nothing to do
+ self.rq.state = runQueueComplete
+ return True
- task = self.sched.next()
+ if self.cooker.configuration.setsceneonly:
+ task = None
+ else:
+ task = self.sched.next()
if task is not None:
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
- if task in self.rq.scenequeue_covered:
+ if self.rqdata.setscenewhitelist is not None:
+ if self.check_setscenewhitelist(task):
+ self.task_fail(task, "setscene whitelist")
+ return True
+
+ if task in self.tasks_covered:
logger.debug(2, "Setscene covered task %s", task)
self.task_skip(task, "covered")
return True
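The selection loop earlier in execute() decides skippability from set algebra alone. A toy version, with invented tids and sets:

    sq_revdeps = {"x:do_a": {"x:do_b"}, "x:do_b": set()}
    scenequeue_covered = {"x:do_b"}
    target_tids = set()

    def skippable(tid):
        # A setscene task can be skipped when everything above it in the
        # collapsed graph is already covered and it is not a build target.
        revdeps = sq_revdeps[tid]
        return (len(revdeps) > 0
                and revdeps.issubset(scenequeue_covered)
                and tid not in target_tids)

    print([t for t in sorted(sq_revdeps) if skippable(t)])  # ['x:do_a']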
@@ -2075,43 +2134,58 @@ class RunQueueExecuteTasks(RunQueueExecute):
if self.can_start_task():
return True
- if self.stats.active > 0:
+ if self.stats.active > 0 or self.sq_stats.active > 0:
self.rq.read_workers()
return self.rq.active_fds()
+ # No more tasks can be run. If we have deferred setscene tasks we should run them.
+ if self.sq_deferred:
+ tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
+ logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
+ self.sq_task_failoutright(tid)
+ return True
+
if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed
return True
# Sanity Checks
+ err = self.summarise_scenequeue_errors()
for task in self.rqdata.runtaskentries:
if task not in self.runq_buildable:
logger.error("Task %s never buildable!", task)
- if task not in self.runq_running:
+ err = True
+ elif task not in self.runq_running:
logger.error("Task %s never ran!", task)
- if task not in self.runq_complete:
+ err = True
+ elif task not in self.runq_complete:
logger.error("Task %s never completed!", task)
- self.rq.state = runQueueComplete
+ err = True
+
+ if err:
+ self.rq.state = runQueueFailed
+ else:
+ self.rq.state = runQueueComplete
return True
- def filtermcdeps(self, task, deps):
+ def filtermcdeps(self, task, mc, deps):
ret = set()
- mainmc = mc_from_tid(task)
for dep in deps:
- mc = mc_from_tid(dep)
- if mc != mainmc:
+ thismc = mc_from_tid(dep)
+ if thismc != mc:
continue
ret.add(dep)
return ret
- # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
+ # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
# as most code can't handle them
def build_taskdepdata(self, task):
taskdepdata = {}
- next = self.rqdata.runtaskentries[task].depends
+ mc = mc_from_tid(task)
+ next = self.rqdata.runtaskentries[task].depends.copy()
next.add(task)
- next = self.filtermcdeps(task, next)
+ next = self.filtermcdeps(task, mc, next)
while next:
additional = []
for revdep in next:
@@ -2121,7 +2195,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
taskhash = self.rqdata.runtaskentries[revdep].hash
unihash = self.rqdata.runtaskentries[revdep].unihash
- deps = self.filtermcdeps(task, deps)
+ deps = self.filtermcdeps(task, mc, deps)
taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
for revdep2 in deps:
if revdep2 not in taskdepdata:
@@ -2131,270 +2205,197 @@ class RunQueueExecuteTasks(RunQueueExecute):
#bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
return taskdepdata
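A sketch of the multiconfig filter applied above, assuming bitbake's "mc:<name>:<fn>:<task>" tid form (mc_from_tid here is a simplified stand-in, not the real helper):

    def mc_from_tid(tid):
        # Simplified: a multiconfig tid looks like "mc:<mcname>:<fn>:<task>"
        if tid.startswith("mc:"):
            return tid.split(":")[1]
        return ""

    def filtermcdeps(mc, deps):
        # Drop dependencies belonging to a different multiconfig
        return {d for d in deps if mc_from_tid(d) == mc}

    deps = {"mc:mc1:/r/a.bb:do_build", "/r/b.bb:do_build"}
    print(filtermcdeps("", deps))  # {'/r/b.bb:do_build'}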
-class RunQueueExecuteScenequeue(RunQueueExecute):
- def __init__(self, rq):
- RunQueueExecute.__init__(self, rq)
+ def update_holdofftasks(self):
- self.scenequeue_covered = set()
- self.scenequeue_notcovered = set()
- self.scenequeue_notneeded = set()
-
- # If we don't have any setscene functions, skip this step
- if len(self.rqdata.runq_setscene_tids) == 0:
- rq.scenequeue_covered = set()
- rq.scenequeue_notcovered = set()
- rq.state = runQueueRunInit
+ if not self.holdoff_need_update:
return
- self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
+ notcovered = set(self.scenequeue_notcovered)
+ notcovered |= self.cantskip
+ for tid in self.scenequeue_notcovered:
+ notcovered |= self.sqdata.sq_covered_tasks[tid]
+ notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
+ notcovered.intersection_update(self.tasks_scenequeue_done)
- sq_revdeps = {}
- sq_revdeps_new = {}
- sq_revdeps_squash = {}
- self.sq_harddeps = {}
- self.stamps = {}
+ covered = set(self.scenequeue_covered)
+ for tid in self.scenequeue_covered:
+ covered |= self.sqdata.sq_covered_tasks[tid]
+ covered.difference_update(notcovered)
+ covered.intersection_update(self.tasks_scenequeue_done)
- # We need to construct a dependency graph for the setscene functions. Intermediate
- # dependencies between the setscene tasks only complicate the code. This code
- # therefore aims to collapse the huge runqueue dependency tree into a smaller one
- # only containing the setscene functions.
-
- self.rqdata.init_progress_reporter.next_stage()
-
- # First process the chains up to the first setscene task.
- endpoints = {}
- for tid in self.rqdata.runtaskentries:
- sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
- sq_revdeps_new[tid] = set()
- if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
- #bb.warn("Added endpoint %s" % (tid))
- endpoints[tid] = set()
-
- self.rqdata.init_progress_reporter.next_stage()
+ for tid in notcovered | covered:
+ if len(self.rqdata.runtaskentries[tid].depends) == 0:
+ self.setbuildable(tid)
+ elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
+ self.setbuildable(tid)
- # Secondly process the chains between setscene tasks.
- for tid in self.rqdata.runq_setscene_tids:
- #bb.warn("Added endpoint 2 %s" % (tid))
- for dep in self.rqdata.runtaskentries[tid].depends:
- if tid in sq_revdeps[dep]:
- sq_revdeps[dep].remove(tid)
- if dep not in endpoints:
- endpoints[dep] = set()
- #bb.warn(" Added endpoint 3 %s" % (dep))
- endpoints[dep].add(tid)
-
- self.rqdata.init_progress_reporter.next_stage()
-
- def process_endpoints(endpoints):
- newendpoints = {}
- for point, task in endpoints.items():
- tasks = set()
- if task:
- tasks |= task
- if sq_revdeps_new[point]:
- tasks |= sq_revdeps_new[point]
- sq_revdeps_new[point] = set()
- if point in self.rqdata.runq_setscene_tids:
- sq_revdeps_new[point] = tasks
- tasks = set()
- continue
- for dep in self.rqdata.runtaskentries[point].depends:
- if point in sq_revdeps[dep]:
- sq_revdeps[dep].remove(point)
- if tasks:
- sq_revdeps_new[dep] |= tasks
- if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
- newendpoints[dep] = task
- if len(newendpoints) != 0:
- process_endpoints(newendpoints)
-
- process_endpoints(endpoints)
-
- self.rqdata.init_progress_reporter.next_stage()
-
- # Build a list of setscene tasks which are "unskippable"
- # These are direct endpoints referenced by the build
- endpoints2 = {}
- sq_revdeps2 = {}
- sq_revdeps_new2 = {}
- def process_endpoints2(endpoints):
- newendpoints = {}
- for point, task in endpoints.items():
- tasks = set([point])
- if task:
- tasks |= task
- if sq_revdeps_new2[point]:
- tasks |= sq_revdeps_new2[point]
- sq_revdeps_new2[point] = set()
- if point in self.rqdata.runq_setscene_tids:
- sq_revdeps_new2[point] = tasks
- for dep in self.rqdata.runtaskentries[point].depends:
- if point in sq_revdeps2[dep]:
- sq_revdeps2[dep].remove(point)
- if tasks:
- sq_revdeps_new2[dep] |= tasks
- if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
- newendpoints[dep] = tasks
- if len(newendpoints) != 0:
- process_endpoints2(newendpoints)
- for tid in self.rqdata.runtaskentries:
- sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
- sq_revdeps_new2[tid] = set()
- if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
- endpoints2[tid] = set()
- process_endpoints2(endpoints2)
- self.unskippable = []
- for tid in self.rqdata.runq_setscene_tids:
- if sq_revdeps_new2[tid]:
- self.unskippable.append(tid)
+ self.tasks_covered = covered
+ self.tasks_notcovered = notcovered
- self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
+ self.holdoff_tasks = set()
- for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
- if tid in self.rqdata.runq_setscene_tids:
- deps = set()
- for dep in sq_revdeps_new[tid]:
- deps.add(dep)
- sq_revdeps_squash[tid] = deps
- elif len(sq_revdeps_new[tid]) != 0:
- bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
- self.rqdata.init_progress_reporter.update(taskcounter)
-
- self.rqdata.init_progress_reporter.next_stage()
-
- # Resolve setscene inter-task dependencies
- # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
- # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
for tid in self.rqdata.runq_setscene_tids:
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
- realtid = tid + "_setscene"
- idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
- self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
- for (depname, idependtask) in idepends:
-
- if depname not in self.rqdata.taskData[mc].build_targets:
- continue
-
- depfn = self.rqdata.taskData[mc].build_targets[depname][0]
- if depfn is None:
- continue
- deptid = depfn + ":" + idependtask.replace("_setscene", "")
- if deptid not in self.rqdata.runtaskentries:
- bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
-
- if not deptid in self.sq_harddeps:
- self.sq_harddeps[deptid] = set()
- self.sq_harddeps[deptid].add(tid)
+ if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
+ self.holdoff_tasks.add(tid)
- sq_revdeps_squash[tid].add(deptid)
- # Have to zero this to avoid circular dependencies
- sq_revdeps_squash[deptid] = set()
-
- self.rqdata.init_progress_reporter.next_stage()
-
- for task in self.sq_harddeps:
- for dep in self.sq_harddeps[task]:
- sq_revdeps_squash[dep].add(task)
-
- self.rqdata.init_progress_reporter.next_stage()
-
- #for tid in sq_revdeps_squash:
- # for dep in sq_revdeps_squash[tid]:
- # data = data + "\n %s" % dep
- # bb.warn("Task %s_setscene: is %s " % (tid, data
-
- self.sq_deps = {}
- self.sq_revdeps = sq_revdeps_squash
- self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
-
- for tid in self.sq_revdeps:
- self.sq_deps[tid] = set()
- for tid in self.sq_revdeps:
- for dep in self.sq_revdeps[tid]:
- self.sq_deps[dep].add(tid)
-
- self.rqdata.init_progress_reporter.next_stage()
-
- for tid in self.sq_revdeps:
- if len(self.sq_revdeps[tid]) == 0:
- self.runq_buildable.add(tid)
-
- self.rqdata.init_progress_reporter.finish()
-
- self.outrightfail = []
- if self.rq.hashvalidate:
- sq_hash = []
- sq_hashfn = []
- sq_unihash = []
- sq_fn = []
- sq_taskname = []
- sq_task = []
- noexec = []
- stamppresent = []
- for tid in self.sq_revdeps:
- (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ for tid in self.holdoff_tasks.copy():
+ for dep in self.sqdata.sq_covered_tasks[tid]:
+ if dep not in self.runq_complete:
+ self.holdoff_tasks.add(dep)
- taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ self.holdoff_need_update = False
- if 'noexec' in taskdep and taskname in taskdep['noexec']:
- noexec.append(tid)
- self.task_skip(tid)
- bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
- continue
+ def process_possible_migrations(self):
- if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
- logger.debug(2, 'Setscene stamp current for task %s', tid)
- stamppresent.append(tid)
- self.task_skip(tid)
- continue
+ changed = set()
+ for tid, unihash in self.updated_taskhash_queue.copy():
+ if tid in self.runq_running and tid not in self.runq_complete:
+ continue
- if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
- logger.debug(2, 'Normal stamp current for task %s', tid)
- stamppresent.append(tid)
- self.task_skip(tid)
- continue
+ self.updated_taskhash_queue.remove((tid, unihash))
+
+ if unihash != self.rqdata.runtaskentries[tid].unihash:
+ logger.info("Task %s unihash changed to %s" % (tid, unihash))
+ self.rqdata.runtaskentries[tid].unihash = unihash
+ bb.parse.siggen.set_unihash(tid, unihash)
+
+ # Work out all tasks which depend on this one
+ total = set()
+ next = set(self.rqdata.runtaskentries[tid].revdeps)
+ while next:
+ current = next.copy()
+ total = total | next
+ next = set()
+ for ntid in current:
+ next |= self.rqdata.runtaskentries[ntid].revdeps
+ next.difference_update(total)
+
+ # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
+ done = set()
+ next = set(self.rqdata.runtaskentries[tid].revdeps)
+ while next:
+ current = next.copy()
+ next = set()
+ for tid in current:
+ if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
+ continue
+ procdep = []
+ for dep in self.rqdata.runtaskentries[tid].depends:
+ procdep.append(dep)
+ orighash = self.rqdata.runtaskentries[tid].hash
+ self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
+ origuni = self.rqdata.runtaskentries[tid].unihash
+ self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
+ logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
+ next |= self.rqdata.runtaskentries[tid].revdeps
+ changed.add(tid)
+ total.remove(tid)
+ next.intersection_update(total)
+
+ if changed:
+ for mc in self.rq.worker:
+ self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
+ for mc in self.rq.fakeworker:
+ self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
+
+ logger.debug(1, "Tasks changed:\n%s" % (pprint.pformat(changed)))
+
+ for tid in changed:
+ if tid not in self.rqdata.runq_setscene_tids:
+ continue
+ if tid in self.runq_running:
+ continue
+ if tid in self.scenequeue_covered:
+ # Potentially risky, should we report this hash as a match?
+ logger.info("Already covered setscene for %s so ignoring rehash" % (tid))
+ continue
+ if tid not in self.pending_migrations:
+ self.pending_migrations.add(tid)
+
+ for tid in self.pending_migrations.copy():
+ valid = True
+ # Check no tasks this covers are running
+ for dep in self.sqdata.sq_covered_tasks[tid]:
+ if dep in self.runq_running and dep not in self.runq_complete:
+ logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
+ valid = False
+ break
+ if not valid:
+ continue
- sq_fn.append(fn)
- sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
- sq_hash.append(self.rqdata.runtaskentries[tid].hash)
- sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
- sq_taskname.append(taskname)
- sq_task.append(tid)
+ self.pending_migrations.remove(tid)
+ changed = True
- self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))
+ if tid in self.tasks_scenequeue_done:
+ self.tasks_scenequeue_done.remove(tid)
+ for dep in self.sqdata.sq_covered_tasks[tid]:
+ if dep not in self.runq_complete:
+ if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
+ self.tasks_scenequeue_done.remove(dep)
+
+ if tid in self.sq_buildable:
+ self.sq_buildable.remove(tid)
+ if tid in self.sq_running:
+ self.sq_running.remove(tid)
+ if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
+ if tid not in self.sq_buildable:
+ self.sq_buildable.add(tid)
+ if len(self.sqdata.sq_revdeps[tid]) == 0:
+ self.sq_buildable.add(tid)
+
+ if tid in self.sqdata.outrightfail:
+ self.sqdata.outrightfail.remove(tid)
+ if tid in self.scenequeue_notcovered:
+ self.scenequeue_notcovered.remove(tid)
+ if tid in self.scenequeue_covered:
+ self.scenequeue_covered.remove(tid)
+ if tid in self.scenequeue_notneeded:
+ self.scenequeue_notneeded.remove(tid)
- valid = self.rq.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
- siginfo=False, sq_unihash=sq_unihash, d=self.cooker.data)
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
- self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")
+ if tid in self.stampcache:
+ del self.stampcache[tid]
- valid_new = stamppresent
- for v in valid:
- valid_new.append(sq_task[v])
+ if tid in self.build_stamps:
+ del self.build_stamps[tid]
- for tid in self.sq_revdeps:
- if tid not in valid_new and tid not in noexec:
- logger.debug(2, 'No package found, so skipping setscene task %s', tid)
- self.outrightfail.append(tid)
+ logger.info("Setscene task %s now valid and being rerun" % tid)
+ self.sqdone = False
+ update_scenequeue_data([tid], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
- logger.info('Executing SetScene Tasks')
+ if changed:
+ self.holdoff_need_update = True
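The two-phase walk in process_possible_migrations can be sketched standalone: first collect everything downstream of the changed task, then rehash in dependency order, postponing any task whose dirty dependencies are still pending (graph and names invented):

    revdeps = {"a": {"b", "c"}, "b": {"d"}, "c": {"d"}, "d": set()}
    depends = {"a": set(), "b": {"a"}, "c": {"a"}, "d": {"b", "c"}}

    # Phase 1: everything that (transitively) depends on the changed task "a"
    total, frontier = set(), set(revdeps["a"])
    while frontier:
        total |= frontier
        nxt = set()
        for t in frontier:
            nxt |= revdeps[t]
        frontier = nxt - total

    # Phase 2: process in dependency order; skip tasks with dirty deps pending
    order, frontier = [], set(revdeps["a"])
    while frontier:
        nxt = set()
        for t in sorted(frontier):
            if not depends[t].isdisjoint(total):
                continue                  # a dirty dependency is still queued
            order.append(t)               # safe to rehash now
            nxt |= revdeps[t]
            total.remove(t)
        frontier = nxt & total
    print(order)                          # ['b', 'c', 'd']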
- self.rq.state = runQueueSceneRun
+ def scenequeue_updatecounters(self, task, fail=False):
- def scenequeue_updatecounters(self, task, fail = False):
- for dep in self.sq_deps[task]:
- if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
+ for dep in sorted(self.sqdata.sq_deps[task]):
+ if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
- self.scenequeue_updatecounters(dep, fail)
- continue
- if task not in self.sq_revdeps2[dep]:
- # May already have been removed by the fail case above
+ self.sq_task_failoutright(dep)
continue
- self.sq_revdeps2[dep].remove(task)
- if len(self.sq_revdeps2[dep]) == 0:
- self.runq_buildable.add(dep)
+ if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
+ if dep not in self.sq_buildable:
+ self.sq_buildable.add(dep)
- def task_completeoutright(self, task):
+ next = set([task])
+ while next:
+ new = set()
+ for t in sorted(next):
+ self.tasks_scenequeue_done.add(t)
+ # Look down the dependency chain for non-setscene things which this task depends on
+ # and mark as 'done'
+ for dep in self.rqdata.runtaskentries[t].depends:
+ if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
+ continue
+ if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
+ new.add(dep)
+ next = new
+
+ self.holdoff_need_update = True
+
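Readiness in the rewritten scenequeue is counter-free: a setscene task becomes buildable once every one of its collapsed reverse dependencies has landed in either outcome set. In miniature:

    sq_revdeps = {"s1": {"s2", "s3"}, "s2": set(), "s3": set()}
    covered, notcovered = {"s2"}, {"s3"}

    # Buildable once every revdep has an outcome, whichever it is
    buildable = {t for t, revs in sq_revdeps.items()
                 if revs.issubset(covered | notcovered)}
    print(sorted(buildable))  # ['s1', 's2', 's3']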
+ def sq_task_completeoutright(self, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
@@ -2405,7 +2406,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
self.scenequeue_covered.add(task)
self.scenequeue_updatecounters(task)
- def check_taskfail(self, task):
+ def sq_check_taskfail(self, task):
if self.rqdata.setscenewhitelist is not None:
realtask = task.split('_setscene')[0]
(mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
@@ -2414,132 +2415,34 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
self.rq.state = runQueueCleanUp
- def task_complete(self, task):
- self.stats.taskCompleted()
- bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
- self.task_completeoutright(task)
+ def sq_task_complete(self, task):
+ self.sq_stats.taskCompleted()
+ bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
+ self.sq_task_completeoutright(task)
- def task_fail(self, task, result):
- self.stats.taskFailed()
- bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
+ def sq_task_fail(self, task, result):
+ self.sq_stats.taskFailed()
+ bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
self.scenequeue_notcovered.add(task)
self.scenequeue_updatecounters(task, True)
- self.check_taskfail(task)
+ self.sq_check_taskfail(task)
- def task_failoutright(self, task):
- self.runq_running.add(task)
- self.runq_buildable.add(task)
- self.stats.taskSkipped()
- self.stats.taskCompleted()
+ def sq_task_failoutright(self, task):
+ self.sq_running.add(task)
+ self.sq_buildable.add(task)
+ self.sq_stats.taskSkipped()
+ self.sq_stats.taskCompleted()
self.scenequeue_notcovered.add(task)
self.scenequeue_updatecounters(task, True)
- def task_skip(self, task):
- self.runq_running.add(task)
- self.runq_buildable.add(task)
- self.task_completeoutright(task)
- self.stats.taskSkipped()
- self.stats.taskCompleted()
-
- def execute(self):
- """
- Run the tasks in a queue prepared by prepare_runqueue
- """
-
- self.rq.read_workers()
-
- task = None
- if self.can_start_task():
- # Find the next setscene to run
- for nexttask in self.rqdata.runq_setscene_tids:
- if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
- if nexttask in self.unskippable:
- logger.debug(2, "Setscene task %s is unskippable" % nexttask)
- if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
- fn = fn_from_tid(nexttask)
- foundtarget = False
-
- if nexttask in self.rqdata.target_tids:
- foundtarget = True
- if not foundtarget:
- logger.debug(2, "Skipping setscene for task %s" % nexttask)
- self.task_skip(nexttask)
- self.scenequeue_notneeded.add(nexttask)
- return True
- if nexttask in self.outrightfail:
- self.task_failoutright(nexttask)
- return True
- task = nexttask
- break
- if task is not None:
- (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
- taskname = taskname + "_setscene"
- if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
- logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
- self.task_failoutright(task)
- return True
-
- if self.cooker.configuration.force:
- if task in self.rqdata.target_tids:
- self.task_failoutright(task)
- return True
-
- if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
- logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
- self.task_skip(task)
- return True
-
- startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
- bb.event.fire(startevent, self.cfgData)
-
- taskdepdata = self.build_taskdepdata(task)
-
- taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
- taskhash = self.rqdata.get_task_hash(task)
- unihash = self.rqdata.get_task_unihash(task)
- if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
- if not mc in self.rq.fakeworker:
- self.rq.start_fakeworker(self, mc)
- self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
- self.rq.fakeworker[mc].process.stdin.flush()
- else:
- self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
- self.rq.worker[mc].process.stdin.flush()
-
- self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
- self.build_stamps2.append(self.build_stamps[task])
- self.runq_running.add(task)
- self.stats.taskActive()
- if self.can_start_task():
- return True
-
- if self.stats.active > 0:
- self.rq.read_workers()
- return self.rq.active_fds()
-
- #for tid in self.sq_revdeps:
- # if tid not in self.runq_running:
- # buildable = tid in self.runq_buildable
- # revdeps = self.sq_revdeps[tid]
- # bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))
-
- self.rq.scenequeue_covered = self.scenequeue_covered
- self.rq.scenequeue_notcovered = self.scenequeue_notcovered
-
- logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))
-
- self.rq.state = runQueueRunInit
-
- completeevent = sceneQueueComplete(self.stats, self.rq)
- bb.event.fire(completeevent, self.cfgData)
-
- return True
-
- def runqueue_process_waitpid(self, task, status):
- RunQueueExecute.runqueue_process_waitpid(self, task, status)
-
+ def sq_task_skip(self, task):
+ self.sq_running.add(task)
+ self.sq_buildable.add(task)
+ self.sq_task_completeoutright(task)
+ self.sq_stats.taskSkipped()
+ self.sq_stats.taskCompleted()
- def build_taskdepdata(self, task):
+ def sq_build_taskdepdata(self, task):
def getsetscenedeps(tid):
deps = set()
(mc, fn, taskname, _) = split_tid_mcfn(tid)
@@ -2577,6 +2480,279 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
#bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
return taskdepdata
+ def check_setscenewhitelist(self, tid):
+ # Check task that is going to run against the whitelist
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ # Ignore covered tasks
+ if tid in self.tasks_covered:
+ return False
+ # Ignore stamped tasks
+ if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
+ return False
+ # Ignore noexec tasks
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ return False
+
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
+ if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
+ if tid in self.rqdata.runq_setscene_tids:
+ msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
+ else:
+ msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
+ logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
+ return True
+ return False
+
+class SQData(object):
+ def __init__(self):
+ # SceneQueue dependencies
+ self.sq_deps = {}
+ # SceneQueue reverse dependencies
+ self.sq_revdeps = {}
+ # Injected inter-setscene task dependencies
+ self.sq_harddeps = {}
+ # Cache of stamp files so duplicates can't run in parallel
+ self.stamps = {}
+ # Setscene tasks directly depended upon by the build
+ self.unskippable = set()
+ # Set of setscene tasks which aren't present
+ self.outrightfail = set()
+ # Mapping of each setscene task to the normal tasks it covers
+ self.sq_covered_tasks = {}
+
+def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
+
+ sq_revdeps = {}
+ sq_revdeps_squash = {}
+ sq_collated_deps = {}
+
+ # We need to construct a dependency graph for the setscene functions. Intermediate
+ # dependencies between the setscene tasks only complicate the code. This code
+ # therefore aims to collapse the huge runqueue dependency tree into a smaller one
+ # only containing the setscene functions.
+
+ rqdata.init_progress_reporter.next_stage()
+
+ # First process the chains up to the first setscene task.
+ endpoints = {}
+ for tid in rqdata.runtaskentries:
+ sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
+ sq_revdeps_squash[tid] = set()
+ if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
+ #bb.warn("Added endpoint %s" % (tid))
+ endpoints[tid] = set()
+
+ rqdata.init_progress_reporter.next_stage()
+
+ # Secondly process the chains between setscene tasks.
+ for tid in rqdata.runq_setscene_tids:
+ sq_collated_deps[tid] = set()
+ #bb.warn("Added endpoint 2 %s" % (tid))
+ for dep in rqdata.runtaskentries[tid].depends:
+ if tid in sq_revdeps[dep]:
+ sq_revdeps[dep].remove(tid)
+ if dep not in endpoints:
+ endpoints[dep] = set()
+ #bb.warn(" Added endpoint 3 %s" % (dep))
+ endpoints[dep].add(tid)
+
+ rqdata.init_progress_reporter.next_stage()
+
+ def process_endpoints(endpoints):
+ newendpoints = {}
+ for point, task in endpoints.items():
+ tasks = set()
+ if task:
+ tasks |= task
+ if sq_revdeps_squash[point]:
+ tasks |= sq_revdeps_squash[point]
+ if point not in rqdata.runq_setscene_tids:
+ for t in tasks:
+ sq_collated_deps[t].add(point)
+ sq_revdeps_squash[point] = set()
+ if point in rqdata.runq_setscene_tids:
+ sq_revdeps_squash[point] = tasks
+ tasks = set()
+ continue
+ for dep in rqdata.runtaskentries[point].depends:
+ if point in sq_revdeps[dep]:
+ sq_revdeps[dep].remove(point)
+ if tasks:
+ sq_revdeps_squash[dep] |= tasks
+ if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
+ newendpoints[dep] = task
+ if len(newendpoints) != 0:
+ process_endpoints(newendpoints)
+
+ process_endpoints(endpoints)
+
+ rqdata.init_progress_reporter.next_stage()
+
+ # Build a list of tasks which are "unskippable"
+ # These are direct endpoints referenced by the build, up to and including setscene tasks
+ # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
+ new = True
+ for tid in rqdata.runtaskentries:
+ if len(rqdata.runtaskentries[tid].revdeps) == 0:
+ sqdata.unskippable.add(tid)
+ sqdata.unskippable |= sqrq.cantskip
+ while new:
+ new = False
+ orig = sqdata.unskippable.copy()
+ for tid in sorted(orig, reverse=True):
+ if tid in rqdata.runq_setscene_tids:
+ continue
+ if len(rqdata.runtaskentries[tid].depends) == 0:
+ # These are tasks which have no setscene tasks in their chain, so they need to be marked as directly buildable
+ sqrq.setbuildable(tid)
+ sqdata.unskippable |= rqdata.runtaskentries[tid].depends
+ if sqdata.unskippable != orig:
+ new = True
+
+ sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
+
+ rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
+
+ # Sanity check all dependencies could be changed to setscene task references
+ for taskcounter, tid in enumerate(rqdata.runtaskentries):
+ if tid in rqdata.runq_setscene_tids:
+ pass
+ elif len(sq_revdeps_squash[tid]) != 0:
+ bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
+ else:
+ del sq_revdeps_squash[tid]
+ rqdata.init_progress_reporter.update(taskcounter)
+
+ rqdata.init_progress_reporter.next_stage()
+
+ # Resolve setscene inter-task dependencies
+ # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
+ # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
+ for tid in rqdata.runq_setscene_tids:
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+ realtid = tid + "_setscene"
+ idepends = rqdata.taskData[mc].taskentries[realtid].idepends
+ sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
+ for (depname, idependtask) in idepends:
+
+ if depname not in rqdata.taskData[mc].build_targets:
+ continue
+
+ depfn = rqdata.taskData[mc].build_targets[depname][0]
+ if depfn is None:
+ continue
+ deptid = depfn + ":" + idependtask.replace("_setscene", "")
+ if deptid not in rqdata.runtaskentries:
+ bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
+
+ if not deptid in sqdata.sq_harddeps:
+ sqdata.sq_harddeps[deptid] = set()
+ sqdata.sq_harddeps[deptid].add(tid)
+
+ sq_revdeps_squash[tid].add(deptid)
+ # Have to zero this to avoid circular dependencies
+ sq_revdeps_squash[deptid] = set()
+
+ rqdata.init_progress_reporter.next_stage()
+
+ for task in sqdata.sq_harddeps:
+ for dep in sqdata.sq_harddeps[task]:
+ sq_revdeps_squash[dep].add(task)
+
+ rqdata.init_progress_reporter.next_stage()
+
+ #for tid in sq_revdeps_squash:
+ # data = ""
+ # for dep in sq_revdeps_squash[tid]:
+ # data = data + "\n %s" % dep
+ # bb.warn("Task %s_setscene: is %s " % (tid, data))
+
+ sqdata.sq_revdeps = sq_revdeps_squash
+ sqdata.sq_covered_tasks = sq_collated_deps
+
+ # Build reverse version of revdeps to populate deps structure
+ for tid in sqdata.sq_revdeps:
+ sqdata.sq_deps[tid] = set()
+ for tid in sqdata.sq_revdeps:
+ for dep in sqdata.sq_revdeps[tid]:
+ sqdata.sq_deps[dep].add(tid)
+
+ rqdata.init_progress_reporter.next_stage()
+
+ sqdata.multiconfigs = set()
+ for tid in sqdata.sq_revdeps:
+ sqdata.multiconfigs.add(mc_from_tid(tid))
+ if len(sqdata.sq_revdeps[tid]) == 0:
+ sqrq.sq_buildable.add(tid)
+
+ rqdata.init_progress_reporter.finish()
+
+ sqdata.noexec = set()
+ sqdata.stamppresent = set()
+ sqdata.valid = set()
+
+ update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq)
+
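The endpoint propagation above collapses the full task graph onto the setscene tasks. For small graphs an equivalent result comes from a plain upward walk that stops at the next setscene task (toy task names, simplified algorithm rather than the one used above):

    depends = {
        "fetch": set(), "unpack": {"fetch"},
        "populate_sysroot": {"unpack"},          # setscene-capable
        "compile": {"populate_sysroot"},
        "package": {"compile"},                  # setscene-capable
        "image": {"package"},
    }
    setscene = {"populate_sysroot", "package"}

    revdeps = {t: set() for t in depends}
    for t, deps in depends.items():
        for d in deps:
            revdeps[d].add(t)

    def collapsed_revdeps(s):
        # Walk upwards from s, recording only the nearest setscene tasks
        found, seen, stack = set(), set(), list(revdeps[s])
        while stack:
            t = stack.pop()
            if t in seen:
                continue
            seen.add(t)
            if t in setscene:
                found.add(t)                     # stop at a setscene boundary
            else:
                stack.extend(revdeps[t])
        return found

    print(collapsed_revdeps("populate_sysroot"))  # {'package'}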
+def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq):
+
+ tocheck = set()
+
+ for tid in sorted(tids):
+ if tid in sqdata.stamppresent:
+ sqdata.stamppresent.remove(tid)
+ if tid in sqdata.valid:
+ sqdata.valid.remove(tid)
+
+ (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+
+ taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
+
+ if 'noexec' in taskdep and taskname in taskdep['noexec']:
+ sqdata.noexec.add(tid)
+ sqrq.sq_task_skip(tid)
+ bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
+ continue
+
+ if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
+ logger.debug(2, 'Setscene stamp current for task %s', tid)
+ sqdata.stamppresent.add(tid)
+ sqrq.sq_task_skip(tid)
+ continue
+
+ if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
+ logger.debug(2, 'Normal stamp current for task %s', tid)
+ sqdata.stamppresent.add(tid)
+ sqrq.sq_task_skip(tid)
+ continue
+
+ tocheck.add(tid)
+
+ sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False)
+
+ sqdata.hashes = {}
+ for mc in sorted(sqdata.multiconfigs):
+ for tid in sorted(sqdata.sq_revdeps):
+ if mc_from_tid(tid) != mc:
+ continue
+ if tid in sqdata.stamppresent:
+ continue
+ if tid in sqdata.valid:
+ continue
+ if tid in sqdata.noexec:
+ continue
+ if tid in sqrq.scenequeue_notcovered:
+ continue
+ sqdata.outrightfail.add(tid)
+
+ h = pending_hash_index(tid, rqdata)
+ if h not in sqdata.hashes:
+ sqdata.hashes[h] = tid
+ else:
+ sqrq.sq_deferred[tid] = sqdata.hashes[h]
+ bb.warn("Deferring %s after %s" % (tid, sqdata.hashes[h]))
+
+
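The deferral at the end of update_scenequeue_data is first-writer-wins on the pending hash: the first tid claiming a hash runs, and identical ones wait behind it. In miniature:

    pending = [("a:do_deploy", "h1"), ("b:do_deploy", "h1"), ("c:do_deploy", "h2")]
    hashes, deferred = {}, {}
    for tid, h in pending:
        if h not in hashes:
            hashes[h] = tid            # first task with this hash proceeds
        else:
            deferred[tid] = hashes[h]  # identical work waits on the first
    print(deferred)                    # {'b:do_deploy': 'a:do_deploy'}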
class TaskFailure(Exception):
"""
Exception raised when a task in a runqueue fails
@@ -2683,6 +2859,15 @@ class runQueueTaskSkipped(runQueueEvent):
runQueueEvent.__init__(self, task, stats, rq)
self.reason = reason
+class taskUniHashUpdate(bb.event.Event):
+ """
+ Event notifying the runqueue that the unique hash for a task has been updated
+ """
+ def __init__(self, task, unihash):
+ self.taskid = task
+ self.unihash = unihash
+ bb.event.Event.__init__(self)
+
class runQueuePipe():
"""
Abstraction for a pipe between a worker thread and the server
@@ -2725,6 +2910,8 @@ class runQueuePipe():
except ValueError as e:
bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
bb.event.fire_from_worker(event, self.d)
+ if isinstance(event, taskUniHashUpdate):
+ self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
found = True
self.queue = self.queue[index+8:]
index = self.queue.find(b"</event>")
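The pipe protocol this hunk extends is simple byte framing: pickled payloads between <event>...</event> markers, sliced out of an accumulating buffer. A self-contained sketch (the marker lengths 7 and 8 match the slicing above):

    import pickle

    buf = b"".join(b"<event>" + pickle.dumps(t) + b"</event>"
                   for t in ("a:do_build", "b:do_build"))

    index = buf.find(b"</event>")
    while index != -1:
        payload = pickle.loads(buf[7:index])   # skip the 7-byte opening marker
        print(payload)
        buf = buf[index + 8:]                  # skip the 8-byte closing marker
        index = buf.find(b"</event>")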
diff --git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py
index f901fe505..69aae626e 100644
--- a/poky/bitbake/lib/bb/server/process.py
+++ b/poky/bitbake/lib/bb/server/process.py
@@ -456,7 +456,10 @@ class BitBakeServer(object):
self.configuration.setServerRegIdleCallback(server.register_idle_function)
os.close(self.readypipe)
writer = ConnectionWriter(self.readypipein)
- self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
+ try:
+ self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
+ except bb.BBHandledException:
+ return None
writer.send("r")
writer.close()
server.cooker = self.cooker
diff --git a/poky/bitbake/lib/bb/siggen.py b/poky/bitbake/lib/bb/siggen.py
index fe580e487..a4bb1ff7f 100644
--- a/poky/bitbake/lib/bb/siggen.py
+++ b/poky/bitbake/lib/bb/siggen.py
@@ -12,6 +12,8 @@ import bb.data
import difflib
import simplediff
from bb.checksum import FileChecksumCache
+from bb import runqueue
+import hashserv
logger = logging.getLogger('BitBake.SigGen')
@@ -41,15 +43,18 @@ class SignatureGenerator(object):
self.runtaskdeps = {}
self.file_checksum_values = {}
self.taints = {}
+ self.unitaskhashes = {}
+ self.setscenetasks = {}
def finalise(self, fn, d, varient):
return
- def get_unihash(self, task):
- return self.taskhash[task]
+ def get_unihash(self, tid):
+ return self.taskhash[tid]
- def get_taskhash(self, fn, task, deps, dataCache):
- return "0"
+ def get_taskhash(self, tid, deps, dataCache):
+ self.taskhash[tid] = hashlib.sha256(tid.encode("utf-8")).hexdigest()
+ return self.taskhash[tid]
def writeout_file_checksum_cache(self):
"""Write/update the file checksum cache onto disk"""
@@ -71,14 +76,25 @@ class SignatureGenerator(object):
return
def get_taskdata(self):
- return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)
+ return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash, self.unitaskhashes, self.setscenetasks)
def set_taskdata(self, data):
- self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
+ self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash, self.unitaskhashes, self.setscenetasks = data
def reset(self, data):
self.__init__(data)
+ def get_taskhashes(self):
+ return self.taskhash, self.unitaskhashes
+
+ def set_taskhashes(self, hashes):
+ self.taskhash, self.unitaskhashes = hashes
+
+ def save_unitaskhashes(self):
+ return
+
+ def set_setscene_tasks(self, setscene_tasks):
+ return
class SignatureGeneratorBasic(SignatureGenerator):
"""
@@ -94,7 +110,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
self.taints = {}
self.gendeps = {}
self.lookupcache = {}
- self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
+ self.setscenetasks = {}
self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
self.taskwhitelist = None
self.init_rundepcheck(data)
@@ -105,6 +121,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
else:
self.checksum_cache = None
+ self.unihash_cache = bb.cache.SimpleCache("1")
+ self.unitaskhashes = self.unihash_cache.init_cache(data, "bb_unihashes.dat", {})
+
def init_rundepcheck(self, data):
self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
if self.taskwhitelist:
@@ -120,16 +139,16 @@ class SignatureGeneratorBasic(SignatureGenerator):
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basewhitelist, fn)
for task in tasklist:
- k = fn + "." + task
- if not ignore_mismatch and k in self.basehash and self.basehash[k] != basehash[k]:
- bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], basehash[k]))
+ tid = fn + ":" + task
+ if not ignore_mismatch and tid in self.basehash and self.basehash[tid] != basehash[tid]:
+ bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (tid, self.basehash[tid], basehash[tid]))
bb.error("The following commands may help:")
cmd = "$ bitbake %s -c%s" % (d.getVar('PN'), task)
# Make sure sigdata is dumped before run printdiff
bb.error("%s -Snone" % cmd)
bb.error("Then:")
bb.error("%s -Sprintdiff\n" % cmd)
- self.basehash[k] = basehash[k]
+ self.basehash[tid] = basehash[tid]
self.taskdeps[fn] = taskdeps
self.gendeps[fn] = gendeps
@@ -137,6 +156,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
return taskdeps
+ def set_setscene_tasks(self, setscene_tasks):
+ self.setscenetasks = setscene_tasks
+
def finalise(self, fn, d, variant):
mc = d.getVar("__BBMULTICONFIG", False) or ""
@@ -156,7 +178,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
# self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
for task in taskdeps:
- d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
+ d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + ":" + task])
def rundep_check(self, fn, recipename, task, dep, depname, dataCache):
# Return True if we should keep the dependency, False to drop it
@@ -176,33 +198,26 @@ class SignatureGeneratorBasic(SignatureGenerator):
pass
return taint
- def get_taskhash(self, fn, task, deps, dataCache):
+ def get_taskhash(self, tid, deps, dataCache):
- mc = ''
- if fn.startswith('mc:'):
- mc = fn.split(':')[1]
- k = fn + "." + task
+ (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
- data = dataCache.basetaskhash[k]
- self.basehash[k] = data
- self.runtaskdeps[k] = []
- self.file_checksum_values[k] = []
+ data = dataCache.basetaskhash[tid]
+ self.basehash[tid] = data
+ self.runtaskdeps[tid] = []
+ self.file_checksum_values[tid] = []
recipename = dataCache.pkg_fn[fn]
for dep in sorted(deps, key=clean_basepath):
- pkgname = self.pkgnameextract.search(dep).group('fn')
- if mc:
- depmc = pkgname.split(':')[1]
- if mc != depmc:
- continue
- if dep.startswith("mc:") and not mc:
+ (depmc, _, deptaskname, depfn) = bb.runqueue.split_tid_mcfn(dep)
+ if mc != depmc:
continue
- depname = dataCache.pkg_fn[pkgname]
+ depname = dataCache.pkg_fn[depfn]
if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
continue
if dep not in self.taskhash:
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
data = data + self.get_unihash(dep)
- self.runtaskdeps[k].append(dep)
+ self.runtaskdeps[tid].append(dep)
if task in dataCache.file_checksums[fn]:
if self.checksum_cache:
@@ -210,7 +225,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
else:
checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
for (f,cs) in checksums:
- self.file_checksum_values[k].append((f,cs))
+ self.file_checksum_values[tid].append((f,cs))
if cs:
data = data + cs
@@ -220,16 +235,16 @@ class SignatureGeneratorBasic(SignatureGenerator):
import uuid
taint = str(uuid.uuid4())
data = data + taint
- self.taints[k] = "nostamp:" + taint
+ self.taints[tid] = "nostamp:" + taint
taint = self.read_taint(fn, task, dataCache.stamp[fn])
if taint:
data = data + taint
- self.taints[k] = taint
- logger.warning("%s is tainted from a forced run" % k)
+ self.taints[tid] = taint
+ logger.warning("%s is tainted from a forced run" % tid)
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
- self.taskhash[k] = h
+ self.taskhash[tid] = h
#d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
return h
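Hash composition in get_taskhash, reduced to its essentials (the real code above additionally filters dependencies by multiconfig and sorts them with clean_basepath; names here are illustrative):

    import hashlib

    def taskhash(basehash, dep_unihashes, checksums=(), taint=None):
        data = basehash
        for dep in sorted(dep_unihashes):      # stable ordering of dep hashes
            data += dep_unihashes[dep]
        for _, cs in checksums:
            if cs:
                data += cs
        if taint:
            data += taint                      # forced/nostamp runs perturb it
        return hashlib.sha256(data.encode("utf-8")).hexdigest()

    print(taskhash("base", {"a:do_x": "u1", "b:do_y": "u2"}))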
@@ -242,17 +257,20 @@ class SignatureGeneratorBasic(SignatureGenerator):
bb.fetch2.fetcher_parse_save()
bb.fetch2.fetcher_parse_done()
+ def save_unitaskhashes(self):
+ self.unihash_cache.save(self.unitaskhashes)
+
def dump_sigtask(self, fn, task, stampbase, runtime):
- k = fn + "." + task
+ tid = fn + ":" + task
referencestamp = stampbase
if isinstance(runtime, str) and runtime.startswith("customfile"):
sigfile = stampbase
referencestamp = runtime[11:]
- elif runtime and k in self.taskhash:
- sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
+ elif runtime and tid in self.taskhash:
+ sigfile = stampbase + "." + task + ".sigdata" + "." + self.get_unihash(tid)
else:
- sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]
+ sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[tid]
bb.utils.mkdirhier(os.path.dirname(sigfile))
@@ -261,7 +279,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
data['basewhitelist'] = self.basewhitelist
data['taskwhitelist'] = self.taskwhitelist
data['taskdeps'] = self.taskdeps[fn][task]
- data['basehash'] = self.basehash[k]
+ data['basehash'] = self.basehash[tid]
data['gendeps'] = {}
data['varvals'] = {}
data['varvals'][task] = self.lookupcache[fn][task]
@@ -271,30 +289,31 @@ class SignatureGeneratorBasic(SignatureGenerator):
data['gendeps'][dep] = self.gendeps[fn][dep]
data['varvals'][dep] = self.lookupcache[fn][dep]
- if runtime and k in self.taskhash:
- data['runtaskdeps'] = self.runtaskdeps[k]
- data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[k]]
+ if runtime and tid in self.taskhash:
+ data['runtaskdeps'] = self.runtaskdeps[tid]
+ data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[tid]]
data['runtaskhashes'] = {}
for dep in data['runtaskdeps']:
data['runtaskhashes'][dep] = self.get_unihash(dep)
- data['taskhash'] = self.taskhash[k]
+ data['taskhash'] = self.taskhash[tid]
+ data['unihash'] = self.get_unihash(tid)
taint = self.read_taint(fn, task, referencestamp)
if taint:
data['taint'] = taint
- if runtime and k in self.taints:
- if 'nostamp:' in self.taints[k]:
- data['taint'] = self.taints[k]
+ if runtime and tid in self.taints:
+ if 'nostamp:' in self.taints[tid]:
+ data['taint'] = self.taints[tid]
computed_basehash = calc_basehash(data)
- if computed_basehash != self.basehash[k]:
- bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
- if runtime and k in self.taskhash:
+ if computed_basehash != self.basehash[tid]:
+ bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[tid], tid))
+ if runtime and tid in self.taskhash:
computed_taskhash = calc_taskhash(data)
- if computed_taskhash != self.taskhash[k]:
- bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
- sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)
+ if computed_taskhash != self.taskhash[tid]:
+ bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
+ sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
try:
@@ -314,34 +333,33 @@ class SignatureGeneratorBasic(SignatureGenerator):
if fn in self.taskdeps:
for task in self.taskdeps[fn]:
tid = fn + ":" + task
- (mc, _, _) = bb.runqueue.split_tid(tid)
- k = fn + "." + task
- if k not in self.taskhash:
+ mc = bb.runqueue.mc_from_tid(tid)
+ if tid not in self.taskhash:
continue
- if dataCaches[mc].basetaskhash[k] != self.basehash[k]:
- bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
- bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k]))
+ if dataCaches[mc].basetaskhash[tid] != self.basehash[tid]:
+ bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % tid)
+ bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[tid], self.basehash[tid]))
self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)
class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
name = "basichash"
- def get_stampfile_hash(self, task):
- if task in self.taskhash:
- return self.taskhash[task]
+ def get_stampfile_hash(self, tid):
+ if tid in self.taskhash:
+ return self.taskhash[tid]
# If task is not in basehash, then error
- return self.basehash[task]
+ return self.basehash[tid]
def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
if taskname != "do_setscene" and taskname.endswith("_setscene"):
- k = fn + "." + taskname[:-9]
+ tid = fn + ":" + taskname[:-9]
else:
- k = fn + "." + taskname
+ tid = fn + ":" + taskname
if clean:
h = "*"
else:
- h = self.get_stampfile_hash(k)
+ h = self.get_stampfile_hash(tid)
return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
@@ -352,6 +370,172 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
bb.build.write_taint(task, d, fn)
+class SignatureGeneratorUniHashMixIn(object):
+ def get_taskdata(self):
+ return (self.server, self.method) + super().get_taskdata()
+
+ def set_taskdata(self, data):
+ self.server, self.method = data[:2]
+ super().set_taskdata(data[2:])
+
+ def client(self):
+ if getattr(self, '_client', None) is None:
+ self._client = hashserv.create_client(self.server)
+ return self._client
+
+ def __get_task_unihash_key(self, tid):
+ # TODO: The key only *needs* to be the taskhash, the tid is just
+ # convenient
+ return '%s:%s' % (tid.rsplit("/", 1)[1], self.taskhash[tid])
+
+ def get_stampfile_hash(self, tid):
+ if tid in self.taskhash:
+ # If a unique hash is reported, use it as the stampfile hash. This
+ # ensures that a task won't be re-run if the taskhash changes but
+ # would still result in the same output hash
+ unihash = self.unitaskhashes.get(self.__get_task_unihash_key(tid), None)
+ if unihash is not None:
+ return unihash
+
+ return super().get_stampfile_hash(tid)
+
+ def set_unihash(self, tid, unihash):
+ self.unitaskhashes[self.__get_task_unihash_key(tid)] = unihash
+
+ def get_unihash(self, tid):
+ taskhash = self.taskhash[tid]
+
+ # If its not a setscene task we can return
+ if self.setscenetasks and tid not in self.setscenetasks:
+ return taskhash
+
+ key = self.__get_task_unihash_key(tid)
+
+ # TODO: This cache can grow unbounded. It probably only needs to keep
+ # the most recent entry for each task
+ unihash = self.unitaskhashes.get(key, None)
+ if unihash is not None:
+ return unihash
+
+ # In the absence of being able to discover a unique hash from the
+ # server, make it be equivalent to the taskhash. The unique "hash" only
+ # really needs to be a unique string (not even necessarily a hash), but
+ # making it match the taskhash has a few advantages:
+ #
+ # 1) All of the sstate code that assumes hashes can be the same
+ # 2) It provides maximal compatibility with builders that don't use
+ # an equivalency server
+ # 3) It is easy for multiple independent builders to derive the
+ # same unique hash from the same input. This means that if the
+ # independent builders find the same taskhash, but it isn't reported
+ # to the server, there is a better chance that they will agree on
+ # the unique hash.
+ unihash = taskhash
+
+ try:
+ data = self.client().get_unihash(self.method, self.taskhash[tid])
+ if data:
+ unihash = data
+ # A unique hash equal to the taskhash is not very interesting,
+                # so it is reported at debug level 2. If they differ, that
+ # is much more interesting, so it is reported at debug level 1
+ bb.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
+ else:
+ bb.debug(2, 'No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
+ except hashserv.client.HashConnectionError as e:
+ bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
+
+ self.unitaskhashes[key] = unihash
+ return unihash
+
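The lookup order get_unihash implements above is: local unitaskhashes cache first, then the equivalence server, then fall back to the taskhash itself. A minimal standalone sketch of that resolution order, with a hypothetical client object standing in for the hashserv client (names here are illustrative, not the BitBake API):

    # Sketch only: 'client' is any object whose get_unihash(method, taskhash)
    # returns a server-side unihash or None; 'cache' is a plain dict.
    def resolve_unihash(cache, client, method, tid, taskhash):
        key = "%s:%s" % (tid.rsplit("/", 1)[1], taskhash)
        unihash = cache.get(key)           # 1. previously resolved value
        if unihash is None:
            unihash = taskhash             # 2. default: the taskhash itself
            reported = client.get_unihash(method, taskhash)
            if reported:                   # 3. a server-reported hash wins
                unihash = reported
            cache[key] = unihash
        return unihash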
+ def report_unihash(self, path, task, d):
+ import importlib
+
+ taskhash = d.getVar('BB_TASKHASH')
+ unihash = d.getVar('BB_UNIHASH')
+ report_taskdata = d.getVar('SSTATE_HASHEQUIV_REPORT_TASKDATA') == '1'
+ tempdir = d.getVar('T')
+ fn = d.getVar('BB_FILENAME')
+ tid = fn + ':do_' + task
+ key = tid.rsplit("/", 1)[1] + ':' + taskhash
+
+ if self.setscenetasks and tid not in self.setscenetasks:
+ return
+
+ # Sanity checks
+ cache_unihash = self.unitaskhashes.get(key, None)
+ if cache_unihash is None:
+ bb.fatal('%s not in unihash cache. Please report this error' % key)
+
+ if cache_unihash != unihash:
+ bb.fatal("Cache unihash %s doesn't match BB_UNIHASH %s" % (cache_unihash, unihash))
+
+ sigfile = None
+ sigfile_name = "depsig.do_%s.%d" % (task, os.getpid())
+ sigfile_link = "depsig.do_%s" % task
+
+ try:
+ sigfile = open(os.path.join(tempdir, sigfile_name), 'w+b')
+
+ locs = {'path': path, 'sigfile': sigfile, 'task': task, 'd': d}
+
+ if "." in self.method:
+ (module, method) = self.method.rsplit('.', 1)
+ locs['method'] = getattr(importlib.import_module(module), method)
+ outhash = bb.utils.better_eval('method(path, sigfile, task, d)', locs)
+ else:
+ outhash = bb.utils.better_eval(self.method + '(path, sigfile, task, d)', locs)
+
+ try:
+ extra_data = {}
+
+ owner = d.getVar('SSTATE_HASHEQUIV_OWNER')
+ if owner:
+ extra_data['owner'] = owner
+
+ if report_taskdata:
+ sigfile.seek(0)
+
+ extra_data['PN'] = d.getVar('PN')
+ extra_data['PV'] = d.getVar('PV')
+ extra_data['PR'] = d.getVar('PR')
+ extra_data['task'] = task
+ extra_data['outhash_siginfo'] = sigfile.read().decode('utf-8')
+
+ data = self.client().report_unihash(taskhash, self.method, outhash, unihash, extra_data)
+ new_unihash = data['unihash']
+
+ if new_unihash != unihash:
+ bb.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
+ bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
+ else:
+ bb.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
+ except hashserv.client.HashConnectionError as e:
+ bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
+ finally:
+ if sigfile:
+ sigfile.close()
+
+ sigfile_link_path = os.path.join(tempdir, sigfile_link)
+ bb.utils.remove(sigfile_link_path)
+
+ try:
+ os.symlink(sigfile_name, sigfile_link_path)
+ except OSError:
+ pass
+
+
+#
+# Dummy class used for bitbake-selftest
+#
+class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureGeneratorBasicHash):
+ name = "TestEquivHash"
+ def init_rundepcheck(self, data):
+ super().init_rundepcheck(data)
+ self.server = data.getVar('BB_HASHSERVE')
+ self.method = "sstate_output_hash"
+
+
def dump_this_task(outfile, d):
import bb.parse
fn = d.getVar("BB_FILENAME")
@@ -643,9 +827,9 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
a_taint = a_data.get('taint', None)
b_taint = b_data.get('taint', None)
if a_taint != b_taint:
- if a_taint.startswith('nostamp:'):
+ if a_taint and a_taint.startswith('nostamp:'):
a_taint = a_taint.replace('nostamp:', 'nostamp(uuid4):')
- if b_taint.startswith('nostamp:'):
+ if b_taint and b_taint.startswith('nostamp:'):
b_taint = b_taint.replace('nostamp:', 'nostamp(uuid4):')
output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
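The added `a_taint and` / `b_taint and` guards matter because a signature only records a taint when the task was forced or invalidated, so either value can be None. A short plain-Python illustration of the crash the guard prevents:

    a_taint = None                     # signature A recorded no taint
    # Without the guard this line raises AttributeError on NoneType:
    if a_taint and a_taint.startswith('nostamp:'):
        a_taint = a_taint.replace('nostamp:', 'nostamp(uuid4):')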
diff --git a/poky/bitbake/lib/bb/taskdata.py b/poky/bitbake/lib/bb/taskdata.py
index d13bd7c3b..8c25e09e8 100644
--- a/poky/bitbake/lib/bb/taskdata.py
+++ b/poky/bitbake/lib/bb/taskdata.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
BitBake 'TaskData' implementation
diff --git a/poky/bitbake/lib/bb/tests/data.py b/poky/bitbake/lib/bb/tests/data.py
index 3cf5abec7..3e49984c9 100644
--- a/poky/bitbake/lib/bb/tests/data.py
+++ b/poky/bitbake/lib/bb/tests/data.py
@@ -381,6 +381,19 @@ class TestOverrides(unittest.TestCase):
self.d.setVar("OVERRIDES", "foo:bar:some_val")
self.assertEqual(self.d.getVar("TEST"), " testvalue5")
+ def test_append_and_override_1(self):
+ self.d.setVar("TEST_append", "testvalue2")
+ self.d.setVar("TEST_bar", "testvalue3")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue3testvalue2")
+
+ def test_append_and_override_2(self):
+ self.d.setVar("TEST_append_bar", "testvalue2")
+ self.assertEqual(self.d.getVar("TEST"), "testvaluetestvalue2")
+
+ def test_append_and_override_3(self):
+ self.d.setVar("TEST_bar_append", "testvalue2")
+ self.assertEqual(self.d.getVar("TEST"), "testvalue2")
+
# Test an override with _<numeric> in it based on a real world OE issue
def test_underscore_override(self):
self.d.setVar("TARGET_ARCH", "x86_64")
@@ -466,7 +479,7 @@ class TaskHash(unittest.TestCase):
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile")
bb.warn(str(lookupcache))
- return basehash["somefile." + taskname]
+ return basehash["somefile:" + taskname]
d = bb.data.init()
d.setVar("__BBTASKS", ["mytask"])
diff --git a/poky/bitbake/lib/bb/tests/event.py b/poky/bitbake/lib/bb/tests/event.py
index b6e40c6ae..9229b63d4 100644
--- a/poky/bitbake/lib/bb/tests/event.py
+++ b/poky/bitbake/lib/bb/tests/event.py
@@ -561,14 +561,6 @@ class EventClassesTest(unittest.TestCase):
self.assertEqual(event.fn(1), callback(1))
self.assertEqual(event.pid, EventClassesTest._worker_pid)
- def test_StampUpdate(self):
- targets = ["foo", "bar"]
- stampfns = [lambda:"foobar"]
- event = bb.event.StampUpdate(targets, stampfns)
- self.assertEqual(event.targets, targets)
- self.assertEqual(event.stampPrefix, stampfns)
- self.assertEqual(event.pid, EventClassesTest._worker_pid)
-
def test_BuildBase(self):
""" Test base class for bitbake build events """
name = "foo"
diff --git a/poky/bitbake/lib/bb/tests/fetch.py b/poky/bitbake/lib/bb/tests/fetch.py
index 16f975b13..a0b656b61 100644
--- a/poky/bitbake/lib/bb/tests/fetch.py
+++ b/poky/bitbake/lib/bb/tests/fetch.py
@@ -899,6 +899,7 @@ class FetcherNetworkTest(FetcherTest):
if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')):
self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing')
+ @skipIfNoNetwork()
def test_git_submodule_dbus_broker(self):
        # The following external repositories have shown failures in fetch and unpack operations
# We want to avoid regressions!
@@ -916,6 +917,7 @@ class FetcherNetworkTest(FetcherTest):
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-sundry/config')), msg='Missing submodule config "subprojects/c-sundry"')
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-utf8/config')), msg='Missing submodule config "subprojects/c-utf8"')
+ @skipIfNoNetwork()
def test_git_submodule_CLI11(self):
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
fetcher = bb.fetch.Fetch([url], self.d)
@@ -929,6 +931,7 @@ class FetcherNetworkTest(FetcherTest):
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"')
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"')
+ @skipIfNoNetwork()
def test_git_submodule_update_CLI11(self):
""" Prevent regression on update detection not finding missing submodule, or modules without needed commits """
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714"
@@ -948,6 +951,7 @@ class FetcherNetworkTest(FetcherTest):
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"')
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"')
+ @skipIfNoNetwork()
def test_git_submodule_aktualizr(self):
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=git;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
fetcher = bb.fetch.Fetch([url], self.d)
@@ -964,6 +968,7 @@ class FetcherNetworkTest(FetcherTest):
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/googletest/config')), msg='Missing submodule config "third_party/googletest/config"')
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/HdrHistogram_c/config')), msg='Missing submodule config "third_party/HdrHistogram_c/config"')
+ @skipIfNoNetwork()
def test_git_submodule_iotedge(self):
""" Prevent regression on deeply nested submodules not being checked out properly, even though they were fetched. """
@@ -1195,8 +1200,8 @@ class FetchLatestVersionTest(FetcherTest):
# packages with valid UPSTREAM_CHECK_URI and UPSTREAM_CHECK_REGEX
("cups", "http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2", "https://github.com/apple/cups/releases", "(?P<name>cups\-)(?P<pver>((\d+[\.\-_]*)+))\-source\.tar\.gz")
: "2.0.0",
- ("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html", "http://download.oracle.com/otn/berkeley-db/(?P<name>db-)(?P<pver>((\d+[\.\-_]*)+))\.tar\.gz")
- : "6.1.19",
+ ("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://ftp.debian.org/debian/pool/main/d/db5.3/", "(?P<name>db5\.3_)(?P<pver>\d+(\.\d+)+).+\.orig\.tar\.xz")
+ : "5.3.10",
}
@skipIfNoNetwork()
@@ -1903,3 +1908,83 @@ class GitShallowTest(FetcherTest):
dir = os.listdir(self.unpackdir + "/git/")
self.assertIn("fstests.doap", dir)
+
+class GitLfsTest(FetcherTest):
+ def setUp(self):
+ FetcherTest.setUp(self)
+
+ self.gitdir = os.path.join(self.tempdir, 'git')
+ self.srcdir = os.path.join(self.tempdir, 'gitsource')
+
+ self.d.setVar('WORKDIR', self.tempdir)
+ self.d.setVar('S', self.gitdir)
+ self.d.delVar('PREMIRRORS')
+ self.d.delVar('MIRRORS')
+
+ self.d.setVar('SRCREV', '${AUTOREV}')
+ self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}')
+
+ bb.utils.mkdirhier(self.srcdir)
+ self.git('init', cwd=self.srcdir)
+ with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
+ attrs.write('*.mp3 filter=lfs -text')
+ self.git(['add', '.gitattributes'], cwd=self.srcdir)
+ self.git(['commit', '-m', "attributes", '.gitattributes'], cwd=self.srcdir)
+
+ def git(self, cmd, cwd=None):
+ if isinstance(cmd, str):
+ cmd = 'git ' + cmd
+ else:
+ cmd = ['git'] + cmd
+ if cwd is None:
+ cwd = self.gitdir
+ return bb.process.run(cmd, cwd=cwd)[0]
+
+ def fetch(self, uri=None):
+ uris = self.d.getVar('SRC_URI').split()
+ uri = uris[0]
+ d = self.d
+
+ fetcher = bb.fetch2.Fetch(uris, d)
+ fetcher.download()
+ ud = fetcher.ud[uri]
+ return fetcher, ud
+
+ def test_lfs_enabled(self):
+ import shutil
+
+ uri = 'git://%s;protocol=file;subdir=${S};lfs=1' % self.srcdir
+ self.d.setVar('SRC_URI', uri)
+
+ fetcher, ud = self.fetch()
+ self.assertIsNotNone(ud.method._find_git_lfs)
+
+ # If git-lfs can be found, the unpack should be successful
+ ud.method._find_git_lfs = lambda d: True
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+
+ # If git-lfs cannot be found, the unpack should throw an error
+ with self.assertRaises(bb.fetch2.FetchError):
+ ud.method._find_git_lfs = lambda d: False
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+
+ def test_lfs_disabled(self):
+ import shutil
+
+ uri = 'git://%s;protocol=file;subdir=${S};lfs=0' % self.srcdir
+ self.d.setVar('SRC_URI', uri)
+
+ fetcher, ud = self.fetch()
+ self.assertIsNotNone(ud.method._find_git_lfs)
+
+ # If git-lfs can be found, the unpack should be successful
+ ud.method._find_git_lfs = lambda d: True
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
+
+ # If git-lfs cannot be found, the unpack should be successful
+ ud.method._find_git_lfs = lambda d: False
+ shutil.rmtree(self.gitdir, ignore_errors=True)
+ fetcher.unpack(self.d.getVar('WORKDIR'))
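The two LFS tests work by replacing the `_find_git_lfs` detection hook on the fetch method object with a lambda, so both the tool-present and tool-missing paths can be exercised without git-lfs installed. A self-contained model of that stubbing pattern (all names below are hypothetical stand-ins, not the fetcher API):

    # The detector is an attribute, so a test can overwrite it to force
    # either branch of the unpack logic.
    class Method:
        def _find_tool(self, d):
            return False               # real detection would probe the host
        def unpack(self, d):
            if not self._find_tool(d):
                raise RuntimeError("required tool missing")

    m = Method()
    m._find_tool = lambda d: True      # pretend the tool is installed
    m.unpack(None)                     # unpack succeeds
    m._find_tool = lambda d: False     # pretend the tool is absent
    try:
        m.unpack(None)                 # unpack now fails, as the test expects
    except RuntimeError:
        pass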
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass
new file mode 100644
index 000000000..b57650d59
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/base.bbclass
@@ -0,0 +1,262 @@
+SLOWTASKS ??= ""
+SSTATEVALID ??= ""
+
+def stamptask(d):
+ import time
+
+ thistask = d.expand("${PN}:${BB_CURRENTTASK}")
+ stampname = d.expand("${TOPDIR}/%s.run" % thistask)
+ with open(stampname, "a+") as f:
+ f.write(d.getVar("BB_UNIHASH") + "\n")
+
+ if d.getVar("BB_CURRENT_MC") != "default":
+ thistask = d.expand("${BB_CURRENT_MC}:${PN}:${BB_CURRENTTASK}")
+ if thistask in d.getVar("SLOWTASKS").split():
+ bb.note("Slowing task %s" % thistask)
+ time.sleep(0.5)
+ if d.getVar("BB_HASHSERVE"):
+ task = d.getVar("BB_CURRENTTASK")
+ if task in ['package', 'package_qa', 'packagedata', 'package_write_ipk', 'package_write_rpm', 'populate_lic', 'populate_sysroot']:
+ bb.parse.siggen.report_unihash(os.getcwd(), d.getVar("BB_CURRENTTASK"), d)
+
+ with open(d.expand("${TOPDIR}/task.log"), "a+") as f:
+ f.write(thistask + "\n")
+
+
+def sstate_output_hash(path, sigfile, task, d):
+ import hashlib
+ h = hashlib.sha256()
+ h.update(d.expand("${PN}:${BB_CURRENTTASK}").encode('utf-8'))
+ return h.hexdigest()
+
+python do_fetch() {
+ # fetch
+ stamptask(d)
+}
+python do_unpack() {
+ # unpack
+ stamptask(d)
+}
+python do_patch() {
+ # patch
+ stamptask(d)
+}
+python do_populate_lic() {
+ # populate_lic
+ stamptask(d)
+}
+python do_prepare_recipe_sysroot() {
+ # prepare_recipe_sysroot
+ stamptask(d)
+}
+python do_configure() {
+ # configure
+ stamptask(d)
+}
+python do_compile() {
+ # compile
+ stamptask(d)
+}
+python do_install() {
+ # install
+ stamptask(d)
+}
+python do_populate_sysroot() {
+ # populate_sysroot
+ stamptask(d)
+}
+python do_package() {
+ # package
+ stamptask(d)
+}
+python do_package_write_ipk() {
+ # package_write_ipk
+ stamptask(d)
+}
+python do_package_write_rpm() {
+ # package_write_rpm
+ stamptask(d)
+}
+python do_packagedata() {
+ # packagedata
+ stamptask(d)
+}
+python do_package_qa() {
+ # package_qa
+ stamptask(d)
+}
+python do_build() {
+ # build
+ stamptask(d)
+}
+do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
+do_package[deptask] += "do_packagedata"
+do_build[recrdeptask] += "do_deploy"
+do_build[recrdeptask] += "do_package_write_ipk"
+do_build[recrdeptask] += "do_package_write_rpm"
+do_package_qa[rdeptask] = "do_packagedata"
+do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
+
+DEBIANRDEP = "do_packagedata"
+do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
+do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
+
+addtask fetch
+addtask unpack after do_fetch
+addtask patch after do_unpack
+addtask prepare_recipe_sysroot after do_patch
+addtask configure after do_prepare_recipe_sysroot
+addtask compile after do_configure
+addtask install after do_compile
+addtask populate_sysroot after do_install
+addtask package after do_install
+addtask package_write_ipk after do_packagedata do_package
+addtask package_write_rpm after do_packagedata do_package
+addtask packagedata after do_package
+addtask package_qa after do_package
+addtask build after do_package_qa do_package_write_rpm do_package_write_ipk do_populate_sysroot
+
+python do_package_setscene() {
+ stamptask(d)
+}
+python do_package_qa_setscene() {
+ stamptask(d)
+}
+python do_package_write_ipk_setscene() {
+ stamptask(d)
+}
+python do_package_write_rpm_setscene() {
+ stamptask(d)
+}
+python do_packagedata_setscene() {
+ stamptask(d)
+}
+python do_populate_lic_setscene() {
+ stamptask(d)
+}
+python do_populate_sysroot_setscene() {
+ stamptask(d)
+}
+
+addtask package_setscene
+addtask package_qa_setscene
+addtask package_write_ipk_setscene
+addtask package_write_rpm_setscene
+addtask packagedata_setscene
+addtask populate_lic_setscene
+addtask populate_sysroot_setscene
+
+BB_SETSCENE_DEPVALID = "setscene_depvalid"
+
+def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
+ # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
+ # task is included in taskdependees too
+ # Return - False - We need this dependency
+ # - True - We can skip this dependency
+ import re
+
+ def logit(msg, log):
+ if log is not None:
+ log.append(msg)
+ else:
+ bb.debug(2, msg)
+
+ logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
+
+ def isNativeCross(x):
+ return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
+
+ # We only need to trigger populate_lic through direct dependencies
+ if taskdependees[task][1] == "do_populate_lic":
+ return True
+
+ # We only need to trigger packagedata through direct dependencies
+ # but need to preserve packagedata on packagedata links
+ if taskdependees[task][1] == "do_packagedata":
+ for dep in taskdependees:
+ if taskdependees[dep][1] == "do_packagedata":
+ return False
+ return True
+
+ for dep in taskdependees:
+ logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
+ if task == dep:
+ continue
+ if dep in notneeded:
+ continue
+        # do_package_write_* and the other packaging tasks don't need do_package
+ if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
+ continue
+ # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_ipk', 'do_package_write_rpm']:
+ return False
+ # do_package/packagedata/package_qa don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
+ continue
+ # Native/Cross packages don't exist and are noexec anyway
+ if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
+ continue
+
+ # This is due to the [depends] in useradd.bbclass complicating matters
+ # The logic *is* reversed here due to the way hard setscene dependencies are injected
+ if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
+ continue
+
+ # Consider sysroot depending on sysroot tasks
+ if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
+ # Native/Cross populate_sysroot need their dependencies
+ if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
+ return False
+ # Target populate_sysroot depended on by cross tools need to be installed
+ if isNativeCross(taskdependees[dep][0]):
+ return False
+ # Native/cross tools depended upon by target sysroot are not needed
+ # Add an exception for shadow-native as required by useradd.bbclass
+ if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
+ continue
+            # Target populate_sysroot tasks need their dependencies
+ return False
+
+
+ if taskdependees[dep][1] == "do_populate_lic":
+ continue
+
+ # Safe fallthrough default
+ logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
+ return False
+ return True
+
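A worked call against the function above, showing the documented taskdependees shape (tid -> [PN, TASKNAME, FILENAME], values hypothetical): do_populate_lic only needs to be triggered through direct dependencies, so the function returns True and the setscene dependency can be skipped.

    taskdependees = {
        "a1:do_populate_lic": ["a1", "do_populate_lic", "a1.bb"],
        "a1:do_build":        ["a1", "do_build", "a1.bb"],
    }
    log = []
    # Prints True: this setscene dependency does not need to run
    print(setscene_depvalid("a1:do_populate_lic", taskdependees, set(), None, log))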
+BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
+
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, **kwargs):
+
+ found = set()
+ missed = set()
+
+ valid = d.getVar("SSTATEVALID").split()
+
+ for tid in sorted(sq_data['hash']):
+ n = os.path.basename(bb.runqueue.fn_from_tid(tid)).split(".")[0] + ":do_" + bb.runqueue.taskname_from_tid(tid)[3:]
+ print(n)
+ stampfile = d.expand("${TOPDIR}/%s.run" % n.replace("do_", ""))
+ if n in valid:
+ bb.note("SState: Found valid sstate for %s" % n)
+ found.add(tid)
+ elif n + ":" + sq_data['hash'][tid] in valid:
+ bb.note("SState: Found valid sstate for %s" % n)
+ found.add(tid)
+ elif os.path.exists(stampfile):
+ with open(stampfile, "r") as f:
+ hash = f.readline().strip()
+ if hash == sq_data['hash'][tid]:
+ bb.note("SState: Found valid sstate for %s (already run)" % n)
+ found.add(tid)
+ else:
+ bb.note("SState: sstate hash didn't match previous run for %s (%s vs %s)" % (n, sq_data['hash'][tid], hash))
+ missed.add(tid)
+ else:
+ missed.add(tid)
+ bb.note("SState: Found no valid sstate for %s (%s)" % (n, sq_data['hash'][tid]))
+
+ return found
+
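The BB_HASHCHECK_FUNCTION contract used above takes sq_data['hash'] (a tid -> taskhash mapping) and returns the set of tids whose sstate is usable. A reduced, self-contained model of that contract, with validity decided by a simple whitelist in place of the stamp and hash checks:

    def check_hashes(sq_hash, valid_tids):
        found = set()
        for tid in sorted(sq_hash):
            if tid in valid_tids:       # stand-in for the stamp/hash checks
                found.add(tid)
        return found

    print(check_hashes({"a1:do_package": "abc123"}, {"a1:do_package"}))
    # -> {'a1:do_package'}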
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass
new file mode 100644
index 000000000..da9ff1106
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/image.bbclass
@@ -0,0 +1,5 @@
+do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
+do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
+do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
+do_rootfs[recrdeptask] += "do_packagedata"
+do_rootfs[recrdeptask] += "do_populate_lic"
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass
new file mode 100644
index 000000000..7eaaee54a
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/classes/native.bbclass
@@ -0,0 +1,2 @@
+RECIPERDEPTASK = "do_populate_sysroot"
+do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf
new file mode 100644
index 000000000..5e451fc2c
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/bitbake.conf
@@ -0,0 +1,16 @@
+CACHE = "${TOPDIR}/cache"
+THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
+COREBASE := "${@os.path.normpath(os.path.dirname(d.getVar('FILE')+'/../../'))}"
+BBFILES = "${COREBASE}/recipes/*.bb"
+PROVIDES = "${PN}"
+PN = "${@bb.parse.vars_from_file(d.getVar('FILE', False),d)[0]}"
+PF = "${BB_CURRENT_MC}:${PN}"
+export PATH
+TMPDIR ??= "${TOPDIR}"
+STAMP = "${TMPDIR}/stamps/${PN}"
+T = "${TMPDIR}/workdir/${PN}/temp"
+BB_NUMBER_THREADS = "4"
+
+BB_HASHBASE_WHITELIST = "BB_CURRENT_MC BB_HASHSERVE TMPDIR TOPDIR SLOWTASKS SSTATEVALID FILE"
+
+include conf/multiconfig/${BB_CURRENT_MC}.conf
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc1.conf b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc1.conf
new file mode 100644
index 000000000..ecf23e1c7
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc1.conf
@@ -0,0 +1 @@
+TMPDIR = "${TOPDIR}/mc1/"
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc2.conf b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc2.conf
new file mode 100644
index 000000000..eef338e4c
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/conf/multiconfig/mc2.conf
@@ -0,0 +1 @@
+TMPDIR = "${TOPDIR}/mc2/"
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/a1.bb
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb
new file mode 100644
index 000000000..c0b288e5b
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/b1.bb
@@ -0,0 +1 @@
+DEPENDS = "a1"
\ No newline at end of file
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/c1.bb
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb
new file mode 100644
index 000000000..5ba197515
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/d1.bb
@@ -0,0 +1,3 @@
+DEPENDS = "a1"
+
+do_package_setscene[depends] = "a1:do_populate_sysroot_setscene"
diff --git a/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb
new file mode 100644
index 000000000..1588bc8a5
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue-tests/recipes/e1.bb
@@ -0,0 +1 @@
+DEPENDS = "b1"
\ No newline at end of file
diff --git a/poky/bitbake/lib/bb/tests/runqueue.py b/poky/bitbake/lib/bb/tests/runqueue.py
new file mode 100644
index 000000000..5e6439156
--- /dev/null
+++ b/poky/bitbake/lib/bb/tests/runqueue.py
@@ -0,0 +1,323 @@
+#
+# BitBake Tests for runqueue task processing
+#
+# Copyright (C) 2019 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import unittest
+import bb
+import os
+import tempfile
+import subprocess
+import sys
+import time
+
+#
+# TODO:
+# Add tests on task ordering (X happens before Y after Z)
+#
+
+class RunQueueTests(unittest.TestCase):
+
+ alltasks = ['package', 'fetch', 'unpack', 'patch', 'prepare_recipe_sysroot', 'configure',
+ 'compile', 'install', 'packagedata', 'package_qa', 'package_write_rpm', 'package_write_ipk',
+ 'populate_sysroot', 'build']
+ a1_sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
+ b1_sstatevalid = "b1:do_package b1:do_package_qa b1:do_packagedata b1:do_package_write_ipk b1:do_package_write_rpm b1:do_populate_lic b1:do_populate_sysroot"
+
+ def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False):
+ env = os.environ.copy()
+ env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), "runqueue-tests"))
+ env["BB_ENV_EXTRAWHITE"] = "SSTATEVALID SLOWTASKS"
+ env["SSTATEVALID"] = sstatevalid
+ env["SLOWTASKS"] = slowtasks
+ if extraenv:
+ for k in extraenv:
+ env[k] = extraenv[k]
+ env["BB_ENV_EXTRAWHITE"] = env["BB_ENV_EXTRAWHITE"] + " " + k
+ try:
+ output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir)
+ print(output)
+ except subprocess.CalledProcessError as e:
+ self.fail("Command %s failed with %s" % (cmd, e.output))
+ tasks = []
+ tasklog = builddir + "/task.log"
+ if os.path.exists(tasklog):
+ with open(tasklog, "r") as f:
+ tasks = [line.rstrip() for line in f]
+ if cleanup:
+ os.remove(tasklog)
+ return tasks
+
+ def test_no_setscenevalid(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:' + x for x in self.alltasks]
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_single_setscenevalid(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = "a1:do_package"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
+ 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot', 'a1:build']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_intermediate_setscenevalid(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = "a1:do_package a1:do_populate_sysroot"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot_setscene', 'a1:build']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_intermediate_notcovered(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = "a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
+ 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_all_setscenevalid(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = self.a1_sstatevalid
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
+ 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_no_settasks(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1", "-c", "patch"]
+ sstatevalid = self.a1_sstatevalid
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:fetch', 'a1:unpack', 'a1:patch']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_mix_covered_notcovered(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1:do_patch", "a1:do_populate_sysroot"]
+ sstatevalid = self.a1_sstatevalid
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:fetch', 'a1:unpack', 'a1:patch', 'a1:populate_sysroot_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+
+ # Test targets with intermediate setscene tasks alongside a target with no intermediate setscene tasks
+ def test_mixed_direct_tasks_setscene_tasks(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "c1:do_patch", "a1"]
+ sstatevalid = self.a1_sstatevalid
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['c1:fetch', 'c1:unpack', 'c1:patch', 'a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
+ 'a1:package_qa_setscene', 'a1:build', 'a1:populate_sysroot_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+    # This test slows down the execution of do_package_setscene until after other real tasks have
+    # started running, exercising a bug where tasks were lost from the buildable list of real
+    # tasks if they weren't in tasks_covered or tasks_notcovered
+ def test_slow_setscene(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ sstatevalid = "a1:do_package"
+ slowtasks = "a1:package_setscene"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, slowtasks)
+ expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
+ 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_qa', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot', 'a1:build']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_setscenewhitelist(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "a1"]
+ extraenv = {
+ "BB_SETSCENE_ENFORCE" : "1",
+ "BB_SETSCENE_ENFORCE_WHITELIST" : "a1:do_package_write_rpm a1:do_build"
+ }
+ sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_populate_lic a1:do_populate_sysroot"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
+ expected = ['a1:packagedata_setscene', 'a1:package_qa_setscene', 'a1:package_write_ipk_setscene',
+ 'a1:populate_sysroot_setscene', 'a1:package_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ # Tests for problems with dependencies between setscene tasks
+ def test_no_setscenevalid_harddeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "d1"]
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
+ 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot', 'd1:package', 'd1:fetch', 'd1:unpack', 'd1:patch', 'd1:prepare_recipe_sysroot', 'd1:configure',
+ 'd1:compile', 'd1:install', 'd1:packagedata', 'd1:package_qa', 'd1:package_write_rpm', 'd1:package_write_ipk',
+ 'd1:populate_sysroot', 'd1:build']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_no_setscenevalid_withdeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "b1"]
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks]
+ expected.remove('a1:build')
+ expected.remove('a1:package_qa')
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_single_a1_setscenevalid_withdeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "b1"]
+ sstatevalid = "a1:do_package"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_setscene', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
+ 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot'] + ['b1:' + x for x in self.alltasks]
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_single_b1_setscenevalid_withdeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "b1"]
+ sstatevalid = "b1:do_package"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package', 'a1:fetch', 'a1:unpack', 'a1:patch', 'a1:prepare_recipe_sysroot', 'a1:configure',
+ 'a1:compile', 'a1:install', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks]
+ expected.remove('b1:package')
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_intermediate_setscenevalid_withdeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "b1"]
+ sstatevalid = "a1:do_package a1:do_populate_sysroot b1:do_package"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_setscene', 'a1:packagedata', 'a1:package_write_rpm', 'a1:package_write_ipk',
+ 'a1:populate_sysroot_setscene', 'b1:package_setscene'] + ['b1:' + x for x in self.alltasks]
+ expected.remove('b1:package')
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_all_setscenevalid_withdeps(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ cmd = ["bitbake", "b1"]
+ sstatevalid = self.a1_sstatevalid + " " + self.b1_sstatevalid
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid)
+ expected = ['a1:package_write_ipk_setscene', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
+ 'b1:build', 'a1:populate_sysroot_setscene', 'b1:package_write_ipk_setscene', 'b1:package_write_rpm_setscene',
+ 'b1:packagedata_setscene', 'b1:package_qa_setscene', 'b1:populate_sysroot_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ def test_multiconfig_setscene_optimise(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ extraenv = {
+ "BBMULTICONFIG" : "mc1 mc2",
+ "BB_SIGNATURE_HANDLER" : "basic"
+ }
+ cmd = ["bitbake", "b1", "mc:mc1:b1", "mc:mc2:b1"]
+ setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
+ 'populate_sysroot_setscene', 'package_qa_setscene']
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
+ expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \
+ ['mc1:b1:' + x for x in setscenetasks] + ['mc1:a1:' + x for x in setscenetasks] + \
+ ['mc2:b1:' + x for x in setscenetasks] + ['mc2:a1:' + x for x in setscenetasks] + \
+ ['mc1:b1:build', 'mc2:b1:build']
+ for x in ['mc1:a1:package_qa_setscene', 'mc2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
+ expected.remove(x)
+ self.assertEqual(set(tasks), set(expected))
+
+
+ @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
+ def test_hashserv_single(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ extraenv = {
+ "BB_HASHSERVE" : "auto",
+ "BB_SIGNATURE_HANDLER" : "TestEquivHash"
+ }
+ cmd = ["bitbake", "a1", "b1"]
+ setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
+ 'populate_sysroot_setscene', 'package_qa_setscene']
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks]
+ self.assertEqual(set(tasks), set(expected))
+ cmd = ["bitbake", "a1", "-c", "install", "-f"]
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:install']
+ self.assertEqual(set(tasks), set(expected))
+ cmd = ["bitbake", "a1", "b1"]
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:populate_sysroot', 'a1:package', 'a1:package_write_rpm_setscene', 'a1:packagedata_setscene',
+ 'a1:package_write_ipk_setscene', 'a1:package_qa_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ self.shutdown(tempdir)
+
+ @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
+ def test_hashserv_double(self):
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ extraenv = {
+ "BB_HASHSERVE" : "auto",
+ "BB_SIGNATURE_HANDLER" : "TestEquivHash"
+ }
+ cmd = ["bitbake", "a1", "b1", "e1"]
+ setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
+ 'populate_sysroot_setscene', 'package_qa_setscene']
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks]
+ self.assertEqual(set(tasks), set(expected))
+ cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"]
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ cmd = ["bitbake", "e1"]
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot',
+ 'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene',
+ 'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene']
+ self.assertEqual(set(tasks), set(expected))
+
+ self.shutdown(tempdir)
+
+ @unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
+ def test_hashserv_multiple_setscene(self):
+ # Runs e1:do_package_setscene twice
+ with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
+ extraenv = {
+ "BB_HASHSERVE" : "auto",
+ "BB_SIGNATURE_HANDLER" : "TestEquivHash"
+ }
+ cmd = ["bitbake", "a1", "b1", "e1"]
+ setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
+ 'populate_sysroot_setscene', 'package_qa_setscene']
+ sstatevalid = ""
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + ['e1:' + x for x in self.alltasks]
+ self.assertEqual(set(tasks), set(expected))
+ cmd = ["bitbake", "a1", "b1", "-c", "install", "-fn"]
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True)
+ cmd = ["bitbake", "e1"]
+ sstatevalid = "e1:do_package"
+ tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv, cleanup=True, slowtasks="a1:populate_sysroot b1:populate_sysroot")
+ expected = ['a1:package', 'a1:install', 'b1:package', 'b1:install', 'a1:populate_sysroot', 'b1:populate_sysroot',
+ 'a1:package_write_ipk_setscene', 'b1:packagedata_setscene', 'b1:package_write_rpm_setscene',
+ 'a1:package_write_rpm_setscene', 'b1:package_write_ipk_setscene', 'a1:packagedata_setscene',
+ 'e1:package_setscene']
+ self.assertEqual(set(tasks), set(expected))
+ for i in expected:
+                self.assertEqual(tasks.count(i), 1, "%s not in task list exactly once" % i)
+
+ self.shutdown(tempdir)
+
+ def shutdown(self, tempdir):
+ # Wait for the hashserve socket to disappear else we'll see races with the tempdir cleanup
+ while os.path.exists(tempdir + "/hashserve.sock"):
+ time.sleep(0.5)
+
+
diff --git a/poky/bitbake/lib/bb/ui/buildinfohelper.py b/poky/bitbake/lib/bb/ui/buildinfohelper.py
index f2151c2d4..5cbca97f3 100644
--- a/poky/bitbake/lib/bb/ui/buildinfohelper.py
+++ b/poky/bitbake/lib/bb/ui/buildinfohelper.py
@@ -646,6 +646,9 @@ class ORMWrapper(object):
Target_Installed_Package.objects.create(target = target_obj, package = packagedict[p]['object'])
packagedeps_objs = []
+ pattern_so = re.compile(r'.*\.so(\.\d*)?$')
+ pattern_lib = re.compile(r'.*\-suffix(\d*)?$')
+ pattern_ko = re.compile(r'^kernel-module-.*')
for p in packagedict:
for (px,deptype) in packagedict[p]['depends']:
if deptype == 'depends':
@@ -654,6 +657,13 @@ class ORMWrapper(object):
tdeptype = Package_Dependency.TYPE_TRECOMMENDS
try:
+ # Skip known non-package objects like libraries and kernel modules
+ if pattern_so.match(px) or pattern_lib.match(px):
+ logger.info("Toaster does not add library file dependencies to packages (%s,%s)", p, px)
+ continue
+ if pattern_ko.match(px):
+ logger.info("Toaster does not add kernel module dependencies to packages (%s,%s)", p, px)
+ continue
packagedeps_objs.append(Package_Dependency(
package = packagedict[p]['object'],
depends_on = packagedict[px]['object'],
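The three compiled filters skip dependency entries that look like shared-object files, "-suffix" style artefact names, or kernel module packages rather than real package names, matching the two log messages. A quick check of what each pattern accepts:

    import re
    pattern_so  = re.compile(r'.*\.so(\.\d*)?$')
    pattern_lib = re.compile(r'.*\-suffix(\d*)?$')
    pattern_ko  = re.compile(r'^kernel-module-.*')

    assert pattern_so.match("libfoo.so")            # bare shared object
    assert pattern_so.match("libfoo.so.1")          # versioned shared object
    assert pattern_lib.match("libfoo-suffix1")      # suffixed artefact name
    assert pattern_ko.match("kernel-module-snd")    # kernel module package
    assert not pattern_so.match("libfoo-dev")       # real package: kept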
diff --git a/poky/bitbake/lib/bb/ui/knotty.py b/poky/bitbake/lib/bb/ui/knotty.py
index 88f638fb3..35736ade0 100644
--- a/poky/bitbake/lib/bb/ui/knotty.py
+++ b/poky/bitbake/lib/bb/ui/knotty.py
@@ -660,7 +660,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
# ignore
if isinstance(event, (bb.event.BuildBase,
bb.event.MetadataEvent,
- bb.event.StampUpdate,
bb.event.ConfigParsed,
bb.event.MultiConfigParsed,
bb.event.RecipeParsed,
@@ -690,17 +689,27 @@ def main(server, eventHandler, params, tf = TerminalFilter):
if params.observe_only:
print("\nKeyboard Interrupt, exiting observer...")
main.shutdown = 2
- if not params.observe_only and main.shutdown == 1:
+
+ def state_force_shutdown():
print("\nSecond Keyboard Interrupt, stopping...\n")
_, error = server.runCommand(["stateForceShutdown"])
if error:
logger.error("Unable to cleanly stop: %s" % error)
+
+ if not params.observe_only and main.shutdown == 1:
+ state_force_shutdown()
+
if not params.observe_only and main.shutdown == 0:
print("\nKeyboard Interrupt, closing down...\n")
interrupted = True
- _, error = server.runCommand(["stateShutdown"])
- if error:
- logger.error("Unable to cleanly shutdown: %s" % error)
+            # Catch a second KeyboardInterrupt while stateShutdown is running
+ try:
+ _, error = server.runCommand(["stateShutdown"])
+ if error:
+ logger.error("Unable to cleanly shutdown: %s" % error)
+ except KeyboardInterrupt:
+ state_force_shutdown()
+
main.shutdown = main.shutdown + 1
pass
except Exception as e:
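The knotty change refactors the forced-stop path into state_force_shutdown() and wraps the clean stateShutdown command in try/except, so a second Ctrl-C arriving while the clean shutdown is still in flight escalates instead of being lost. The same pattern in isolation, with hypothetical stand-ins for the server commands:

    def shutdown(run_command, force_stop):
        try:
            run_command("stateShutdown")   # clean stop; may take a while
        except KeyboardInterrupt:
            force_stop()                   # second Ctrl-C: escalate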
diff --git a/poky/bitbake/lib/bb/ui/uihelper.py b/poky/bitbake/lib/bb/ui/uihelper.py
index db7f0ca08..c8dd7df08 100644
--- a/poky/bitbake/lib/bb/ui/uihelper.py
+++ b/poky/bitbake/lib/bb/ui/uihelper.py
@@ -40,7 +40,7 @@ class BBUIHelper:
self.running_pids.remove(event.pid)
self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)})
self.needUpdate = True
- elif isinstance(event, bb.runqueue.runQueueTaskStarted) or isinstance(event, bb.runqueue.sceneQueueTaskStarted):
+ elif isinstance(event, bb.runqueue.runQueueTaskStarted):
self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed + 1
self.tasknumber_total = event.stats.total
self.needUpdate = True
diff --git a/poky/bitbake/lib/bb/utils.py b/poky/bitbake/lib/bb/utils.py
index ed19825fa..d035949b3 100644
--- a/poky/bitbake/lib/bb/utils.py
+++ b/poky/bitbake/lib/bb/utils.py
@@ -394,7 +394,7 @@ def better_exec(code, context, text = None, realfile = "<code>", pythonexception
code = better_compile(code, realfile, realfile)
try:
exec(code, get_context(), context)
- except (bb.BBHandledException, bb.parse.SkipRecipe, bb.build.FuncFailed, bb.data_smart.ExpansionError):
+ except (bb.BBHandledException, bb.parse.SkipRecipe, bb.data_smart.ExpansionError):
# Error already shown so passthrough, no need for traceback
raise
except Exception as e:
@@ -677,7 +677,7 @@ def _check_unsafe_delete_path(path):
return True
return False
-def remove(path, recurse=False):
+def remove(path, recurse=False, ionice=False):
"""Equivalent to rm -f or rm -rf"""
if not path:
return
@@ -686,7 +686,10 @@ def remove(path, recurse=False):
if _check_unsafe_delete_path(path):
raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
# shutil.rmtree(name) would be ideal but its too slow
- subprocess.check_call(['rm', '-rf'] + glob.glob(path))
+ cmd = []
+ if ionice:
+ cmd = ['ionice', '-c', '3']
+ subprocess.check_call(cmd + ['rm', '-rf'] + glob.glob(path))
return
for name in glob.glob(path):
try:
@@ -695,12 +698,12 @@ def remove(path, recurse=False):
if exc.errno != errno.ENOENT:
raise
-def prunedir(topdir):
+def prunedir(topdir, ionice=False):
# Delete everything reachable from the directory named in 'topdir'.
# CAUTION: This is dangerous!
if _check_unsafe_delete_path(topdir):
raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir)
- remove(topdir, recurse=True)
+ remove(topdir, recurse=True, ionice=ionice)
#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
@@ -780,7 +783,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.rename(src, destpath)
renamefailed = 0
except Exception as e:
- if e[0] != errno.EXDEV:
+ if e.errno != errno.EXDEV:
# Some random error.
print("movefile: Failed to move", src, "to", dest, e)
return None
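With the new keyword argument, background cleanup of large trees can ask the kernel for idle-class I/O scheduling ('ionice -c 3' is the util-linux idle class), keeping the rm -rf from starving foreground tasks. Typical use of the extended API (the path is illustrative):

    import bb.utils
    # Recursive delete with idle-priority I/O; ionice=False (the default)
    # preserves the old plain 'rm -rf' behaviour.
    bb.utils.remove("/path/to/old-tree", recurse=True, ionice=True)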
diff --git a/poky/bitbake/lib/bblayers/query.py b/poky/bitbake/lib/bblayers/query.py
index 993589de9..7db49c8e2 100644
--- a/poky/bitbake/lib/bblayers/query.py
+++ b/poky/bitbake/lib/bblayers/query.py
@@ -46,7 +46,7 @@ layer, with the preferred version first. Note that skipped recipes that
are overlayed will also be listed, with a " (skipped)" suffix.
"""
- items_listed = self.list_recipes('Overlayed recipes', None, True, args.same_version, args.filenames, True, None)
+ items_listed = self.list_recipes('Overlayed recipes', None, True, args.same_version, args.filenames, False, True, None, False, None)
# Check for overlayed .bbclass files
classes = collections.defaultdict(list)
@@ -112,9 +112,9 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
title = 'Matching recipes:'
else:
title = 'Available recipes:'
- self.list_recipes(title, args.pnspec, False, False, args.filenames, args.multiple, inheritlist)
+ self.list_recipes(title, args.pnspec, False, False, args.filenames, args.recipes_only, args.multiple, args.layer, args.bare, inheritlist)
- def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_multi_provider_only, inherits):
+ def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_recipes_only, show_multi_provider_only, selected_layer, bare, inherits):
if inherits:
bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
for classname in inherits:
@@ -144,24 +144,30 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
preferred_versions[p] = (ver, fn)
def print_item(f, pn, ver, layer, ispref):
- if f in skiplist:
- skipped = ' (skipped)'
- else:
- skipped = ''
- if show_filenames:
- if ispref:
- logger.plain("%s%s", f, skipped)
+ if not selected_layer or layer == selected_layer:
+ if not bare and f in skiplist:
+ skipped = ' (skipped)'
else:
- logger.plain(" %s%s", f, skipped)
- else:
- if ispref:
- logger.plain("%s:", pn)
- logger.plain(" %s %s%s", layer.ljust(20), ver, skipped)
+ skipped = ''
+ if show_filenames:
+ if ispref:
+ logger.plain("%s%s", f, skipped)
+ else:
+ logger.plain(" %s%s", f, skipped)
+ elif show_recipes_only:
+ if pn not in show_unique_pn:
+ show_unique_pn.append(pn)
+ logger.plain("%s%s", pn, skipped)
+ else:
+ if ispref:
+ logger.plain("%s:", pn)
+ logger.plain(" %s %s%s", layer.ljust(20), ver, skipped)
global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
cls_re = re.compile('classes/')
preffiles = []
+ show_unique_pn = []
items_listed = False
for p in sorted(pkg_pn):
if pnspec:
@@ -493,8 +499,11 @@ NOTE: .bbappend files can impact the dependencies.
parser_show_recipes = self.add_command(sp, 'show-recipes', self.do_show_recipes)
parser_show_recipes.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true')
+ parser_show_recipes.add_argument('-r', '--recipes-only', help='instead of the default formatting, list recipes only', action='store_true')
parser_show_recipes.add_argument('-m', '--multiple', help='only list where multiple recipes (in the same layer or different layers) exist for the same recipe name', action='store_true')
parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class(es) - separate multiple classes using , (without spaces)', metavar='CLASS', default='')
+ parser_show_recipes.add_argument('-l', '--layer', help='only list recipes from the selected layer', default='')
+ parser_show_recipes.add_argument('-b', '--bare', help='output just names without the "(skipped)" marker', action='store_true')
parser_show_recipes.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
diff --git a/poky/bitbake/lib/hashserv/__init__.py b/poky/bitbake/lib/hashserv/__init__.py
index fdc9ced9f..c3318620f 100644
--- a/poky/bitbake/lib/hashserv/__init__.py
+++ b/poky/bitbake/lib/hashserv/__init__.py
@@ -1,126 +1,25 @@
-# Copyright (C) 2018 Garmin Ltd.
+# Copyright (C) 2018-2019 Garmin Ltd.
#
# SPDX-License-Identifier: GPL-2.0-only
#
-from http.server import BaseHTTPRequestHandler, HTTPServer
-import contextlib
-import urllib.parse
+from contextlib import closing
+import re
import sqlite3
-import json
-import traceback
-import logging
-from datetime import datetime
-
-logger = logging.getLogger('hashserv')
-
-class HashEquivalenceServer(BaseHTTPRequestHandler):
- def log_message(self, f, *args):
- logger.debug(f, *args)
-
- def do_GET(self):
- try:
- p = urllib.parse.urlparse(self.path)
-
- if p.path != self.prefix + '/v1/equivalent':
- self.send_error(404)
- return
-
- query = urllib.parse.parse_qs(p.query, strict_parsing=True)
- method = query['method'][0]
- taskhash = query['taskhash'][0]
-
- d = None
- with contextlib.closing(self.db.cursor()) as cursor:
- cursor.execute('SELECT taskhash, method, unihash FROM tasks_v1 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1',
- {'method': method, 'taskhash': taskhash})
-
- row = cursor.fetchone()
-
- if row is not None:
- logger.debug('Found equivalent task %s', row['taskhash'])
- d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
-
- self.send_response(200)
- self.send_header('Content-Type', 'application/json; charset=utf-8')
- self.end_headers()
- self.wfile.write(json.dumps(d).encode('utf-8'))
- except:
- logger.exception('Error in GET')
- self.send_error(400, explain=traceback.format_exc())
- return
-
- def do_POST(self):
- try:
- p = urllib.parse.urlparse(self.path)
-
- if p.path != self.prefix + '/v1/equivalent':
- self.send_error(404)
- return
-
- length = int(self.headers['content-length'])
- data = json.loads(self.rfile.read(length).decode('utf-8'))
-
- with contextlib.closing(self.db.cursor()) as cursor:
- cursor.execute('''
- SELECT taskhash, method, unihash FROM tasks_v1 WHERE method=:method AND outhash=:outhash
- ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
- created ASC
- LIMIT 1
- ''', {k: data[k] for k in ('method', 'outhash', 'taskhash')})
-
- row = cursor.fetchone()
-
- if row is None or row['taskhash'] != data['taskhash']:
- unihash = data['unihash']
- if row is not None:
- unihash = row['unihash']
-
- insert_data = {
- 'method': data['method'],
- 'outhash': data['outhash'],
- 'taskhash': data['taskhash'],
- 'unihash': unihash,
- 'created': datetime.now()
- }
-
- for k in ('owner', 'PN', 'PV', 'PR', 'task', 'outhash_siginfo'):
- if k in data:
- insert_data[k] = data[k]
-
- cursor.execute('''INSERT INTO tasks_v1 (%s) VALUES (%s)''' % (
- ', '.join(sorted(insert_data.keys())),
- ', '.join(':' + k for k in sorted(insert_data.keys()))),
- insert_data)
-
- logger.info('Adding taskhash %s with unihash %s', data['taskhash'], unihash)
- cursor.execute('SELECT taskhash, method, unihash FROM tasks_v1 WHERE id=:id', {'id': cursor.lastrowid})
- row = cursor.fetchone()
-
- self.db.commit()
-
- d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
-
- self.send_response(200)
- self.send_header('Content-Type', 'application/json; charset=utf-8')
- self.end_headers()
- self.wfile.write(json.dumps(d).encode('utf-8'))
- except:
- logger.exception('Error in POST')
- self.send_error(400, explain=traceback.format_exc())
- return
-
-def create_server(addr, db, prefix=''):
- class Handler(HashEquivalenceServer):
- pass
-
- Handler.prefix = prefix
- Handler.db = db
+
+UNIX_PREFIX = "unix://"
+
+ADDR_TYPE_UNIX = 0
+ADDR_TYPE_TCP = 1
+
+
+def setup_database(database, sync=True):
+ db = sqlite3.connect(database)
db.row_factory = sqlite3.Row
- with contextlib.closing(db.cursor()) as cursor:
+ with closing(db.cursor()) as cursor:
cursor.execute('''
- CREATE TABLE IF NOT EXISTS tasks_v1 (
+ CREATE TABLE IF NOT EXISTS tasks_v2 (
id INTEGER PRIMARY KEY AUTOINCREMENT,
method TEXT NOT NULL,
outhash TEXT NOT NULL,
@@ -134,9 +33,61 @@ def create_server(addr, db, prefix=''):
PV TEXT,
PR TEXT,
task TEXT,
- outhash_siginfo TEXT
+ outhash_siginfo TEXT,
+
+ UNIQUE(method, outhash, taskhash)
)
''')
+ cursor.execute('PRAGMA journal_mode = WAL')
+ cursor.execute('PRAGMA synchronous = %s' % ('NORMAL' if sync else 'OFF'))
+
+ # Drop old indexes
+ cursor.execute('DROP INDEX IF EXISTS taskhash_lookup')
+ cursor.execute('DROP INDEX IF EXISTS outhash_lookup')
+
+ # Create new indexes
+ cursor.execute('CREATE INDEX IF NOT EXISTS taskhash_lookup_v2 ON tasks_v2 (method, taskhash, created)')
+ cursor.execute('CREATE INDEX IF NOT EXISTS outhash_lookup_v2 ON tasks_v2 (method, outhash)')
+
+ return db
+
+
+def parse_address(addr):
+ if addr.startswith(UNIX_PREFIX):
+ return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX):],))
+ else:
+ m = re.match(r'\[(?P<host>[^\]]*)\]:(?P<port>\d+)$', addr)
+ if m is not None:
+ host = m.group('host')
+ port = m.group('port')
+ else:
+ host, port = addr.split(':')
+
+ return (ADDR_TYPE_TCP, (host, int(port)))
+
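The three address forms parse_address accepts, and what each call returns (ADDR_TYPE_UNIX is 0, ADDR_TYPE_TCP is 1):

    print(parse_address("unix:///run/hashserve.sock"))
    # -> (0, ('/run/hashserve.sock',))
    print(parse_address("[::1]:8686"))     # bracketed IPv6 host
    # -> (1, ('::1', 8686))
    print(parse_address("localhost:8686"))
    # -> (1, ('localhost', 8686))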
+
+def create_server(addr, dbname, *, sync=True):
+ from . import server
+ db = setup_database(dbname, sync=sync)
+ s = server.Server(db)
+
+ (typ, a) = parse_address(addr)
+ if typ == ADDR_TYPE_UNIX:
+ s.start_unix_server(*a)
+ else:
+ s.start_tcp_server(*a)
+
+ return s
+
+
+def create_client(addr):
+ from . import client
+ c = client.Client()
+
+ (typ, a) = parse_address(addr)
+ if typ == ADDR_TYPE_UNIX:
+ c.connect_unix(*a)
+ else:
+ c.connect_tcp(*a)
- logger.info('Starting server on %s', addr)
- return HTTPServer(addr, Handler)
+ return c
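
Taken together, the helpers above make standing up a server a few lines of Python; a minimal sketch, assuming bitbake's lib directory is on sys.path (the paths are illustrative):

    import hashserv

    # Port 0 binds an ephemeral port; server.address reports the real one.
    server = hashserv.create_server('localhost:0', '/tmp/hashserv.db', sync=False)
    client = hashserv.create_client(server.address)

Note that serve_forever() blocks, so in practice the server runs in its own process and clients connect from elsewhere, which is exactly how the reworked tests below drive it via multiprocessing.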
diff --git a/poky/bitbake/lib/hashserv/client.py b/poky/bitbake/lib/hashserv/client.py
new file mode 100644
index 000000000..f65956617
--- /dev/null
+++ b/poky/bitbake/lib/hashserv/client.py
@@ -0,0 +1,157 @@
+# Copyright (C) 2019 Garmin Ltd.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from contextlib import closing
+import json
+import logging
+import socket
+import os
+
+
+logger = logging.getLogger('hashserv.client')
+
+
+class HashConnectionError(Exception):
+ pass
+
+
+class Client(object):
+ MODE_NORMAL = 0
+ MODE_GET_STREAM = 1
+
+ def __init__(self):
+ self._socket = None
+ self.reader = None
+ self.writer = None
+ self.mode = self.MODE_NORMAL
+
+ def connect_tcp(self, address, port):
+ def connect_sock():
+ s = socket.create_connection((address, port))
+
+ s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+ s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ return s
+
+ self._connect_sock = connect_sock
+
+ def connect_unix(self, path):
+ def connect_sock():
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            # AF_UNIX has path length issues, so chdir here as a workaround
+ cwd = os.getcwd()
+ try:
+ os.chdir(os.path.dirname(path))
+ s.connect(os.path.basename(path))
+ finally:
+ os.chdir(cwd)
+ return s
+
+ self._connect_sock = connect_sock
+
+ def connect(self):
+ if self._socket is None:
+ self._socket = self._connect_sock()
+
+ self.reader = self._socket.makefile('r', encoding='utf-8')
+ self.writer = self._socket.makefile('w', encoding='utf-8')
+
+ self.writer.write('OEHASHEQUIV 1.0\n\n')
+ self.writer.flush()
+
+ # Restore mode if the socket is being re-created
+ cur_mode = self.mode
+ self.mode = self.MODE_NORMAL
+ self._set_mode(cur_mode)
+
+ return self._socket
+
+ def close(self):
+ if self._socket is not None:
+ self._socket.close()
+ self._socket = None
+ self.reader = None
+ self.writer = None
+
+ def _send_wrapper(self, proc):
+ count = 0
+ while True:
+ try:
+ self.connect()
+ return proc()
+ except (OSError, HashConnectionError, json.JSONDecodeError, UnicodeDecodeError) as e:
+ logger.warning('Error talking to server: %s' % e)
+ if count >= 3:
+ if not isinstance(e, HashConnectionError):
+ raise HashConnectionError(str(e))
+ raise e
+ self.close()
+ count += 1
+
+ def send_message(self, msg):
+ def proc():
+ self.writer.write('%s\n' % json.dumps(msg))
+ self.writer.flush()
+
+ l = self.reader.readline()
+ if not l:
+ raise HashConnectionError('Connection closed')
+
+ if not l.endswith('\n'):
+                raise HashConnectionError('Bad message %r' % l)
+
+ return json.loads(l)
+
+ return self._send_wrapper(proc)
+
+ def send_stream(self, msg):
+ def proc():
+ self.writer.write("%s\n" % msg)
+ self.writer.flush()
+ l = self.reader.readline()
+ if not l:
+ raise HashConnectionError('Connection closed')
+ return l.rstrip()
+
+ return self._send_wrapper(proc)
+
+ def _set_mode(self, new_mode):
+ if new_mode == self.MODE_NORMAL and self.mode == self.MODE_GET_STREAM:
+ r = self.send_stream('END')
+ if r != 'ok':
+ raise HashConnectionError('Bad response from server %r' % r)
+ elif new_mode == self.MODE_GET_STREAM and self.mode == self.MODE_NORMAL:
+ r = self.send_message({'get-stream': None})
+ if r != 'ok':
+ raise HashConnectionError('Bad response from server %r' % r)
+ elif new_mode != self.mode:
+ raise Exception('Undefined mode transition %r -> %r' % (self.mode, new_mode))
+
+ self.mode = new_mode
+
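The mode switch corresponds to a simple line-oriented exchange on the wire. Assuming a server that knows one task, a session might look like this (C> is the client, S> the server; hashes are illustrative):

    C> OEHASHEQUIV 1.0
    C>
    C> {"get-stream": null}
    S> "ok"
    C> TestMethod 35788efcb8dfb0a02659d81cf2bfd695fb30faf9
    S> f46d3fbb439bd9b921095da657a4de906510d2cd
    C> END
    S> ok

Replies in normal mode are JSON documents (hence the quoted "ok"), while stream replies are raw lines, which is what keeps the hot path cheap.
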
+ def get_unihash(self, method, taskhash):
+ self._set_mode(self.MODE_GET_STREAM)
+ r = self.send_stream('%s %s' % (method, taskhash))
+ if not r:
+ return None
+ return r
+
+ def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
+ self._set_mode(self.MODE_NORMAL)
+ m = extra.copy()
+ m['taskhash'] = taskhash
+ m['method'] = method
+ m['outhash'] = outhash
+ m['unihash'] = unihash
+ return self.send_message({'report': m})
+
+ def get_stats(self):
+ self._set_mode(self.MODE_NORMAL)
+ return self.send_message({'get-stats': None})
+
+ def reset_stats(self):
+ self._set_mode(self.MODE_NORMAL)
+ return self.send_message({'reset-stats': None})
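
As a usage sketch (server address and hashes are illustrative), the client flips between the two modes transparently as calls alternate:

    from hashserv.client import Client

    c = Client()
    c.connect_tcp('localhost', 8686)

    # MODE_NORMAL: one JSON message per request
    c.report_unihash('aabb11', 'TestMethod', 'ccdd22', 'eeff33',
                     extra={'owner': 'user@example.com'})

    # MODE_GET_STREAM: one 'method taskhash' query per line
    unihash = c.get_unihash('TestMethod', 'aabb11')  # None if unknown

    print(c.get_stats())
    c.close()

Connection setup is lazy and _send_wrapper retries a few times before giving up, so a transiently unreachable server only surfaces as HashConnectionError once the retries are exhausted.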
diff --git a/poky/bitbake/lib/hashserv/server.py b/poky/bitbake/lib/hashserv/server.py
new file mode 100644
index 000000000..0aff77688
--- /dev/null
+++ b/poky/bitbake/lib/hashserv/server.py
@@ -0,0 +1,414 @@
+# Copyright (C) 2019 Garmin Ltd.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from contextlib import closing
+from datetime import datetime
+import asyncio
+import json
+import logging
+import math
+import os
+import signal
+import socket
+import time
+
+logger = logging.getLogger('hashserv.server')
+
+
+class Measurement(object):
+ def __init__(self, sample):
+ self.sample = sample
+
+ def start(self):
+ self.start_time = time.perf_counter()
+
+ def end(self):
+ self.sample.add(time.perf_counter() - self.start_time)
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.end()
+
+
+class Sample(object):
+ def __init__(self, stats):
+ self.stats = stats
+ self.num_samples = 0
+ self.elapsed = 0
+
+ def measure(self):
+ return Measurement(self)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.end()
+
+ def add(self, elapsed):
+ self.num_samples += 1
+ self.elapsed += elapsed
+
+ def end(self):
+ if self.num_samples:
+ self.stats.add(self.elapsed)
+ self.num_samples = 0
+ self.elapsed = 0
+
+
+class Stats(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.num = 0
+ self.total_time = 0
+ self.max_time = 0
+ self.m = 0
+ self.s = 0
+ self.current_elapsed = None
+
+ def add(self, elapsed):
+ self.num += 1
+ if self.num == 1:
+ self.m = elapsed
+ self.s = 0
+ else:
+ last_m = self.m
+ self.m = last_m + (elapsed - last_m) / self.num
+ self.s = self.s + (elapsed - last_m) * (elapsed - self.m)
+
+ self.total_time += elapsed
+
+ if self.max_time < elapsed:
+ self.max_time = elapsed
+
+ def start_sample(self):
+ return Sample(self)
+
+ @property
+ def average(self):
+ if self.num == 0:
+ return 0
+ return self.total_time / self.num
+
+ @property
+ def stdev(self):
+ if self.num <= 1:
+ return 0
+ return math.sqrt(self.s / (self.num - 1))
+
+ def todict(self):
+ return {k: getattr(self, k) for k in ('num', 'total_time', 'max_time', 'average', 'stdev')}
+
+
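Stats.add() is Welford's online algorithm: m carries the running mean and s the sum of squared deviations, so the mean and sample standard deviation come out without storing individual samples. A quick check against the statistics module (sample values are illustrative):

    import statistics

    stats = Stats()
    samples = [0.5, 1.5, 2.5, 4.0]
    for v in samples:
        stats.add(v)

    assert abs(stats.average - statistics.mean(samples)) < 1e-9
    assert abs(stats.stdev - statistics.stdev(samples)) < 1e-9
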
+class ServerClient(object):
+ def __init__(self, reader, writer, db, request_stats):
+ self.reader = reader
+ self.writer = writer
+ self.db = db
+ self.request_stats = request_stats
+
+ async def process_requests(self):
+ try:
+ self.addr = self.writer.get_extra_info('peername')
+ logger.debug('Client %r connected' % (self.addr,))
+
+ # Read protocol and version
+ protocol = await self.reader.readline()
+ if protocol is None:
+ return
+
+ (proto_name, proto_version) = protocol.decode('utf-8').rstrip().split()
+ if proto_name != 'OEHASHEQUIV' or proto_version != '1.0':
+ return
+
+ # Read headers. Currently, no headers are implemented, so look for
+ # an empty line to signal the end of the headers
+ while True:
+ line = await self.reader.readline()
+ if line is None:
+ return
+
+ line = line.decode('utf-8').rstrip()
+ if not line:
+ break
+
+ # Handle messages
+ handlers = {
+ 'get': self.handle_get,
+ 'report': self.handle_report,
+ 'get-stream': self.handle_get_stream,
+ 'get-stats': self.handle_get_stats,
+ 'reset-stats': self.handle_reset_stats,
+ }
+
+ while True:
+ d = await self.read_message()
+ if d is None:
+ break
+
+ for k in handlers.keys():
+ if k in d:
+ logger.debug('Handling %s' % k)
+ if 'stream' in k:
+ await handlers[k](d[k])
+ else:
+ with self.request_stats.start_sample() as self.request_sample, \
+ self.request_sample.measure():
+ await handlers[k](d[k])
+ break
+ else:
+ logger.warning("Unrecognized command %r" % d)
+ break
+
+ await self.writer.drain()
+ finally:
+ self.writer.close()
+
+ def write_message(self, msg):
+ self.writer.write(('%s\n' % json.dumps(msg)).encode('utf-8'))
+
+ async def read_message(self):
+ l = await self.reader.readline()
+ if not l:
+ return None
+
+ try:
+ message = l.decode('utf-8')
+
+ if not message.endswith('\n'):
+ return None
+
+ return json.loads(message)
+        except (json.JSONDecodeError, UnicodeDecodeError):
+            logger.error('Bad message from client: %r' % l)
+            raise
+
+ async def handle_get(self, request):
+ method = request['method']
+ taskhash = request['taskhash']
+
+ row = self.query_equivalent(method, taskhash)
+ if row is not None:
+            logger.debug('Found equivalent task %s -> %s', row['taskhash'], row['unihash'])
+ d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
+
+ self.write_message(d)
+ else:
+ self.write_message(None)
+
+ async def handle_get_stream(self, request):
+ self.write_message('ok')
+
+ while True:
+ l = await self.reader.readline()
+ if not l:
+ return
+
+ try:
+ # This inner loop is very sensitive and must be as fast as
+ # possible (which is why the request sample is handled manually
+ # instead of using 'with', and also why logging statements are
+                # commented out).
+ self.request_sample = self.request_stats.start_sample()
+ request_measure = self.request_sample.measure()
+ request_measure.start()
+
+ l = l.decode('utf-8').rstrip()
+ if l == 'END':
+ self.writer.write('ok\n'.encode('utf-8'))
+ return
+
+ (method, taskhash) = l.split()
+ #logger.debug('Looking up %s %s' % (method, taskhash))
+ row = self.query_equivalent(method, taskhash)
+ if row is not None:
+ msg = ('%s\n' % row['unihash']).encode('utf-8')
+                    #logger.debug('Found equivalent task %s -> %s', row['taskhash'], row['unihash'])
+ else:
+ msg = '\n'.encode('utf-8')
+
+ self.writer.write(msg)
+ finally:
+ request_measure.end()
+ self.request_sample.end()
+
+ await self.writer.drain()
+
+ async def handle_report(self, data):
+ with closing(self.db.cursor()) as cursor:
+ cursor.execute('''
+ -- Find tasks with a matching outhash (that is, tasks that
+ -- are equivalent)
+ SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND outhash=:outhash
+
+ -- If there is an exact match on the taskhash, return it.
+ -- Otherwise return the oldest matching outhash of any
+ -- taskhash
+ ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
+ created ASC
+
+ -- Only return one row
+ LIMIT 1
+ ''', {k: data[k] for k in ('method', 'outhash', 'taskhash')})
+
+ row = cursor.fetchone()
+
+ # If no matching outhash was found, or one *was* found but it
+ # wasn't an exact match on the taskhash, a new entry for this
+ # taskhash should be added
+ if row is None or row['taskhash'] != data['taskhash']:
+ # If a row matching the outhash was found, the unihash for
+ # the new taskhash should be the same as that one.
+ # Otherwise the caller provided unihash is used.
+ unihash = data['unihash']
+ if row is not None:
+ unihash = row['unihash']
+
+ insert_data = {
+ 'method': data['method'],
+ 'outhash': data['outhash'],
+ 'taskhash': data['taskhash'],
+ 'unihash': unihash,
+ 'created': datetime.now()
+ }
+
+ for k in ('owner', 'PN', 'PV', 'PR', 'task', 'outhash_siginfo'):
+ if k in data:
+ insert_data[k] = data[k]
+
+ cursor.execute('''INSERT INTO tasks_v2 (%s) VALUES (%s)''' % (
+ ', '.join(sorted(insert_data.keys())),
+ ', '.join(':' + k for k in sorted(insert_data.keys()))),
+ insert_data)
+
+ self.db.commit()
+
+ logger.info('Adding taskhash %s with unihash %s',
+ data['taskhash'], unihash)
+
+ d = {
+ 'taskhash': data['taskhash'],
+ 'method': data['method'],
+ 'unihash': unihash
+ }
+ else:
+ d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}
+
+ self.write_message(d)
+
+ async def handle_get_stats(self, request):
+ d = {
+ 'requests': self.request_stats.todict(),
+ }
+
+ self.write_message(d)
+
+ async def handle_reset_stats(self, request):
+ d = {
+ 'requests': self.request_stats.todict(),
+ }
+
+ self.request_stats.reset()
+ self.write_message(d)
+
+ def query_equivalent(self, method, taskhash):
+ # This is part of the inner loop and must be as fast as possible
+ try:
+ cursor = self.db.cursor()
+ cursor.execute('SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1',
+ {'method': method, 'taskhash': taskhash})
+ return cursor.fetchone()
+        except:
+            cursor.close()
+            raise
+
+
+class Server(object):
+ def __init__(self, db, loop=None):
+ self.request_stats = Stats()
+ self.db = db
+
+ if loop is None:
+ self.loop = asyncio.new_event_loop()
+ self.close_loop = True
+ else:
+ self.loop = loop
+ self.close_loop = False
+
+ self._cleanup_socket = None
+
+ def start_tcp_server(self, host, port):
+ self.server = self.loop.run_until_complete(
+ asyncio.start_server(self.handle_client, host, port, loop=self.loop)
+ )
+
+ for s in self.server.sockets:
+ logger.info('Listening on %r' % (s.getsockname(),))
+            # Newer Python versions do this automatically. Do it manually here
+            # for maximum compatibility
+ s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+ s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
+
+ name = self.server.sockets[0].getsockname()
+ if self.server.sockets[0].family == socket.AF_INET6:
+ self.address = "[%s]:%d" % (name[0], name[1])
+ else:
+ self.address = "%s:%d" % (name[0], name[1])
+
+ def start_unix_server(self, path):
+ def cleanup():
+ os.unlink(path)
+
+ cwd = os.getcwd()
+ try:
+ # Work around path length limits in AF_UNIX
+ os.chdir(os.path.dirname(path))
+ self.server = self.loop.run_until_complete(
+ asyncio.start_unix_server(self.handle_client, os.path.basename(path), loop=self.loop)
+ )
+ finally:
+ os.chdir(cwd)
+
+ logger.info('Listening on %r' % path)
+
+ self._cleanup_socket = cleanup
+ self.address = "unix://%s" % os.path.abspath(path)
+
+ async def handle_client(self, reader, writer):
+ # writer.transport.set_write_buffer_limits(0)
+ try:
+ client = ServerClient(reader, writer, self.db, self.request_stats)
+ await client.process_requests()
+ except Exception as e:
+ import traceback
+ logger.error('Error from client: %s' % str(e), exc_info=True)
+ traceback.print_exc()
+ writer.close()
+ logger.info('Client disconnected')
+
+ def serve_forever(self):
+ def signal_handler():
+ self.loop.stop()
+
+ self.loop.add_signal_handler(signal.SIGTERM, signal_handler)
+
+ try:
+ self.loop.run_forever()
+ except KeyboardInterrupt:
+ pass
+
+ self.server.close()
+ self.loop.run_until_complete(self.server.wait_closed())
+ logger.info('Server shutting down')
+
+ if self.close_loop:
+ self.loop.close()
+
+ if self._cleanup_socket is not None:
+ self._cleanup_socket()
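
For a standalone instance the Server class composes as follows; a sketch equivalent to what create_server() in __init__.py wires up (database path and port are illustrative):

    from hashserv import setup_database
    from hashserv.server import Server

    db = setup_database('/var/lib/hashserv/db.sqlite')
    s = Server(db)
    s.start_tcp_server('localhost', 8686)
    s.serve_forever()  # blocks until SIGTERM or KeyboardInterrupt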
diff --git a/poky/bitbake/lib/hashserv/tests.py b/poky/bitbake/lib/hashserv/tests.py
index 8300a2559..a5472a996 100644
--- a/poky/bitbake/lib/hashserv/tests.py
+++ b/poky/bitbake/lib/hashserv/tests.py
@@ -1,48 +1,48 @@
#! /usr/bin/env python3
#
-# Copyright (C) 2018 Garmin Ltd.
+# Copyright (C) 2018-2019 Garmin Ltd.
#
# SPDX-License-Identifier: GPL-2.0-only
#
-import unittest
-import threading
-import sqlite3
+from . import create_server, create_client
import hashlib
-import urllib.request
-import json
-from . import create_server
+import logging
+import multiprocessing
+import os
+import sys
+import tempfile
+import threading
+import unittest
+
+
+class TestHashEquivalenceServer(object):
+ METHOD = 'TestMethod'
+
+ def _run_server(self):
+ # logging.basicConfig(level=logging.DEBUG, filename='bbhashserv.log', filemode='w',
+ # format='%(levelname)s %(filename)s:%(lineno)d %(message)s')
+ self.server.serve_forever()
-class TestHashEquivalenceServer(unittest.TestCase):
def setUp(self):
- # Start an in memory hash equivalence server in the background bound to
- # an ephemeral port
- db = sqlite3.connect(':memory:', check_same_thread=False)
- self.server = create_server(('localhost', 0), db)
- self.server_addr = 'http://localhost:%d' % self.server.socket.getsockname()[1]
- self.server_thread = threading.Thread(target=self.server.serve_forever)
+ if sys.version_info < (3, 5, 0):
+ self.skipTest('Python 3.5 or later required')
+
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-hashserv')
+ self.dbfile = os.path.join(self.temp_dir.name, 'db.sqlite')
+
+ self.server = create_server(self.get_server_addr(), self.dbfile)
+ self.server_thread = multiprocessing.Process(target=self._run_server)
self.server_thread.start()
+ self.client = create_client(self.server.address)
def tearDown(self):
# Shutdown server
s = getattr(self, 'server', None)
if s is not None:
- self.server.shutdown()
+ self.server_thread.terminate()
self.server_thread.join()
- self.server.server_close()
-
- def send_get(self, path):
- url = '%s/%s' % (self.server_addr, path)
- request = urllib.request.Request(url)
- response = urllib.request.urlopen(request)
- return json.loads(response.read().decode('utf-8'))
-
- def send_post(self, path, data):
- headers = {'content-type': 'application/json'}
- url = '%s/%s' % (self.server_addr, path)
- request = urllib.request.Request(url, json.dumps(data).encode('utf-8'), headers)
- response = urllib.request.urlopen(request)
- return json.loads(response.read().decode('utf-8'))
+ self.client.close()
+ self.temp_dir.cleanup()
def test_create_hash(self):
# Simple test that hashes can be created
@@ -50,16 +50,11 @@ class TestHashEquivalenceServer(unittest.TestCase):
outhash = '2765d4a5884be49b28601445c2760c5f21e7e5c0ee2b7e3fce98fd7e5970796f'
unihash = 'f46d3fbb439bd9b921095da657a4de906510d2cd'
- d = self.send_get('v1/equivalent?method=TestMethod&taskhash=%s' % taskhash)
- self.assertIsNone(d, msg='Found unexpected task, %r' % d)
+ result = self.client.get_unihash(self.METHOD, taskhash)
+ self.assertIsNone(result, msg='Found unexpected task, %r' % result)
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash,
- 'method': 'TestMethod',
- 'outhash': outhash,
- 'unihash': unihash,
- })
- self.assertEqual(d['unihash'], unihash, 'Server returned bad unihash')
+ result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
+ self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
def test_create_equivalent(self):
# Tests that a second reported task with the same outhash will be
@@ -67,25 +62,16 @@ class TestHashEquivalenceServer(unittest.TestCase):
taskhash = '53b8dce672cb6d0c73170be43f540460bfc347b4'
outhash = '5a9cb1649625f0bf41fc7791b635cd9c2d7118c7f021ba87dcd03f72b67ce7a8'
unihash = 'f37918cc02eb5a520b1aff86faacbc0a38124646'
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash,
- 'method': 'TestMethod',
- 'outhash': outhash,
- 'unihash': unihash,
- })
- self.assertEqual(d['unihash'], unihash, 'Server returned bad unihash')
+
+ result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
+ self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
# Report a different task with the same outhash. The returned unihash
# should match the first task
taskhash2 = '3bf6f1e89d26205aec90da04854fbdbf73afe6b4'
unihash2 = 'af36b199320e611fbb16f1f277d3ee1d619ca58b'
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash2,
- 'method': 'TestMethod',
- 'outhash': outhash,
- 'unihash': unihash2,
- })
- self.assertEqual(d['unihash'], unihash, 'Server returned bad unihash')
+ result = self.client.report_unihash(taskhash2, self.METHOD, outhash, unihash2)
+ self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
def test_duplicate_taskhash(self):
# Tests that duplicate reports of the same taskhash with different
@@ -94,38 +80,63 @@ class TestHashEquivalenceServer(unittest.TestCase):
taskhash = '8aa96fcffb5831b3c2c0cb75f0431e3f8b20554a'
outhash = 'afe240a439959ce86f5e322f8c208e1fedefea9e813f2140c81af866cc9edf7e'
unihash = '218e57509998197d570e2c98512d0105985dffc9'
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash,
- 'method': 'TestMethod',
- 'outhash': outhash,
- 'unihash': unihash,
- })
+ self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
- d = self.send_get('v1/equivalent?method=TestMethod&taskhash=%s' % taskhash)
- self.assertEqual(d['unihash'], unihash)
+ result = self.client.get_unihash(self.METHOD, taskhash)
+ self.assertEqual(result, unihash)
outhash2 = '0904a7fe3dc712d9fd8a74a616ddca2a825a8ee97adf0bd3fc86082c7639914d'
unihash2 = 'ae9a7d252735f0dafcdb10e2e02561ca3a47314c'
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash,
- 'method': 'TestMethod',
- 'outhash': outhash2,
- 'unihash': unihash2
- })
+ self.client.report_unihash(taskhash, self.METHOD, outhash2, unihash2)
- d = self.send_get('v1/equivalent?method=TestMethod&taskhash=%s' % taskhash)
- self.assertEqual(d['unihash'], unihash)
+ result = self.client.get_unihash(self.METHOD, taskhash)
+ self.assertEqual(result, unihash)
outhash3 = '77623a549b5b1a31e3732dfa8fe61d7ce5d44b3370f253c5360e136b852967b4'
unihash3 = '9217a7d6398518e5dc002ed58f2cbbbc78696603'
- d = self.send_post('v1/equivalent', {
- 'taskhash': taskhash,
- 'method': 'TestMethod',
- 'outhash': outhash3,
- 'unihash': unihash3
- })
+ self.client.report_unihash(taskhash, self.METHOD, outhash3, unihash3)
+
+ result = self.client.get_unihash(self.METHOD, taskhash)
+ self.assertEqual(result, unihash)
+
+ def test_stress(self):
+ def query_server(failures):
+            client = create_client(self.server.address)
+ try:
+ for i in range(1000):
+ taskhash = hashlib.sha256()
+ taskhash.update(str(i).encode('utf-8'))
+ taskhash = taskhash.hexdigest()
+ result = client.get_unihash(self.METHOD, taskhash)
+ if result != taskhash:
+ failures.append("taskhash mismatch: %s != %s" % (result, taskhash))
+ finally:
+ client.close()
+
+ # Report hashes
+ for i in range(1000):
+ taskhash = hashlib.sha256()
+ taskhash.update(str(i).encode('utf-8'))
+ taskhash = taskhash.hexdigest()
+ self.client.report_unihash(taskhash, self.METHOD, taskhash, taskhash)
+
+ failures = []
+        threads = [threading.Thread(target=query_server, args=(failures,)) for _ in range(100)]
+
+ for t in threads:
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ self.assertFalse(failures)
+
- d = self.send_get('v1/equivalent?method=TestMethod&taskhash=%s' % taskhash)
- self.assertEqual(d['unihash'], unihash)
+class TestHashEquivalenceUnixServer(TestHashEquivalenceServer, unittest.TestCase):
+    def get_server_addr(self):
+        return "unix://" + os.path.join(self.temp_dir.name, 'sock')
+
+
+class TestHashEquivalenceTCPServer(TestHashEquivalenceServer, unittest.TestCase):
+    def get_server_addr(self):
+        return "localhost:0"
diff --git a/poky/bitbake/lib/layerindexlib/__init__.py b/poky/bitbake/lib/layerindexlib/__init__.py
index d231cf6a9..77196b408 100644
--- a/poky/bitbake/lib/layerindexlib/__init__.py
+++ b/poky/bitbake/lib/layerindexlib/__init__.py
@@ -376,7 +376,7 @@ layerBranches set. If not, they are effectively blank.'''
invalid.append(name)
- def _resolve_dependencies(layerbranches, ignores, dependencies, invalid):
+ def _resolve_dependencies(layerbranches, ignores, dependencies, invalid, processed=None):
for layerbranch in layerbranches:
if ignores and layerbranch.layer.name in ignores:
continue
@@ -388,6 +388,13 @@ layerBranches set. If not, they are effectively blank.'''
if ignores and deplayerbranch.layer.name in ignores:
continue
+ # Since this is depth first, we need to know what we're currently processing
+ # in order to avoid infinite recursion on a loop.
+ if processed and deplayerbranch.layer.name in processed:
+ # We have found a recursion...
+ logger.warning('Circular layer dependency found: %s -> %s' % (processed, deplayerbranch.layer.name))
+ continue
+
# This little block is why we can't re-use the LayerIndexObj version,
# we must be able to satisfy each dependencies across layer indexes and
# use the layer index order for priority. (r stands for replacement below)
@@ -411,7 +418,17 @@ layerBranches set. If not, they are effectively blank.'''
# New dependency, we need to resolve it now... depth-first
if deplayerbranch.layer.name not in dependencies:
- (dependencies, invalid) = _resolve_dependencies([deplayerbranch], ignores, dependencies, invalid)
+ # Avoid recursion on this branch.
+ # We copy so we don't end up polluting the depth-first branch with other
+ # branches. Duplication between individual branches IS expected and
+ # handled by 'dependencies' processing.
+ if not processed:
+ local_processed = []
+ else:
+ local_processed = processed.copy()
+ local_processed.append(deplayerbranch.layer.name)
+
+ (dependencies, invalid) = _resolve_dependencies([deplayerbranch], ignores, dependencies, invalid, local_processed)
if deplayerbranch.layer.name not in dependencies:
dependencies[deplayerbranch.layer.name] = [deplayerbranch, layerdependency]
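
The local_processed copy gives each depth-first branch its own view of its ancestors, which is the standard way to detect cycles without suppressing legitimate duplicates across sibling branches. Stripped of the layer-index plumbing, the pattern reduces to something like this (names are illustrative):

    def resolve(name, deps_of, resolved, path=()):
        # 'path' plays the role of local_processed: it grows only along
        # the current branch, so siblings are unaffected.
        for dep in deps_of(name):
            if dep in path:
                continue  # back-edge: a circular dependency, skip it
            if dep not in resolved:
                resolve(dep, deps_of, resolved, path + (dep,))
                resolved.add(dep)

    deps = {'a': ['b'], 'b': ['c'], 'c': ['a']}  # contains a loop
    resolve('a', lambda n: deps.get(n, []), set(), ('a',))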
diff --git a/poky/bitbake/lib/prserv/db.py b/poky/bitbake/lib/prserv/db.py
index d6188a679..117d8c052 100644
--- a/poky/bitbake/lib/prserv/db.py
+++ b/poky/bitbake/lib/prserv/db.py
@@ -257,7 +257,7 @@ class PRData(object):
self.connection=sqlite3.connect(self.filename, isolation_level="EXCLUSIVE", check_same_thread = False)
self.connection.row_factory=sqlite3.Row
self.connection.execute("pragma synchronous = off;")
- self.connection.execute("PRAGMA journal_mode = WAL;")
+ self.connection.execute("PRAGMA journal_mode = MEMORY;")
self._tables={}
def disconnect(self):
diff --git a/poky/bitbake/lib/pyinotify.py b/poky/bitbake/lib/pyinotify.py
index 9dcfd0d2e..1528a22e8 100644
--- a/poky/bitbake/lib/pyinotify.py
+++ b/poky/bitbake/lib/pyinotify.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-
+#
# pyinotify.py - python interface to inotify
# Copyright (c) 2005-2015 Sebastien Martini <seb@dbzteam.org>
#
diff --git a/poky/bitbake/lib/toaster/orm/models.py b/poky/bitbake/lib/toaster/orm/models.py
index 41a9f819d..bb6b5decf 100644
--- a/poky/bitbake/lib/toaster/orm/models.py
+++ b/poky/bitbake/lib/toaster/orm/models.py
@@ -965,12 +965,12 @@ class TargetSDKFile(models.Model):
class Target_Image_File(models.Model):
# valid suffixes for image files produced by a build
SUFFIXES = {
- 'btrfs', 'cpio', 'cpio.gz', 'cpio.lz4', 'cpio.lzma', 'cpio.xz',
- 'cramfs', 'elf', 'ext2', 'ext2.bz2', 'ext2.gz', 'ext2.lzma', 'ext4',
- 'ext4.gz', 'ext3', 'ext3.gz', 'hdddirect', 'hddimg', 'iso', 'jffs2',
- 'jffs2.sum', 'multiubi', 'qcow2', 'squashfs', 'squashfs-lzo',
+ 'btrfs', 'container', 'cpio', 'cpio.gz', 'cpio.lz4', 'cpio.lzma',
+ 'cpio.xz', 'cramfs', 'ext2', 'ext2.bz2', 'ext2.gz', 'ext2.lzma',
+ 'ext3', 'ext3.gz', 'ext4', 'ext4.gz', 'f2fs', 'hddimg', 'iso', 'jffs2',
+ 'jffs2.sum', 'multiubi', 'squashfs', 'squashfs-lz4', 'squashfs-lzo',
'squashfs-xz', 'tar', 'tar.bz2', 'tar.gz', 'tar.lz4', 'tar.xz', 'ubi',
- 'ubifs', 'vdi', 'vmdk', 'wic', 'wic.bmap', 'wic.bz2', 'wic.gz', 'wic.lzma'
+ 'ubifs', 'wic', 'wic.bz2', 'wic.gz', 'wic.lzma'
}
target = models.ForeignKey(Target)
diff --git a/poky/bitbake/lib/toaster/tests/browser/selenium_helpers.py b/poky/bitbake/lib/toaster/tests/browser/selenium_helpers.py
index 6d9bb8092..02d4f4b5c 100644
--- a/poky/bitbake/lib/toaster/tests/browser/selenium_helpers.py
+++ b/poky/bitbake/lib/toaster/tests/browser/selenium_helpers.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/selenium_helpers_base.py b/poky/bitbake/lib/toaster/tests/browser/selenium_helpers_base.py
index 8417aa3b2..6c94684e8 100644
--- a/poky/bitbake/lib/toaster/tests/browser/selenium_helpers_base.py
+++ b/poky/bitbake/lib/toaster/tests/browser/selenium_helpers_base.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_all_builds_page.py b/poky/bitbake/lib/toaster/tests/browser/test_all_builds_page.py
index f4021614b..fba627bd2 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_all_builds_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_all_builds_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_all_projects_page.py b/poky/bitbake/lib/toaster/tests/browser/test_all_projects_page.py
index f86d19d28..afd2d3566 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_all_projects_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_all_projects_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page.py b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page.py
index 53c125ec5..d972aff1b 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_artifacts.py b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_artifacts.py
index c560d7de1..e2623e8ad 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_artifacts.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_artifacts.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_recipes.py b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_recipes.py
index e4f3d685e..c542d45f1 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_recipes.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_recipes.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_tasks.py b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_tasks.py
index bdb0c27be..22acb470a 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_tasks.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_builddashboard_page_tasks.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_js_unit_tests.py b/poky/bitbake/lib/toaster/tests/browser/test_js_unit_tests.py
index 63f3f4a74..e8b4295b8 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_js_unit_tests.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_js_unit_tests.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_landing_page.py b/poky/bitbake/lib/toaster/tests/browser/test_landing_page.py
index 0a00fccc1..07901989d 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_landing_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_landing_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_layerdetails_page.py b/poky/bitbake/lib/toaster/tests/browser/test_layerdetails_page.py
index e34aa13db..f81e696a2 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_layerdetails_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_layerdetails_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_most_recent_builds_states.py b/poky/bitbake/lib/toaster/tests/browser/test_most_recent_builds_states.py
index d52b18429..15d25dc3a 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_most_recent_builds_states.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_most_recent_builds_states.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_new_custom_image_page.py b/poky/bitbake/lib/toaster/tests/browser/test_new_custom_image_page.py
index 3b47a497e..0aa3b7a77 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_new_custom_image_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_new_custom_image_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_new_project_page.py b/poky/bitbake/lib/toaster/tests/browser/test_new_project_page.py
index d250bd143..8e56bb043 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_new_project_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_new_project_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py b/poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py
index 065f3ebe6..47fb10b26 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_project_builds_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_project_config_page.py b/poky/bitbake/lib/toaster/tests/browser/test_project_config_page.py
index 48508dff3..2816eb907 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_project_config_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_project_config_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_project_page.py b/poky/bitbake/lib/toaster/tests/browser/test_project_page.py
index 5cb607ddd..8b5e1b673 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_project_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_project_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_sample.py b/poky/bitbake/lib/toaster/tests/browser/test_sample.py
index 008ba14a2..f4ad670a3 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_sample.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_sample.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_task_page.py b/poky/bitbake/lib/toaster/tests/browser/test_task_page.py
index 47c8c1aee..26f3dca83 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_task_page.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_task_page.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/browser/test_toastertable_ui.py b/poky/bitbake/lib/toaster/tests/browser/test_toastertable_ui.py
index b4f83447f..ef78cbb1e 100644
--- a/poky/bitbake/lib/toaster/tests/browser/test_toastertable_ui.py
+++ b/poky/bitbake/lib/toaster/tests/browser/test_toastertable_ui.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/builds/buildtest.py b/poky/bitbake/lib/toaster/tests/builds/buildtest.py
index 9f40f978d..872bbd377 100644
--- a/poky/bitbake/lib/toaster/tests/builds/buildtest.py
+++ b/poky/bitbake/lib/toaster/tests/builds/buildtest.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/builds/test_core_image_min.py b/poky/bitbake/lib/toaster/tests/builds/test_core_image_min.py
index 3d3aa2a8a..44b6cbec7 100644
--- a/poky/bitbake/lib/toaster/tests/builds/test_core_image_min.py
+++ b/poky/bitbake/lib/toaster/tests/builds/test_core_image_min.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/commands/test_loaddata.py b/poky/bitbake/lib/toaster/tests/commands/test_loaddata.py
index b633d9774..9e8d5553c 100644
--- a/poky/bitbake/lib/toaster/tests/commands/test_loaddata.py
+++ b/poky/bitbake/lib/toaster/tests/commands/test_loaddata.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py b/poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py
index 23a84a248..3c4fbe055 100644
--- a/poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py
+++ b/poky/bitbake/lib/toaster/tests/commands/test_lsupdates.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/commands/test_runbuilds.py b/poky/bitbake/lib/toaster/tests/commands/test_runbuilds.py
index 29bc7c900..e223b95fc 100644
--- a/poky/bitbake/lib/toaster/tests/commands/test_runbuilds.py
+++ b/poky/bitbake/lib/toaster/tests/commands/test_runbuilds.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/eventreplay/__init__.py b/poky/bitbake/lib/toaster/tests/eventreplay/__init__.py
index 3606cba4f..8ed6792ef 100644
--- a/poky/bitbake/lib/toaster/tests/eventreplay/__init__.py
+++ b/poky/bitbake/lib/toaster/tests/eventreplay/__init__.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py b/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py
index 6a3f74baa..455c408e9 100644
--- a/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py
+++ b/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster functional tests implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py b/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
index 2b3a2886c..56c84fba8 100644
--- a/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
+++ b/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster functional tests implementation
#
diff --git a/poky/bitbake/lib/toaster/tests/views/test_views.py b/poky/bitbake/lib/toaster/tests/views/test_views.py
index 477654ea5..68d9e9de1 100644
--- a/poky/bitbake/lib/toaster/tests/views/test_views.py
+++ b/poky/bitbake/lib/toaster/tests/views/test_views.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
diff --git a/poky/bitbake/lib/toaster/toastergui/static/js/importlayer.js b/poky/bitbake/lib/toaster/toastergui/static/js/importlayer.js
index 296483985..8e2032de2 100644
--- a/poky/bitbake/lib/toaster/toastergui/static/js/importlayer.js
+++ b/poky/bitbake/lib/toaster/toastergui/static/js/importlayer.js
@@ -17,11 +17,15 @@ function importLayerPageInit (ctx) {
var currentLayerDepSelection;
var validLayerName = /^(\w|-)+$/;
+ /* Catch 'disable' race condition between type-ahead started and "input change" */
+ var typeAheadStarted = 0;
+
libtoaster.makeTypeahead(layerDepInput,
libtoaster.ctx.layersTypeAheadUrl,
{ include_added: "true" }, function(item){
currentLayerDepSelection = item;
layerDepBtn.removeAttr("disabled");
+ typeAheadStarted = 1;
});
layerDepInput.on("typeahead:select", function(event, data){
@@ -34,7 +38,10 @@ function importLayerPageInit (ctx) {
// disable the "Add layer" button when the layer input typeahead is empty
// or not in the typeahead choices
layerDepInput.on("input change", function(){
- layerDepBtn.attr("disabled","disabled");
+ if (0 == typeAheadStarted) {
+ layerDepBtn.attr("disabled","disabled");
+ }
+ typeAheadStarted = 0;
});
/* We automatically add "openembedded-core" layer for convenience as a
@@ -50,6 +57,7 @@ function importLayerPageInit (ctx) {
});
layerDepBtn.click(function(){
+ typeAheadStarted = 0;
if (currentLayerDepSelection == undefined)
return;
@@ -77,7 +85,7 @@ function importLayerPageInit (ctx) {
$("#layer-deps-list").append(newLayerDep);
- libtoaster.getLayerDepsForProject(currentLayerDepSelection.layerdetailurl,
+ libtoaster.getLayerDepsForProject(currentLayerDepSelection.xhrLayerUrl,
function (data){
/* These are the dependencies of the layer added as a dependency */
if (data.list.length > 0) {
diff --git a/poky/bitbake/lib/toaster/toastermain/management/commands/checksocket.py b/poky/bitbake/lib/toaster/toastermain/management/commands/checksocket.py
index c1758f340..811fd5d51 100644
--- a/poky/bitbake/lib/toaster/toastermain/management/commands/checksocket.py
+++ b/poky/bitbake/lib/toaster/toastermain/management/commands/checksocket.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# BitBake Toaster Implementation
#