Diffstat (limited to 'poky/bitbake/lib/bb')
-rw-r--r--  poky/bitbake/lib/bb/codeparser.py          2
-rw-r--r--  poky/bitbake/lib/bb/command.py            14
-rw-r--r--  poky/bitbake/lib/bb/cooker.py            213
-rw-r--r--  poky/bitbake/lib/bb/data.py                1
-rw-r--r--  poky/bitbake/lib/bb/event.py               8
-rw-r--r--  poky/bitbake/lib/bb/fetch2/__init__.py     1
-rw-r--r--  poky/bitbake/lib/bb/fetch2/git.py         15
-rw-r--r--  poky/bitbake/lib/bb/parse/__init__.py      8
-rw-r--r--  poky/bitbake/lib/bb/runqueue.py           10
-rw-r--r--  poky/bitbake/lib/bb/server/process.py     11
-rw-r--r--  poky/bitbake/lib/bb/tests/codeparser.py   26
-rw-r--r--  poky/bitbake/lib/bb/tinfoil.py             8
-rw-r--r--  poky/bitbake/lib/bb/utils.py              23
13 files changed, 172 insertions(+), 168 deletions(-)
diff --git a/poky/bitbake/lib/bb/codeparser.py b/poky/bitbake/lib/bb/codeparser.py
index d6b8102585..eabeda591a 100644
--- a/poky/bitbake/lib/bb/codeparser.py
+++ b/poky/bitbake/lib/bb/codeparser.py
@@ -82,7 +82,7 @@ def add_module_functions(fn, functions, namespace):
execs.remove(e)
execs.add(namespace + "." + e)
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy()]
- #bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, src, parser.references, parser.execs, parser.var_execs, parser.contains))
+ #bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))
def update_module_dependencies(d):
for mod in modulecode_deps:
diff --git a/poky/bitbake/lib/bb/command.py b/poky/bitbake/lib/bb/command.py
index b494f84a0a..f2ee587161 100644
--- a/poky/bitbake/lib/bb/command.py
+++ b/poky/bitbake/lib/bb/command.py
@@ -85,8 +85,6 @@ class Command:
if not hasattr(command_method, 'readonly') or not getattr(command_method, 'readonly'):
return None, "Not able to execute not readonly commands in readonly mode"
try:
- if command != "ping":
- self.cooker.process_inotify_updates_apply()
if getattr(command_method, 'needconfig', True):
self.cooker.updateCacheSync()
result = command_method(self, commandline)
@@ -110,7 +108,6 @@ class Command:
def runAsyncCommand(self, _, process_server, halt):
try:
- self.cooker.process_inotify_updates_apply()
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
# updateCache will trigger a shutdown of the parser
# and then raise BBHandledException triggering an exit
@@ -310,6 +307,11 @@ class CommandsSync:
return ret
getLayerPriorities.readonly = True
+ def revalidateCaches(self, command, params):
+ """Called by UI clients when metadata may have changed"""
+ command.cooker.revalidateCaches()
+ parseConfiguration.needconfig = False
+
def getRecipes(self, command, params):
try:
mc = params[0]
@@ -779,3 +781,9 @@ class CommandsAsync:
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc])
command.finishAsyncCommand()
findSigInfo.needcache = False
+
+ def getTaskSignatures(self, command, params):
+ res = command.cooker.getTaskSignatures(params[0], params[1])
+ bb.event.fire(bb.event.GetTaskSignatureResult(res), command.cooker.data)
+ command.finishAsyncCommand()
+ getTaskSignatures.needcache = True
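
The new commands follow BitBake's existing convention of tagging command methods with attributes such as readonly, needconfig and needcache, which the dispatcher then inspects with getattr() defaults. A minimal standalone sketch of that pattern, using illustrative class and function names rather than BitBake's real implementation:

class Commands:
    def getLayerPriorities(self, params):
        return ["layer priorities ..."]
    getLayerPriorities.readonly = True        # safe to run in read-only server mode

    def getTaskSignatures(self, params):
        return ["task signatures ..."]
    getTaskSignatures.needcache = True        # requires a fully parsed recipe cache

def dispatch(commands, name, params, readonly_mode=False):
    method = getattr(commands, name)
    # Reject state-changing commands in read-only mode, mirroring the
    # getattr(command_method, 'readonly') check in Command.runCommand() above.
    if readonly_mode and not getattr(method, "readonly", False):
        return None, "Not able to execute non-readonly commands in readonly mode"
    return method(params), None

print(dispatch(Commands(), "getLayerPriorities", [], readonly_mode=True))
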
diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py
index 064e3cae6e..599c7ddaa2 100644
--- a/poky/bitbake/lib/bb/cooker.py
+++ b/poky/bitbake/lib/bb/cooker.py
@@ -22,7 +22,6 @@ from bb import utils, data, parse, event, cache, providers, taskdata, runqueue,
import queue
import signal
import prserv.serv
-import pyinotify
import json
import pickle
import codecs
@@ -173,17 +172,9 @@ class BBCooker:
self.waitIdle = server.wait_for_idle
bb.debug(1, "BBCooker starting %s" % time.time())
- sys.stdout.flush()
- self.configwatcher = None
- self.confignotifier = None
-
- self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
- pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
- pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
-
- self.watcher = None
- self.notifier = None
+ self.configwatched = {}
+ self.parsewatched = {}
# If being called by something like tinfoil, we need to clean cached data
# which may now be invalid
@@ -194,8 +185,6 @@ class BBCooker:
self.hashserv = None
self.hashservaddr = None
- self.inotify_modified_files = []
-
# TOSTOP must not be set or our children will hang when they output
try:
fd = sys.stdout.fileno()
@@ -219,53 +208,13 @@ class BBCooker:
signal.signal(signal.SIGHUP, self.sigterm_exception)
bb.debug(1, "BBCooker startup complete %s" % time.time())
- sys.stdout.flush()
-
- self.inotify_threadlock = threading.Lock()
def init_configdata(self):
if not hasattr(self, "data"):
self.initConfigurationData()
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
- sys.stdout.flush()
self.handlePRServ()
- def setupConfigWatcher(self):
- with bb.utils.lock_timeout(self.inotify_threadlock):
- if self.configwatcher:
- self.configwatcher.close()
- self.confignotifier = None
- self.configwatcher = None
- self.configwatcher = pyinotify.WatchManager()
- self.configwatcher.bbseen = set()
- self.configwatcher.bbwatchedfiles = set()
- self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
-
- def setupParserWatcher(self):
- with bb.utils.lock_timeout(self.inotify_threadlock):
- if self.watcher:
- self.watcher.close()
- self.notifier = None
- self.watcher = None
- self.watcher = pyinotify.WatchManager()
- self.watcher.bbseen = set()
- self.watcher.bbwatchedfiles = set()
- self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
-
- def process_inotify_updates(self):
- with bb.utils.lock_timeout(self.inotify_threadlock):
- for n in [self.confignotifier, self.notifier]:
- if n and n.check_events(timeout=0):
- # read notified events and enqueue them
- n.read_events()
-
- def process_inotify_updates_apply(self):
- with bb.utils.lock_timeout(self.inotify_threadlock):
- for n in [self.confignotifier, self.notifier]:
- if n and n.check_events(timeout=0):
- n.read_events()
- n.process_events()
-
def _baseconfig_set(self, value):
if value and not self.baseconfig_valid:
bb.server.process.serverlog("Base config valid")
@@ -280,88 +229,16 @@ class BBCooker:
bb.server.process.serverlog("Parse cache invalidated")
self.parsecache_valid = value
- def config_notifications(self, event):
- if event.maskname == "IN_Q_OVERFLOW":
- bb.warn("inotify event queue overflowed, invalidating caches.")
- self._parsecache_set(False)
- self._baseconfig_set(False)
- bb.parse.clear_cache()
- return
- if not event.pathname in self.configwatcher.bbwatchedfiles:
- return
- if "IN_ISDIR" in event.maskname:
- if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
- if event.pathname in self.configwatcher.bbseen:
- self.configwatcher.bbseen.remove(event.pathname)
- # Could remove all entries starting with the directory but for now...
- bb.parse.clear_cache()
- if not event.pathname in self.inotify_modified_files:
- self.inotify_modified_files.append(event.pathname)
- self._baseconfig_set(False)
-
- def notifications(self, event):
- if event.maskname == "IN_Q_OVERFLOW":
- bb.warn("inotify event queue overflowed, invalidating caches.")
- self._parsecache_set(False)
- bb.parse.clear_cache()
- return
- if event.pathname.endswith("bitbake-cookerdaemon.log") \
- or event.pathname.endswith("bitbake.lock"):
- return
- if "IN_ISDIR" in event.maskname:
- if "IN_CREATE" in event.maskname or "IN_DELETE" in event.maskname:
- if event.pathname in self.watcher.bbseen:
- self.watcher.bbseen.remove(event.pathname)
- # Could remove all entries starting with the directory but for now...
- bb.parse.clear_cache()
- if not event.pathname in self.inotify_modified_files:
- self.inotify_modified_files.append(event.pathname)
- self._parsecache_set(False)
+ def add_filewatch(self, deps, configwatcher=False):
+ if configwatcher:
+ watcher = self.configwatched
+ else:
+ watcher = self.parsewatched
- def add_filewatch(self, deps, watcher=None, dirs=False):
- if not watcher:
- watcher = self.watcher
for i in deps:
- watcher.bbwatchedfiles.add(i[0])
- if dirs:
- f = i[0]
- else:
- f = os.path.dirname(i[0])
- if f in watcher.bbseen:
- continue
- watcher.bbseen.add(f)
- watchtarget = None
- while True:
- # We try and add watches for files that don't exist but if they did, would influence
- # the parser. The parent directory of these files may not exist, in which case we need
- # to watch any parent that does exist for changes.
- try:
- watcher.add_watch(f, self.watchmask, quiet=False)
- if watchtarget:
- watcher.bbwatchedfiles.add(watchtarget)
- break
- except pyinotify.WatchManagerError as e:
- if 'ENOENT' in str(e):
- watchtarget = f
- f = os.path.dirname(f)
- if f in watcher.bbseen:
- break
- watcher.bbseen.add(f)
- continue
- if 'ENOSPC' in str(e):
- providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?")
- providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.")
- providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=<value>.")
- providerlog.error("Root privilege is required to modify max_user_watches.")
- raise
-
- def handle_inotify_updates(self):
- # reload files for which we got notifications
- for p in self.inotify_modified_files:
- bb.parse.update_cache(p)
- if p in bb.parse.BBHandler.cached_statements:
- del bb.parse.BBHandler.cached_statements[p]
- self.inotify_modified_files = []
+ f = i[0]
+ mtime = i[1]
+ watcher[f] = mtime
def sigterm_exception(self, signum, stackframe):
if signum == signal.SIGTERM:
@@ -392,8 +269,7 @@ class BBCooker:
if mod not in self.orig_sysmodules:
del sys.modules[mod]
- self.handle_inotify_updates()
- self.setupConfigWatcher()
+ self.configwatched = {}
# Need to preserve BB_CONSOLELOG over resets
consolelog = None
@@ -436,7 +312,7 @@ class BBCooker:
self.disableDataTracking()
for mc in self.databuilder.mcdata.values():
- self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
+ self.add_filewatch(mc.getVar("__base_depends", False), configwatcher=True)
self._baseconfig_set(True)
self._parsecache_set(False)
@@ -486,6 +362,29 @@ class BBCooker:
if hasattr(self, "data"):
self.data.disableTracking()
+ def revalidateCaches(self):
+ bb.parse.clear_cache()
+
+ clean = True
+ for f in self.configwatched:
+ if not bb.parse.check_mtime(f, self.configwatched[f]):
+ bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+ self._baseconfig_set(False)
+ self._parsecache_set(False)
+ clean = False
+ break
+
+ if clean:
+ for f in self.parsewatched:
+ if not bb.parse.check_mtime(f, self.parsewatched[f]):
+ bb.server.process.serverlog("Found %s changed, invalid cache" % f)
+ self._parsecache_set(False)
+ clean = False
+ break
+
+ if not clean:
+ bb.parse.BBHandler.cached_statements = {}
+
def parseConfiguration(self):
self.updateCacheSync()
@@ -566,6 +465,7 @@ class BBCooker:
# Now update all the variables not in the datastore to match
self.configuration.env = environment
+ self.revalidateCaches()
if not clean:
logger.debug("Base environment change, triggering reparse")
self.reset()
@@ -1542,6 +1442,37 @@ class BBCooker:
self.idleCallBackRegister(buildFileIdle, rq)
+ def getTaskSignatures(self, target, tasks):
+ sig = []
+ getAllTaskSignatures = False
+
+ if not tasks:
+ tasks = ["do_build"]
+ getAllTaskSignatures = True
+
+ for task in tasks:
+ taskdata, runlist = self.buildTaskData(target, task, self.configuration.halt)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
+ rq.rqdata.prepare()
+
+ for l in runlist:
+ mc, pn, taskname, fn = l
+
+ taskdep = rq.rqdata.dataCaches[mc].task_deps[fn]
+ for t in taskdep['tasks']:
+ if t in taskdep['nostamp'] or "setscene" in t:
+ continue
+ tid = bb.runqueue.build_tid(mc, fn, t)
+
+ if t in task or getAllTaskSignatures:
+ try:
+ rq.rqdata.prepare_task_hash(tid)
+ sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
+ except KeyError:
+ sig.append(self.getTaskSignatures(target, [t])[0])
+
+ return sig
+
def buildTargets(self, targets, task):
"""
Attempt to build the targets specified
@@ -1644,8 +1575,6 @@ class BBCooker:
if self.state == state.running:
return
- self.handle_inotify_updates()
-
if not self.baseconfig_valid:
logger.debug("Reloading base configuration data")
self.initConfigurationData()
@@ -1667,7 +1596,7 @@ class BBCooker:
if self.state != state.parsing and not self.parsecache_valid:
bb.server.process.serverlog("Parsing started")
- self.setupParserWatcher()
+ self.parsewatched = {}
bb.parse.siggen.reset(self.data)
self.parseConfiguration ()
@@ -1692,9 +1621,9 @@ class BBCooker:
total_masked += masked
searchdirs |= set(search)
- # Add inotify watches for directories searched for bb/bbappend files
+ # Add mtimes for directories searched for bb/bbappend files
for dirent in searchdirs:
- self.add_filewatch([[dirent]], dirs=True)
+ self.add_filewatch([(dirent, bb.parse.cached_mtime_noerror(dirent))])
self.parser = CookerParser(self, mcfilelist, total_masked)
self._parsecache_set(True)
@@ -1881,7 +1810,7 @@ class CookerCollectFiles(object):
collectlog.error("no recipe files to build, check your BBPATH and BBFILES?")
bb.event.fire(CookerExit(), eventdata)
- # We need to track where we look so that we can add inotify watches. There
+ # We need to track where we look so that we can know when the cache is invalid. There
# is no nice way to do this, this is horrid. We intercept the os.listdir()
# (or os.scandir() for python 3.6+) calls while we run glob().
origlistdir = os.listdir
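
In place of the deleted pyinotify machinery, the cooker now records the mtime of every file it depends on (add_filewatch) and compares those recorded values on demand (revalidateCaches). A hedged, self-contained sketch of that approach; record_watch() and caches_still_valid() are illustrative names, not BitBake functions:

import os

watched = {}   # path -> mtime recorded at parse time

def record_watch(path):
    try:
        watched[path] = os.stat(path).st_mtime
    except OSError:
        watched[path] = 0   # missing file: if it appears later, caches go stale

def caches_still_valid():
    for path, old_mtime in watched.items():
        try:
            current = os.stat(path).st_mtime
        except OSError:
            current = 0
        if current != old_mtime:
            return False    # this file changed since it was recorded
    return True
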
diff --git a/poky/bitbake/lib/bb/data.py b/poky/bitbake/lib/bb/data.py
index 3ee8f5e7db..505f42950f 100644
--- a/poky/bitbake/lib/bb/data.py
+++ b/poky/bitbake/lib/bb/data.py
@@ -285,6 +285,7 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
value += "\n_remove of %s" % r
deps |= r2.references
deps = deps | (keys & r2.execs)
+ value = handle_contains(value, r2.contains, exclusions, d)
return value
deps = set()
diff --git a/poky/bitbake/lib/bb/event.py b/poky/bitbake/lib/bb/event.py
index 0d0e0a68aa..f8acacd80d 100644
--- a/poky/bitbake/lib/bb/event.py
+++ b/poky/bitbake/lib/bb/event.py
@@ -857,6 +857,14 @@ class FindSigInfoResult(Event):
Event.__init__(self)
self.result = result
+class GetTaskSignatureResult(Event):
+ """
+ Event to return results from GetTaskSignatures command
+ """
+ def __init__(self, sig):
+ Event.__init__(self)
+ self.sig = sig
+
class ParseError(Event):
"""
Event to indicate parse failed
diff --git a/poky/bitbake/lib/bb/fetch2/__init__.py b/poky/bitbake/lib/bb/fetch2/__init__.py
index 765aedd51d..ffb1a92b26 100644
--- a/poky/bitbake/lib/bb/fetch2/__init__.py
+++ b/poky/bitbake/lib/bb/fetch2/__init__.py
@@ -874,6 +874,7 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
'AWS_SECRET_ACCESS_KEY',
'AWS_DEFAULT_REGION',
'GIT_CACHE_PATH',
+ 'REMOTE_CONTAINERS_IPC',
'SSL_CERT_DIR']
def get_fetcher_environment(d):
diff --git a/poky/bitbake/lib/bb/fetch2/git.py b/poky/bitbake/lib/bb/fetch2/git.py
index e11271b757..4385d0b37a 100644
--- a/poky/bitbake/lib/bb/fetch2/git.py
+++ b/poky/bitbake/lib/bb/fetch2/git.py
@@ -373,20 +373,17 @@ class Git(FetchMethod):
try:
# Since clones can be bare, use --absolute-git-dir instead of --show-toplevel
output = runfetchcmd("LANG=C %s rev-parse --absolute-git-dir" % ud.basecmd, d, workdir=ud.clonedir)
+ toplevel = output.rstrip()
- toplevel = os.path.abspath(output.rstrip())
- abs_clonedir = os.path.abspath(ud.clonedir).rstrip('/')
- # The top level Git directory must either be the clone directory
- # or a child of the clone directory. Any ancestor directory of
- # the clone directory is not valid as the Git directory (and
- # probably belongs to some other unrelated repository), so a
- # clone is required
- if os.path.commonprefix([abs_clonedir, toplevel]) != abs_clonedir:
- logger.warning("Top level directory '%s' doesn't match expected '%s'. Re-cloning", toplevel, ud.clonedir)
+ if not bb.utils.path_is_descendant(toplevel, ud.clonedir):
+ logger.warning("Top level directory '%s' is not a descendant of '%s'. Re-cloning", toplevel, ud.clonedir)
needs_clone = True
except bb.fetch2.FetchError as e:
logger.warning("Unable to get top level for %s (not a git directory?): %s", ud.clonedir, e)
needs_clone = True
+ except FileNotFoundError as e:
+ logger.warning("%s", e)
+ needs_clone = True
if needs_clone:
shutil.rmtree(ud.clonedir)
diff --git a/poky/bitbake/lib/bb/parse/__init__.py b/poky/bitbake/lib/bb/parse/__init__.py
index 4cd82f115b..a4358f1374 100644
--- a/poky/bitbake/lib/bb/parse/__init__.py
+++ b/poky/bitbake/lib/bb/parse/__init__.py
@@ -60,6 +60,14 @@ def cached_mtime_noerror(f):
return 0
return __mtime_cache[f]
+def check_mtime(f, mtime):
+ try:
+ current_mtime = os.stat(f)[stat.ST_MTIME]
+ __mtime_cache[f] = current_mtime
+ except OSError:
+ current_mtime = 0
+ return current_mtime == mtime
+
def update_mtime(f):
try:
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
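
A possible usage sketch for the new bb.parse.check_mtime() helper, paired with the existing cached_mtime_noerror(): capture a file's mtime when it is first parsed, then later ask whether it is still current (check_mtime also refreshes the internal mtime cache). Assumes a BitBake environment on the Python path; the conf path is illustrative:

import bb.parse

conf = "conf/local.conf"                              # illustrative path
seen_mtime = bb.parse.cached_mtime_noerror(conf)      # mtime at parse time (0 if missing)

# ... later, when deciding whether cached parse results can be reused ...
if not bb.parse.check_mtime(conf, seen_mtime):
    print("%s changed since it was parsed; caches are stale" % conf)
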
diff --git a/poky/bitbake/lib/bb/runqueue.py b/poky/bitbake/lib/bb/runqueue.py
index c88d7129ca..56147c50a8 100644
--- a/poky/bitbake/lib/bb/runqueue.py
+++ b/poky/bitbake/lib/bb/runqueue.py
@@ -1324,6 +1324,8 @@ class RunQueue:
if self.cooker.configuration.profile:
magic = "decafbadbad"
fakerootlogs = None
+
+ workerscript = os.path.realpath(os.path.dirname(__file__) + "/../../bin/bitbake-worker")
if fakeroot:
magic = magic + "beef"
mcdata = self.cooker.databuilder.mcdata[mc]
@@ -1332,10 +1334,10 @@ class RunQueue:
env = os.environ.copy()
for key, value in (var.split('=') for var in fakerootenv):
env[key] = value
- worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
+ worker = subprocess.Popen(fakerootcmd + [sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
else:
- worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ worker = subprocess.Popen([sys.executable, workerscript, magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
bb.utils.nonblockingfd(worker.stdout)
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)
@@ -3159,7 +3161,7 @@ class runQueuePipe():
if pipeout:
pipeout.close()
bb.utils.nonblockingfd(self.input)
- self.queue = b""
+ self.queue = bytearray()
self.d = d
self.rq = rq
self.rqexec = rqexec
@@ -3178,7 +3180,7 @@ class runQueuePipe():
start = len(self.queue)
try:
- self.queue = self.queue + (self.input.read(102400) or b"")
+ self.queue.extend(self.input.read(102400) or b"")
except (OSError, IOError) as e:
if e.errno != errno.EAGAIN:
raise
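
The runQueuePipe change from bytes to bytearray avoids copying the whole buffer on every read: bytes concatenation builds a new object each time, while bytearray.extend() grows the buffer in place and del buf[:n] drops consumed data. A small standalone illustration of that buffering pattern (not BitBake code):

buf = bytearray()

def feed(chunk: bytes):
    buf.extend(chunk or b"")           # in-place append, no full copy

def drain(upto: int) -> bytes:
    data = bytes(buf[:upto])           # take the consumed prefix
    del buf[:upto]                     # and remove it from the buffer
    return data

feed(b"hello ")
feed(b"world")
print(drain(5))                        # b'hello'
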
diff --git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py
index 40cb99bc97..d495ac6245 100644
--- a/poky/bitbake/lib/bb/server/process.py
+++ b/poky/bitbake/lib/bb/server/process.py
@@ -43,7 +43,8 @@ def currenttime():
def serverlog(msg):
print(str(os.getpid()) + " " + currenttime() + " " + msg)
- sys.stdout.flush()
+ #Seems a flush here triggers filesytem sync like behaviour and long hangs in the server
+ #sys.stdout.flush()
#
# When we have lockfile issues, try and find infomation about which process is
@@ -410,12 +411,6 @@ class ProcessServer():
nextsleep = 0.1
fds = []
- try:
- self.cooker.process_inotify_updates()
- except Exception as exc:
- serverlog("Exception %s in inofify updates broke the idle_thread, exiting" % traceback.format_exc())
- self.quit = True
-
with bb.utils.lock_timeout(self._idlefuncsLock):
items = list(self._idlefuns.items())
@@ -625,7 +620,7 @@ class BitBakeServer(object):
os.set_inheritable(self.bitbake_lock.fileno(), True)
os.set_inheritable(self.readypipein, True)
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
- os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
+ os.execl(sys.executable, sys.executable, serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(int(self.profile)), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface, profile):
diff --git a/poky/bitbake/lib/bb/tests/codeparser.py b/poky/bitbake/lib/bb/tests/codeparser.py
index a64c614b0b..b6f2b77ee3 100644
--- a/poky/bitbake/lib/bb/tests/codeparser.py
+++ b/poky/bitbake/lib/bb/tests/codeparser.py
@@ -436,6 +436,32 @@ esac
self.assertEqual(deps, set(["TESTVAR2"]))
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
+ def test_contains_vardeps_override_operators(self):
+ # Check override operators handle dependencies correctly with the contains functionality
+ expr_plain = 'testval'
+ expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
+ expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
+ expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
+ # Check dependencies
+ self.d.setVar('ANOTHERVAR', expr_plain)
+ self.d.prependVar('ANOTHERVAR', expr_prepend)
+ self.d.appendVar('ANOTHERVAR', expr_append)
+ self.d.setVar('ANOTHERVAR:remove', expr_remove)
+ self.d.setVar('TESTVAR1', 'blah')
+ self.d.setVar('TESTVAR2', 'testval2')
+ self.d.setVar('TESTVAR3', 'no-testval')
+ deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), set(), self.d, self.d)
+ self.assertEqual(sorted(values.splitlines()),
+ sorted([
+ expr_prepend + expr_plain + expr_append,
+ '_remove of ' + expr_remove,
+ 'TESTVAR1{testval1} = Unset',
+ 'TESTVAR2{testval2} = Set',
+ 'TESTVAR3{no-testval} = Set',
+ ]))
+ # Check final value
+ self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
+
#Currently no wildcard support
#def test_vardeps_wildcards(self):
# self.d.setVar("oe_libinstall", "echo test")
diff --git a/poky/bitbake/lib/bb/tinfoil.py b/poky/bitbake/lib/bb/tinfoil.py
index 91fbf1b13e..dcd3910cc4 100644
--- a/poky/bitbake/lib/bb/tinfoil.py
+++ b/poky/bitbake/lib/bb/tinfoil.py
@@ -325,11 +325,11 @@ class Tinfoil:
self.recipes_parsed = False
self.quiet = 0
self.oldhandlers = self.logger.handlers[:]
+ self.localhandlers = []
if setup_logging:
# This is the *client-side* logger, nothing to do with
# logging messages from the server
bb.msg.logger_create('BitBake', output)
- self.localhandlers = []
for handler in self.logger.handlers:
if handler not in self.oldhandlers:
self.localhandlers.append(handler)
@@ -449,6 +449,12 @@ class Tinfoil:
self.run_actions(config_params)
self.recipes_parsed = True
+ def modified_files(self):
+ """
+ Notify the server it needs to revalidate it's caches since the client has modified files
+ """
+ self.run_command("revalidateCaches")
+
def run_command(self, command, *params, handle_events=True):
"""
Run a command on the server (as implemented in bb.command).
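
A usage sketch for the new Tinfoil.modified_files() helper: a client that edits metadata on disk tells the long-running server to revalidate its caches before issuing further commands. Assumes a configured BitBake environment; the actual file edit is elided:

import bb.tinfoil

with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare(config_only=True)
    # ... client writes or edits a .conf/.bb/.bbappend file here ...
    tinfoil.modified_files()   # server re-checks mtimes via the revalidateCaches command
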
diff --git a/poky/bitbake/lib/bb/utils.py b/poky/bitbake/lib/bb/utils.py
index 0624a4f3e9..b401fa5ec7 100644
--- a/poky/bitbake/lib/bb/utils.py
+++ b/poky/bitbake/lib/bb/utils.py
@@ -1828,6 +1828,29 @@ def mkstemp(suffix=None, prefix=None, dir=None, text=False):
prefix = tempfile.gettempprefix() + entropy
return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
+def path_is_descendant(descendant, ancestor):
+ """
+ Returns True if the path `descendant` is a descendant of `ancestor`
+ (including being equivalent to `ancestor` itself). Otherwise returns False.
+ Correctly accounts for symlinks, bind mounts, etc. by using
+ os.path.samestat() to compare paths
+
+ May raise any exception that os.stat() raises
+ """
+
+ ancestor_stat = os.stat(ancestor)
+
+ # Recurse up each directory component of the descendant to see if it is
+ # equivalent to the ancestor
+ check_dir = os.path.abspath(descendant).rstrip("/")
+ while check_dir:
+ check_stat = os.stat(check_dir)
+ if os.path.samestat(check_stat, ancestor_stat):
+ return True
+ check_dir = os.path.dirname(check_dir).rstrip("/")
+
+ return False
+
# If we don't have a timeout of some kind and a process/thread exits badly (for example
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
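
Finally, a usage sketch for the new bb.utils.path_is_descendant(), which the git fetcher above now uses instead of an os.path.commonprefix() string comparison: because it compares inodes via os.path.samestat(), symlinked or bind-mounted paths that resolve to the same directory are handled correctly, and it propagates whatever os.stat() raises for missing paths. Assumes BitBake is importable; the example paths exist on typical Linux systems:

import bb.utils

print(bb.utils.path_is_descendant("/usr/bin", "/usr"))   # True: /usr/bin lives under /usr
print(bb.utils.path_is_descendant("/usr", "/usr/bin"))   # False: an ancestor is not a descendant
print(bb.utils.path_is_descendant("/usr", "/usr"))       # True: a path counts as its own descendant
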