diff options
Diffstat (limited to 'import-layers/yocto-poky/bitbake/lib/bb')
43 files changed, 3504 insertions, 1975 deletions
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/COW.py b/import-layers/yocto-poky/bitbake/lib/bb/COW.py index 36ebbd9d1..bec620809 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/COW.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/COW.py @@ -3,7 +3,7 @@ # # This is a copy on write dictionary and set which abuses classes to try and be nice and fast. # -# Copyright (C) 2006 Tim Amsell +# Copyright (C) 2006 Tim Ansell # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as diff --git a/import-layers/yocto-poky/bitbake/lib/bb/__init__.py b/import-layers/yocto-poky/bitbake/lib/bb/__init__.py index bfe0ca5d8..526883154 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/__init__.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/__init__.py @@ -21,7 +21,7 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -__version__ = "1.34.0" +__version__ = "1.36.0" import sys if sys.version_info < (3, 4, 0): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cache.py b/import-layers/yocto-poky/bitbake/lib/bb/cache.py index e7eeb4f50..86ce0e786 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/cache.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/cache.py @@ -86,9 +86,9 @@ class RecipeInfoCommon(object): class CoreRecipeInfo(RecipeInfoCommon): __slots__ = () - cachefile = "bb_cache.dat" + cachefile = "bb_cache.dat" - def __init__(self, filename, metadata): + def __init__(self, filename, metadata): self.file_depends = metadata.getVar('__depends', False) self.timestamp = bb.parse.cached_mtime(filename) self.variants = self.listvar('__VARIANTS', metadata) + [''] @@ -107,7 +107,7 @@ class CoreRecipeInfo(RecipeInfoCommon): self.pn = self.getvar('PN', metadata) self.packages = self.listvar('PACKAGES', metadata) - if not self.pn in self.packages: + if not self.packages: self.packages.append(self.pn) 
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata) @@ -122,7 +122,7 @@ class CoreRecipeInfo(RecipeInfoCommon): self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata) self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata) self.stamp = self.getvar('STAMP', metadata) - self.stampclean = self.getvar('STAMPCLEAN', metadata) + self.stampclean = self.getvar('STAMPCLEAN', metadata) self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata) self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True) self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata) @@ -217,7 +217,7 @@ class CoreRecipeInfo(RecipeInfoCommon): cachedata.packages_dynamic[package].append(fn) # Build hash of runtime depends and recommends - for package in self.packages + [self.pn]: + for package in self.packages: cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package] cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package] @@ -375,8 +375,8 @@ class Cache(NoCache): data = databuilder.data # Pass caches_array information into Cache Constructor - # It will be used later for deciding whether we - # need extra cache file dump/load support + # It will be used later for deciding whether we + # need extra cache file dump/load support self.caches_array = caches_array self.cachedir = data.getVar("CACHE") self.clean = set() @@ -421,7 +421,7 @@ class Cache(NoCache): cachesize += os.fstat(cachefile.fileno()).st_size bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data) - + for cache_class in self.caches_array: cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) with open(cachefile, "rb") as cachefile: @@ -438,8 +438,8 @@ class Cache(NoCache): logger.info('Cache version mismatch, rebuilding...') return elif bitbake_ver != bb.__version__: - logger.info('Bitbake version mismatch, rebuilding...') - return + logger.info('Bitbake version mismatch, 
rebuilding...') + return # Load the rest of the cache file current_progress = 0 @@ -616,13 +616,13 @@ class Cache(NoCache): a = fl.find(":True") b = fl.find(":False") if ((a < 0) and b) or ((b > 0) and (b < a)): - f = fl[:b+6] - fl = fl[b+7:] + f = fl[:b+6] + fl = fl[b+7:] elif ((b < 0) and a) or ((a > 0) and (a < b)): - f = fl[:a+5] - fl = fl[a+6:] + f = fl[:a+5] + fl = fl[a+6:] else: - break + break fl = fl.strip() if "*" in f: continue @@ -886,4 +886,3 @@ class MultiProcessCache(object): p.dump([data, self.__class__.CACHE_VERSION]) bb.utils.unlockfile(glf) - diff --git a/import-layers/yocto-poky/bitbake/lib/bb/command.py b/import-layers/yocto-poky/bitbake/lib/bb/command.py index a919f58d2..6c966e3db 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/command.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/command.py @@ -50,6 +50,8 @@ class CommandFailed(CommandExit): def __init__(self, message): self.error = message CommandExit.__init__(self, 1) + def __str__(self): + return "Command execution failed: %s" % self.error class CommandError(Exception): pass @@ -76,7 +78,8 @@ class Command: if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'): return None, "Not able to execute not readonly commands in readonly mode" try: - if getattr(command_method, 'needconfig', False): + self.cooker.process_inotify_updates() + if getattr(command_method, 'needconfig', True): self.cooker.updateCacheSync() result = command_method(self, commandline) except CommandError as exc: @@ -96,6 +99,7 @@ class Command: def runAsyncCommand(self): try: + self.cooker.process_inotify_updates() if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown): # updateCache will trigger a shutdown of the parser # and then raise BBHandledException triggering an exit @@ -141,6 +145,9 @@ class Command: self.currentAsyncCommand = None self.cooker.finishcommand() + def reset(self): + self.remotedatastores = 
bb.remotedata.RemoteDatastores(self.cooker) + def split_mc_pn(pn): if pn.startswith("multiconfig:"): _, mc, pn = pn.split(":", 2) @@ -233,59 +240,15 @@ class CommandsSync: command.cooker.configuration.postfile = postfiles setPrePostConfFiles.needconfig = False - def getCpuCount(self, command, params): - """ - Get the CPU count on the bitbake server - """ - return bb.utils.cpu_count() - getCpuCount.readonly = True - getCpuCount.needconfig = False - def matchFile(self, command, params): fMatch = params[0] return command.cooker.matchFile(fMatch) matchFile.needconfig = False - def generateNewImage(self, command, params): - image = params[0] - base_image = params[1] - package_queue = params[2] - timestamp = params[3] - description = params[4] - return command.cooker.generateNewImage(image, base_image, - package_queue, timestamp, description) - - def ensureDir(self, command, params): - directory = params[0] - bb.utils.mkdirhier(directory) - ensureDir.needconfig = False - - def setVarFile(self, command, params): - """ - Save a variable in a file; used for saving in a configuration file - """ - var = params[0] - val = params[1] - default_file = params[2] - op = params[3] - command.cooker.modifyConfigurationVar(var, val, default_file, op) - setVarFile.needconfig = False - - def removeVarFile(self, command, params): - """ - Remove a variable declaration from a file - """ - var = params[0] - command.cooker.removeConfigurationVar(var) - removeVarFile.needconfig = False - - def createConfigFile(self, command, params): - """ - Create an extra configuration file - """ - name = params[0] - command.cooker.createConfigFile(name) - createConfigFile.needconfig = False + def getUIHandlerNum(self, command, params): + return bb.event.get_uihandler() + getUIHandlerNum.needconfig = False + getUIHandlerNum.readonly = True def setEventMask(self, command, params): handlerNum = params[0] @@ -323,6 +286,7 @@ class CommandsSync: parseConfiguration.needconfig = False def getLayerPriorities(self, 
command, params): + command.cooker.parseConfiguration() ret = [] # regex objects cannot be marshalled by xmlrpc for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities: @@ -354,6 +318,38 @@ class CommandsSync: return command.cooker.recipecaches[mc].pkg_pepvpr getRecipeVersions.readonly = True + def getRecipeProvides(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].fn_provides + getRecipeProvides.readonly = True + + def getRecipePackages(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].packages + getRecipePackages.readonly = True + + def getRecipePackagesDynamic(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].packages_dynamic + getRecipePackagesDynamic.readonly = True + + def getRProviders(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].rproviders + getRProviders.readonly = True + def getRuntimeDepends(self, command, params): ret = [] try: @@ -592,11 +588,14 @@ class CommandsAsync: bfile = params[0] task = params[1] if len(params) > 2: - hidewarning = params[2] + internal = params[2] else: - hidewarning = False + internal = False - command.cooker.buildFile(bfile, task, hidewarning) + if internal: + command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True) + else: + command.cooker.buildFile(bfile, task) buildFile.needcache = False def buildTargets(self, command, params): @@ -646,17 +645,6 @@ class CommandsAsync: command.finishAsyncCommand() generateTargetsTree.needcache = True - def findCoreBaseFiles(self, command, params): - """ - Find certain files in COREBASE directory. i.e. 
Layers - """ - subdir = params[0] - filename = params[1] - - command.cooker.findCoreBaseFiles(subdir, filename) - command.finishAsyncCommand() - findCoreBaseFiles.needcache = False - def findConfigFiles(self, command, params): """ Find config files which provide appropriate values @@ -764,3 +752,14 @@ class CommandsAsync: command.finishAsyncCommand() clientComplete.needcache = False + def findSigInfo(self, command, params): + """ + Find signature info files via the signature generator + """ + pn = params[0] + taskname = params[1] + sigs = params[2] + res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data) + bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data) + command.finishAsyncCommand() + findSigInfo.needcache = False diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py index 3c9e88cd2..c7fdd7290 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py @@ -181,15 +181,15 @@ class BBCooker: self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications) self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \ pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \ - pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO + pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO self.watcher = pyinotify.WatchManager() self.watcher.bbseen = [] self.watcher.bbwatchedfiles = [] self.notifier = pyinotify.Notifier(self.watcher, self.notifications) - # If being called by something like tinfoil, we need to clean cached data + # If being called by something like tinfoil, we need to clean cached data # which may now be invalid - bb.parse.__mtime_cache = {} + bb.parse.clear_cache() bb.parse.BBHandler.cached_statements = {} self.ui_cmdline = None @@ -205,31 +205,11 @@ class BBCooker: self.inotify_modified_files = [] - def _process_inotify_updates(server, 
notifier_list, abort): - for n in notifier_list: - if n.check_events(timeout=0): - # read notified events and enqeue them - n.read_events() - n.process_events() + def _process_inotify_updates(server, cooker, abort): + cooker.process_inotify_updates() return 1.0 - self.configuration.server_register_idlecallback(_process_inotify_updates, [self.confignotifier, self.notifier]) - - self.baseconfig_valid = True - self.parsecache_valid = False - - # Take a lock so only one copy of bitbake can run against a given build - # directory at a time - if not self.lockBitbake(): - bb.fatal("Only one copy of bitbake should be run against a build directory") - try: - self.lock.seek(0) - self.lock.truncate() - if len(configuration.interface) >= 2: - self.lock.write("%s:%s\n" % (configuration.interface[0], configuration.interface[1])); - self.lock.flush() - except: - pass + self.configuration.server_register_idlecallback(_process_inotify_updates, self) # TOSTOP must not be set or our children will hang when they output try: @@ -253,10 +233,19 @@ class BBCooker: # Let SIGHUP exit as SIGTERM signal.signal(signal.SIGHUP, self.sigterm_exception) + def process_inotify_updates(self): + for n in [self.confignotifier, self.notifier]: + if n.check_events(timeout=0): + # read notified events and enqeue them + n.read_events() + n.process_events() + def config_notifications(self, event): if event.maskname == "IN_Q_OVERFLOW": bb.warn("inotify event queue overflowed, invalidating caches.") + self.parsecache_valid = False self.baseconfig_valid = False + bb.parse.clear_cache() return if not event.pathname in self.configwatcher.bbwatchedfiles: return @@ -268,6 +257,10 @@ class BBCooker: if event.maskname == "IN_Q_OVERFLOW": bb.warn("inotify event queue overflowed, invalidating caches.") self.parsecache_valid = False + bb.parse.clear_cache() + return + if event.pathname.endswith("bitbake-cookerdaemon.log") \ + or event.pathname.endswith("bitbake.lock"): return if not event.pathname in 
self.inotify_modified_files: self.inotify_modified_files.append(event.pathname) @@ -288,7 +281,7 @@ class BBCooker: watchtarget = None while True: # We try and add watches for files that don't exist but if they did, would influence - # the parser. The parent directory of these files may not exist, in which case we need + # the parser. The parent directory of these files may not exist, in which case we need # to watch any parent that does exist for changes. try: watcher.add_watch(f, self.watchmask, quiet=False) @@ -382,6 +375,15 @@ class BBCooker: self.data.renameVar("__depends", "__base_depends") self.add_filewatch(self.data.getVar("__base_depends", False), self.configwatcher) + self.baseconfig_valid = True + self.parsecache_valid = False + + def handlePRServ(self): + # Setup a PR Server based on the new configuration + try: + self.prhost = prserv.serv.auto_start(self.data) + except prserv.serv.PRServiceConfigError as e: + bb.fatal("Unable to start PR Server, exitting") def enableDataTracking(self): self.configuration.tracking = True @@ -393,138 +395,6 @@ class BBCooker: if hasattr(self, "data"): self.data.disableTracking() - def modifyConfigurationVar(self, var, val, default_file, op): - if op == "append": - self.appendConfigurationVar(var, val, default_file) - elif op == "set": - self.saveConfigurationVar(var, val, default_file, "=") - elif op == "earlyAssign": - self.saveConfigurationVar(var, val, default_file, "?=") - - - def appendConfigurationVar(self, var, val, default_file): - #add append var operation to the end of default_file - default_file = bb.cookerdata.findConfigFile(default_file, self.data) - - total = "#added by hob" - total += "\n%s += \"%s\"\n" % (var, val) - - with open(default_file, 'a') as f: - f.write(total) - - #add to history - loginfo = {"op":"append", "file":default_file, "line":total.count("\n")} - self.data.appendVar(var, val, **loginfo) - - def saveConfigurationVar(self, var, val, default_file, op): - - replaced = False - #do not save 
if nothing changed - if str(val) == self.data.getVar(var, False): - return - - conf_files = self.data.varhistory.get_variable_files(var) - - #format the value when it is a list - if isinstance(val, list): - listval = "" - for value in val: - listval += "%s " % value - val = listval - - topdir = self.data.getVar("TOPDIR", False) - - #comment or replace operations made on var - for conf_file in conf_files: - if topdir in conf_file: - with open(conf_file, 'r') as f: - contents = f.readlines() - - lines = self.data.varhistory.get_variable_lines(var, conf_file) - for line in lines: - total = "" - i = 0 - for c in contents: - total += c - i = i + 1 - if i==int(line): - end_index = len(total) - index = total.rfind(var, 0, end_index) - - begin_line = total.count("\n",0,index) - end_line = int(line) - - #check if the variable was saved before in the same way - #if true it replace the place where the variable was declared - #else it comments it - if contents[begin_line-1]== "#added by hob\n": - contents[begin_line] = "%s %s \"%s\"\n" % (var, op, val) - replaced = True - else: - for ii in range(begin_line, end_line): - contents[ii] = "#" + contents[ii] - - with open(conf_file, 'w') as f: - f.writelines(contents) - - if replaced == False: - #remove var from history - self.data.varhistory.del_var_history(var) - - #add var to the end of default_file - default_file = bb.cookerdata.findConfigFile(default_file, self.data) - - #add the variable on a single line, to be easy to replace the second time - total = "\n#added by hob" - total += "\n%s %s \"%s\"\n" % (var, op, val) - - with open(default_file, 'a') as f: - f.write(total) - - #add to history - loginfo = {"op":"set", "file":default_file, "line":total.count("\n")} - self.data.setVar(var, val, **loginfo) - - def removeConfigurationVar(self, var): - conf_files = self.data.varhistory.get_variable_files(var) - topdir = self.data.getVar("TOPDIR", False) - - for conf_file in conf_files: - if topdir in conf_file: - with open(conf_file, 
'r') as f: - contents = f.readlines() - - lines = self.data.varhistory.get_variable_lines(var, conf_file) - for line in lines: - total = "" - i = 0 - for c in contents: - total += c - i = i + 1 - if i==int(line): - end_index = len(total) - index = total.rfind(var, 0, end_index) - - begin_line = total.count("\n",0,index) - - #check if the variable was saved before in the same way - if contents[begin_line-1]== "#added by hob\n": - contents[begin_line-1] = contents[begin_line] = "\n" - else: - contents[begin_line] = "\n" - #remove var from history - self.data.varhistory.del_var_history(var, conf_file, line) - #remove variable - self.data.delVar(var) - - with open(conf_file, 'w') as f: - f.writelines(contents) - - def createConfigFile(self, name): - path = os.getcwd() - confpath = os.path.join(path, "conf", name) - open(confpath, 'w').close() - def parseConfiguration(self): # Set log file verbosity verboselogs = bb.utils.to_boolean(self.data.getVar("BB_VERBOSE_LOGS", False)) @@ -547,21 +417,27 @@ class BBCooker: self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS")) + self.parsecache_valid = False + def updateConfigOpts(self, options, environment, cmdline): self.ui_cmdline = cmdline clean = True for o in options: if o in ['prefile', 'postfile']: + # Only these options may require a reparse + try: + if getattr(self.configuration, o) == options[o]: + # Value is the same, no need to mark dirty + continue + except AttributeError: + pass + logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) + print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) clean = False - server_val = getattr(self.configuration, "%s_server" % o) - if not options[o] and server_val: - # restore value provided on server start - setattr(self.configuration, o, server_val) - continue setattr(self.configuration, o, options[o]) for k in bb.utils.approved_variables(): if k in environment and k not in self.configuration.env: - logger.debug(1, 
"Updating environment variable %s to %s" % (k, environment[k])) + logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k])) self.configuration.env[k] = environment[k] clean = False if k in self.configuration.env and k not in environment: @@ -569,14 +445,13 @@ class BBCooker: del self.configuration.env[k] clean = False if k not in self.configuration.env and k not in environment: - continue + continue if environment[k] != self.configuration.env[k]: - logger.debug(1, "Updating environment variable %s to %s" % (k, environment[k])) + logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k])) self.configuration.env[k] = environment[k] clean = False if not clean: logger.debug(1, "Base environment change, triggering reparse") - self.baseconfig_valid = False self.reset() def runCommands(self, server, data, abort): @@ -616,6 +491,12 @@ class BBCooker: if not pkgs_to_build: pkgs_to_build = [] + orig_tracking = self.configuration.tracking + if not orig_tracking: + self.enableDataTracking() + self.reset() + + if buildfile: # Parse the configuration here. 
We need to do it explicitly here since # this showEnvironment() code path doesn't use the cache @@ -660,6 +541,9 @@ class BBCooker: if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False): logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False)) + if not orig_tracking: + self.disableDataTracking() + self.reset() def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False): """ @@ -817,12 +701,12 @@ class BBCooker: depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn] + dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid)) + if not dotname in depend_tree["tdepends"]: + depend_tree["tdepends"][dotname] = [] for dep in rq.rqdata.runtaskentries[tid].depends: (depmc, depfn, deptaskname, deptaskfn) = bb.runqueue.split_tid_mcfn(dep) deppn = self.recipecaches[mc].pkg_fn[deptaskfn] - dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid)) - if not dotname in depend_tree["tdepends"]: - depend_tree["tdepends"][dotname] = [] depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep))) if taskfn not in seen_fns: seen_fns.append(taskfn) @@ -913,13 +797,13 @@ class BBCooker: seen_fns.append(taskfn) depend_tree["depends"][pn] = [] - for item in taskdata[mc].depids[taskfn]: + for dep in taskdata[mc].depids[taskfn]: pn_provider = "" if dep in taskdata[mc].build_targets and taskdata[mc].build_targets[dep]: fn_provider = taskdata[mc].build_targets[dep][0] pn_provider = self.recipecaches[mc].pkg_fn[fn_provider] else: - pn_provider = item + pn_provider = dep pn_provider = self.add_mc_prefix(mc, pn_provider) depend_tree["depends"][pn].append(pn_provider) @@ -1046,18 +930,6 @@ class BBCooker: providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee]) self.recipecaches[mc].preferred[providee] = provider - def findCoreBaseFiles(self, subdir, configfile): - corebase = self.data.getVar('COREBASE') or 
"" - paths = [] - for root, dirs, files in os.walk(corebase + '/' + subdir): - for d in dirs: - configfilepath = os.path.join(root, d, configfile) - if os.path.exists(configfilepath): - paths.append(os.path.join(root, d)) - - if paths: - bb.event.fire(bb.event.CoreBaseFilesFound(paths), self.data) - def findConfigFilePath(self, configfile): """ Find the location on disk of configfile and if it exists and was parsed by BitBake @@ -1314,12 +1186,26 @@ class BBCooker: """ Setup any variables needed before starting a build """ - t = time.gmtime() - if not self.data.getVar("BUILDNAME", False): - self.data.setVar("BUILDNAME", "${DATE}${TIME}") - self.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', t)) - self.data.setVar("DATE", time.strftime('%Y%m%d', t)) - self.data.setVar("TIME", time.strftime('%H%M%S', t)) + t = time.gmtime() + for mc in self.databuilder.mcdata: + ds = self.databuilder.mcdata[mc] + if not ds.getVar("BUILDNAME", False): + ds.setVar("BUILDNAME", "${DATE}${TIME}") + ds.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', t)) + ds.setVar("DATE", time.strftime('%Y%m%d', t)) + ds.setVar("TIME", time.strftime('%H%M%S', t)) + + def reset_mtime_caches(self): + """ + Reset mtime caches - this is particularly important when memory resident as something + which is cached is not unlikely to have changed since the last invocation (e.g. a + file associated with a recipe might have been modified by the user). 
+ """ + build.reset_cache() + bb.fetch._checksum_cache.mtime_cache.clear() + siggen_cache = getattr(bb.parse.siggen, 'checksum_cache', None) + if siggen_cache: + bb.parse.siggen.checksum_cache.mtime_cache.clear() def matchFiles(self, bf): """ @@ -1360,16 +1246,22 @@ class BBCooker: raise NoSpecificMatch return matches[0] - def buildFile(self, buildfile, task, hidewarning=False): + def buildFile(self, buildfile, task): """ Build the file matching regexp buildfile """ bb.event.fire(bb.event.BuildInit(), self.data) - if not hidewarning: - # Too many people use -b because they think it's how you normally - # specify a target to be built, so show a warning - bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.") + # Too many people use -b because they think it's how you normally + # specify a target to be built, so show a warning + bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.") + + self.buildFileInternal(buildfile, task) + + def buildFileInternal(self, buildfile, task, fireevents=True, quietlog=False): + """ + Build the file matching regexp buildfile + """ # Parse the configuration here. 
We need to do it explicitly here since # buildFile() doesn't use the cache @@ -1385,6 +1277,7 @@ class BBCooker: fn = self.matchFile(fn) self.buildSetVars() + self.reset_mtime_caches() bb_cache = bb.cache.Cache(self.databuilder, self.data_hash, self.caches_array) @@ -1411,8 +1304,8 @@ class BBCooker: # Remove external dependencies self.recipecaches[mc].task_deps[fn]['depends'] = {} self.recipecaches[mc].deps[fn] = [] - self.recipecaches[mc].rundeps[fn] = [] - self.recipecaches[mc].runrecs[fn] = [] + self.recipecaches[mc].rundeps[fn] = defaultdict(list) + self.recipecaches[mc].runrecs[fn] = defaultdict(list) # Invalidate task for target if force mode active if self.configuration.force: @@ -1422,10 +1315,15 @@ class BBCooker: # Setup taskdata structure taskdata = {} taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort) - taskdata[mc].add_provider(self.data, self.recipecaches[mc], item) + taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item) - buildname = self.data.getVar("BUILDNAME") - bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.data) + if quietlog: + rqloglevel = bb.runqueue.logger.getEffectiveLevel() + bb.runqueue.logger.setLevel(logging.WARNING) + + buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME") + if fireevents: + bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc]) # Execute the runqueue runlist = [[mc, item, task, fn]] @@ -1452,11 +1350,20 @@ class BBCooker: retval = False except SystemExit as exc: self.command.finishAsyncCommand(str(exc)) + if quietlog: + bb.runqueue.logger.setLevel(rqloglevel) return False if not retval: - bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.data) + if fireevents: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc]) self.command.finishAsyncCommand(msg) + # We trashed 
self.recipecaches above + self.parsecache_valid = False + self.configuration.limited_deps = False + bb.parse.siggen.reset(self.data) + if quietlog: + bb.runqueue.logger.setLevel(rqloglevel) return False if retval is True: return True @@ -1491,14 +1398,17 @@ class BBCooker: return False if not retval: - bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.data) - self.command.finishAsyncCommand(msg) + try: + for mc in self.multiconfigs: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc]) + finally: + self.command.finishAsyncCommand(msg) return False if retval is True: return True return retval - build.reset_cache() + self.reset_mtime_caches() self.buildSetVars() # If we are told to do the None task then query the default task @@ -1523,7 +1433,8 @@ class BBCooker: ntargets.append("multiconfig:%s:%s:%s" % (target[0], target[1], target[2])) ntargets.append("%s:%s" % (target[1], target[2])) - bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.data) + for mc in self.multiconfigs: + bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc]) rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) if 'universe' in targets: @@ -1556,55 +1467,6 @@ class BBCooker: return dump - def generateNewImage(self, image, base_image, package_queue, timestamp, description): - ''' - Create a new image with a "require"/"inherit" base_image statement - ''' - if timestamp: - image_name = os.path.splitext(image)[0] - timestr = time.strftime("-%Y%m%d-%H%M%S") - dest = image_name + str(timestr) + ".bb" - else: - if not image.endswith(".bb"): - dest = image + ".bb" - else: - dest = image - - basename = False - if base_image: - with open(base_image, 'r') as f: - require_line = f.readline() - p = re.compile("IMAGE_BASENAME *=") - for line in f: - if p.search(line): - basename = 
True - - with open(dest, "w") as imagefile: - if base_image is None: - imagefile.write("inherit core-image\n") - else: - topdir = self.data.getVar("TOPDIR", False) - if topdir in base_image: - base_image = require_line.split()[1] - imagefile.write("require " + base_image + "\n") - image_install = "IMAGE_INSTALL = \"" - for package in package_queue: - image_install += str(package) + " " - image_install += "\"\n" - imagefile.write(image_install) - - description_var = "DESCRIPTION = \"" + description + "\"\n" - imagefile.write(description_var) - - if basename: - # If this is overwritten in a inherited image, reset it to default - image_basename = "IMAGE_BASENAME = \"${PN}\"\n" - imagefile.write(image_basename) - - self.state = state.initial - if timestamp: - return timestr - def updateCacheSync(self): if self.state == state.running: return @@ -1619,8 +1481,7 @@ class BBCooker: if not self.baseconfig_valid: logger.debug(1, "Reloading base configuration data") self.initConfigurationData() - self.baseconfig_valid = True - self.parsecache_valid = False + self.handlePRServ() # This is called for all async commands when self.state != running def updateCache(self): @@ -1636,6 +1497,7 @@ class BBCooker: self.updateCacheSync() if self.state != state.parsing and not self.parsecache_valid: + bb.parse.siggen.reset(self.data) self.parseConfiguration () if CookerFeatures.SEND_SANITYEVENTS in self.featureset: for mc in self.multiconfigs: @@ -1723,46 +1585,14 @@ class BBCooker: return pkgs_to_build def pre_serve(self): - # Empty the environment. The environment will be populated as - # necessary from the data store. - #bb.utils.empty_environment() - try: - self.prhost = prserv.serv.auto_start(self.data) - except prserv.serv.PRServiceConfigError: - bb.event.fire(CookerExit(), self.data) - self.state = state.error + # We now are in our own process so we can call this here. 
+ # PRServ exits if its parent process exits + self.handlePRServ() return def post_serve(self): - prserv.serv.auto_shutdown(self.data) + prserv.serv.auto_shutdown() bb.event.fire(CookerExit(), self.data) - lockfile = self.lock.name - self.lock.close() - self.lock = None - - while not self.lock: - with bb.utils.timeout(3): - self.lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True) - if not self.lock: - # Some systems may not have lsof available - procs = None - try: - procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT) - except OSError as e: - if e.errno != errno.ENOENT: - raise - if procs is None: - # Fall back to fuser if lsof is unavailable - try: - procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock" - if procs: - msg += ":\n%s" % str(procs) - print(msg) def shutdown(self, force = False): @@ -1784,46 +1614,12 @@ class BBCooker: def clientComplete(self): """Called when the client is done using the server""" - if self.configuration.server_only: - self.finishcommand() - else: - self.shutdown(True) - - def lockBitbake(self): - if not hasattr(self, 'lock'): - self.lock = None - if self.data: - lockfile = self.data.expand("${TOPDIR}/bitbake.lock") - if lockfile: - self.lock = bb.utils.lockfile(lockfile, False, False) - return self.lock - - def unlockBitbake(self): - if hasattr(self, 'lock') and self.lock: - bb.utils.unlockfile(self.lock) - -def server_main(cooker, func, *args): - cooker.pre_serve() - - if cooker.configuration.profile: - try: - import cProfile as profile - except: - import profile - prof = profile.Profile() - - ret = profile.Profile.runcall(prof, func, *args) - - prof.dump_stats("profile.log") - bb.utils.process_profilelog("profile.log") - print("Raw profiling information saved to profile.log and processed 
statistics to profile.log.processed") - - else: - ret = func(*args) - - cooker.post_serve() + self.finishcommand() + self.extraconfigdata = {} + self.command.reset() + self.databuilder.reset() + self.data = self.databuilder.data - return ret class CookerExit(bb.event.Event): """ @@ -1890,15 +1686,23 @@ class CookerCollectFiles(object): # We need to track where we look so that we can add inotify watches. There # is no nice way to do this, this is horrid. We intercept the os.listdir() - # calls while we run glob(). + # (or os.scandir() for python 3.6+) calls while we run glob(). origlistdir = os.listdir + if hasattr(os, 'scandir'): + origscandir = os.scandir searchdirs = [] def ourlistdir(d): searchdirs.append(d) return origlistdir(d) + def ourscandir(d): + searchdirs.append(d) + return origscandir(d) + os.listdir = ourlistdir + if hasattr(os, 'scandir'): + os.scandir = ourscandir try: # Can't use set here as order is important newfiles = [] @@ -1918,6 +1722,8 @@ class CookerCollectFiles(object): newfiles.append(g) finally: os.listdir = origlistdir + if hasattr(os, 'scandir'): + os.scandir = origscandir bbmask = config.getVar('BBMASK') diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py index e408a35e1..fab47c75f 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py @@ -41,10 +41,6 @@ class ConfigParameters(object): self.options.pkgs_to_build = targets or [] - self.options.tracking = False - if hasattr(self.options, "show_environment") and self.options.show_environment: - self.options.tracking = True - for key, val in self.options.__dict__.items(): setattr(self, key, val) @@ -73,15 +69,15 @@ class ConfigParameters(object): def updateToServer(self, server, environment): options = {} - for o in ["abort", "tryaltconfigs", "force", "invalidate_stamp", - "verbose", "debug", "dry_run", "dump_signatures", + for o in ["abort", 
"force", "invalidate_stamp", + "verbose", "debug", "dry_run", "dump_signatures", "debug_domains", "extra_assume_provided", "profile", - "prefile", "postfile"]: + "prefile", "postfile", "server_timeout"]: options[o] = getattr(self.options, o) ret, error = server.runCommand(["updateConfig", options, environment, sys.argv]) if error: - raise Exception("Unable to update the server configuration with local parameters: %s" % error) + raise Exception("Unable to update the server configuration with local parameters: %s" % error) def parseActions(self): # Parse any commandline into actions @@ -131,8 +127,6 @@ class CookerConfiguration(object): self.extra_assume_provided = [] self.prefile = [] self.postfile = [] - self.prefile_server = [] - self.postfile_server = [] self.debug = 0 self.cmd = None self.abort = True @@ -144,7 +138,8 @@ class CookerConfiguration(object): self.dump_signatures = [] self.dry_run = False self.tracking = False - self.interface = [] + self.xmlrpcinterface = [] + self.server_timeout = None self.writeeventlog = False self.server_only = False self.limited_deps = False @@ -157,7 +152,6 @@ class CookerConfiguration(object): if key in parameters.options.__dict__: setattr(self, key, parameters.options.__dict__[key]) self.env = parameters.environment.copy() - self.tracking = parameters.tracking def setServerRegIdleCallback(self, srcb): self.server_register_idlecallback = srcb @@ -173,7 +167,7 @@ class CookerConfiguration(object): def __setstate__(self,state): for k in state: - setattr(self, k, state[k]) + setattr(self, k, state[k]) def catch_parse_error(func): @@ -230,6 +224,27 @@ def findConfigFile(configfile, data): return None +# +# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working +# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH. 
+# + +def findTopdir(): + d = bb.data.init() + bbpath = None + if 'BBPATH' in os.environ: + bbpath = os.environ['BBPATH'] + d.setVar('BBPATH', bbpath) + + layerconf = findConfigFile("bblayers.conf", d) + if layerconf: + return os.path.dirname(os.path.dirname(layerconf)) + if bbpath: + bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf") + if bitbakeconf: + return os.path.dirname(os.path.dirname(bitbakeconf)) + return None + class CookerDataBuilder(object): def __init__(self, cookercfg, worker = False): @@ -255,7 +270,7 @@ class CookerDataBuilder(object): filtered_keys = bb.utils.approved_variables() bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys) self.basedata.setVar("BB_ORIGENV", self.savedenv) - + if worker: self.basedata.setVar("BB_WORKERCONTEXT", "1") @@ -294,6 +309,8 @@ class CookerDataBuilder(object): mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config) bb.event.fire(bb.event.ConfigParsed(), mcdata) self.mcdata[config] = mcdata + if multiconfig: + bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data) except (SyntaxError, bb.BBHandledException): raise bb.BBHandledException @@ -304,6 +321,18 @@ class CookerDataBuilder(object): logger.exception("Error parsing configuration files") raise bb.BBHandledException + # Create a copy so we can reset at a later date when UIs disconnect + self.origdata = self.data + self.data = bb.data.createCopy(self.origdata) + self.mcdata[''] = self.data + + def reset(self): + # We may not have run parseBaseConfiguration() yet + if not hasattr(self, 'origdata'): + return + self.data = bb.data.createCopy(self.origdata) + self.mcdata[''] = self.data + def _findLayerConf(self, data): return findConfigFile("bblayers.conf", data) @@ -346,6 +375,27 @@ class CookerDataBuilder(object): data.delVar('LAYERDIR_RE') data.delVar('LAYERDIR') + bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split() + collections = (data.getVar('BBFILE_COLLECTIONS') or "").split() + invalid = [] + 
for entry in bbfiles_dynamic: + parts = entry.split(":", 1) + if len(parts) != 2: + invalid.append(entry) + continue + l, f = parts + if l in collections: + data.appendVar("BBFILES", " " + f) + if invalid: + bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid)) + + layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split()) + for c in collections: + compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split()) + if compat and not (compat & layerseries): + bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)" + % (c, " ".join(layerseries), " ".join(compat))) + if not data.getVar("BBPATH"): msg = "The BBPATH variable is not set" if not layerconf: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/daemonize.py b/import-layers/yocto-poky/bitbake/lib/bb/daemonize.py index ab4a95462..8300d1d0f 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/daemonize.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/daemonize.py @@ -1,48 +1,14 @@ """ Python Daemonizing helper -Configurable daemon behaviors: - - 1.) The current working directory set to the "/" directory. - 2.) The current file creation mode mask set to 0. - 3.) Close all open files (1024). - 4.) Redirect standard I/O streams to "/dev/null". - -A failed call to fork() now raises an exception. - -References: - 1) Advanced Programming in the Unix Environment: W. Richard Stevens - http://www.apuebook.com/apue3e.html - 2) The Linux Programming Interface: Michael Kerrisk - http://man7.org/tlpi/index.html - 3) Unix Programming Frequently Asked Questions: - http://www.faqs.org/faqs/unix-faq/programmer/faq/ - -Modified to allow a function to be daemonized and return for -bitbake use by Richard Purdie +Originally based on code Copyright (C) 2005 Chad J. 
Schroeder but now heavily modified +to allow a function to be daemonized and return for bitbake use by Richard Purdie """ -__author__ = "Chad J. Schroeder" -__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" -__version__ = "0.2" - -# Standard Python modules. -import os # Miscellaneous OS interfaces. -import sys # System-specific parameters and functions. - -# Default daemon parameters. -# File mode creation mask of the daemon. -# For BitBake's children, we do want to inherit the parent umask. -UMASK = None - -# Default maximum for the number of available file descriptors. -MAXFD = 1024 - -# The standard I/O file descriptors are redirected to /dev/null by default. -if (hasattr(os, "devnull")): - REDIRECT_TO = os.devnull -else: - REDIRECT_TO = "/dev/null" +import os +import sys +import io +import traceback def createDaemon(function, logfile): """ @@ -65,36 +31,6 @@ def createDaemon(function, logfile): # leader of the new process group, we call os.setsid(). The process is # also guaranteed not to have a controlling terminal. os.setsid() - - # Is ignoring SIGHUP necessary? - # - # It's often suggested that the SIGHUP signal should be ignored before - # the second fork to avoid premature termination of the process. The - # reason is that when the first child terminates, all processes, e.g. - # the second child, in the orphaned group will be sent a SIGHUP. - # - # "However, as part of the session management system, there are exactly - # two cases where SIGHUP is sent on the death of a process: - # - # 1) When the process that dies is the session leader of a session that - # is attached to a terminal device, SIGHUP is sent to all processes - # in the foreground process group of that terminal device. - # 2) When the death of a process causes a process group to become - # orphaned, and one or more processes in the orphaned group are - # stopped, then SIGHUP and SIGCONT are sent to all members of the - # orphaned group." 
[2] - # - # The first case can be ignored since the child is guaranteed not to have - # a controlling terminal. The second case isn't so easy to dismiss. - # The process group is orphaned when the first child terminates and - # POSIX.1 requires that every STOPPED process in an orphaned process - # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the - # second child is not STOPPED though, we can safely forego ignoring the - # SIGHUP signal. In any case, there are no ill-effects if it is ignored. - # - # import signal # Set handlers for asynchronous events. - # signal.signal(signal.SIGHUP, signal.SIG_IGN) - try: # Fork a second child and exit immediately to prevent zombies. This # causes the second child process to be orphaned, making the init @@ -108,86 +44,39 @@ def createDaemon(function, logfile): except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) - if (pid == 0): # The second child. - # We probably don't want the file mode creation mask inherited from - # the parent, so we give the child complete control over permissions. - if UMASK is not None: - os.umask(UMASK) - else: + if (pid != 0): # Parent (the first child) of the second child. + # exit() or _exit()? + # _exit is like exit(), but it doesn't call any functions registered + # with atexit (and on_exit) or any registered signal handlers. It also + # closes any open file descriptors. Using exit() may cause all stdio + # streams to be flushed twice and any temporary files may be unexpectedly + # removed. It's therefore recommended that child branches of a fork() + # and the parent branch(es) of a daemon use _exit(). os._exit(0) else: - # exit() or _exit()? - # _exit is like exit(), but it doesn't call any functions registered - # with atexit (and on_exit) or any registered signal handlers. It also - # closes any open file descriptors. Using exit() may cause all stdio - # streams to be flushed twice and any temporary files may be unexpectedly - # removed. 
It's therefore recommended that child branches of a fork() - # and the parent branch(es) of a daemon use _exit(). + os.waitpid(pid, 0) return - # Close all open file descriptors. This prevents the child from keeping - # open any file descriptors inherited from the parent. There is a variety - # of methods to accomplish this task. Three are listed below. - # - # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum - # number of open file descriptors to close. If it doesn't exist, use - # the default value (configurable). - # - # try: - # maxfd = os.sysconf("SC_OPEN_MAX") - # except (AttributeError, ValueError): - # maxfd = MAXFD - # - # OR - # - # if (os.sysconf_names.has_key("SC_OPEN_MAX")): - # maxfd = os.sysconf("SC_OPEN_MAX") - # else: - # maxfd = MAXFD - # - # OR - # - # Use the getrlimit method to retrieve the maximum file descriptor number - # that can be opened by this process. If there is no limit on the - # resource, use the default value. - # - import resource # Resource usage information. - maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if (maxfd == resource.RLIM_INFINITY): - maxfd = MAXFD - - # Iterate through and close all file descriptors. -# for fd in range(0, maxfd): -# try: -# os.close(fd) -# except OSError: # ERROR, fd wasn't open to begin with (ignored) -# pass - - # Redirect the standard I/O file descriptors to the specified file. Since - # the daemon has no controlling terminal, most daemons redirect stdin, - # stdout, and stderr to /dev/null. This is done to prevent side-effects - # from reads and writes to the standard I/O file descriptors. - - # This call to open is guaranteed to return the lowest file descriptor, - # which will be 0 (stdin), since it was closed above. -# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) - - # Duplicate standard input to standard output and standard error. -# os.dup2(0, 1) # standard output (1) -# os.dup2(0, 2) # standard error (2) - + # The second child. 
+ # Replace standard fds with our own si = open('/dev/null', 'r') - so = open(logfile, 'w') - se = so - - - # Replace those fds with our own os.dup2(si.fileno(), sys.stdin.fileno()) - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(se.fileno(), sys.stderr.fileno()) - function() + try: + so = open(logfile, 'a+') + se = so + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + except io.UnsupportedOperation: + sys.stdout = open(logfile, 'a+') + sys.stderr = sys.stdout - os._exit(0) + try: + function() + except Exception as e: + traceback.print_exc() + finally: + bb.event.print_ui_queue() + os._exit(0) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data.py b/import-layers/yocto-poky/bitbake/lib/bb/data.py index 134afaacc..80a7879cb 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/data.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/data.py @@ -290,7 +290,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d): return deps, value varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {} vardeps = varflags.get("vardeps") - value = d.getVar(key, False) + value = d.getVarFlag(key, "_content", False) def handle_contains(value, contains, d): newvalue = "" diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py index 7dc1c6870..7b09af5cf 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py @@ -39,7 +39,7 @@ from bb.COW import COWDictBase logger = logging.getLogger("BitBake.Data") __setvar_keyword__ = ["_append", "_prepend", "_remove"] -__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$') +__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$') __expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}") 
__expand_python_regexp__ = re.compile(r"\${@.+?}") diff --git a/import-layers/yocto-poky/bitbake/lib/bb/event.py b/import-layers/yocto-poky/bitbake/lib/bb/event.py index 6d8493b17..52072b580 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/event.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/event.py @@ -149,23 +149,34 @@ def print_ui_queue(): # First check to see if we have any proper messages msgprint = False + msgerrs = False + + # Should we print to stderr? + for event in ui_queue[:]: + if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING: + msgerrs = True + break + + if msgerrs: + logger.addHandler(stderr) + else: + logger.addHandler(stdout) + for event in ui_queue[:]: if isinstance(event, logging.LogRecord): if event.levelno > logging.DEBUG: - if event.levelno >= logging.WARNING: - logger.addHandler(stderr) - else: - logger.addHandler(stdout) logger.handle(event) msgprint = True - if msgprint: - return # Nope, so just print all of the messages we have (including debug messages) - logger.addHandler(stdout) - for event in ui_queue[:]: - if isinstance(event, logging.LogRecord): - logger.handle(event) + if not msgprint: + for event in ui_queue[:]: + if isinstance(event, logging.LogRecord): + logger.handle(event) + if msgerrs: + logger.removeHandler(stderr) + else: + logger.removeHandler(stdout) def fire_ui_handlers(event, d): global _thread_lock @@ -212,6 +223,12 @@ def fire(event, d): if worker_fire: worker_fire(event, d) else: + # If messages have been queued up, clear the queue + global _uiready, ui_queue + if _uiready and ui_queue: + for queue_event in ui_queue: + fire_ui_handlers(queue_event, d) + ui_queue = [] fire_ui_handlers(event, d) def fire_from_worker(event, d): @@ -264,6 +281,11 @@ def register(name, handler, mask=None, filename=None, lineno=None): def remove(name, handler): """Remove an Event handler""" _handlers.pop(name) + if name in _catchall_handlers: + _catchall_handlers.pop(name) + for event in 
_event_handler_map.keys(): + if name in _event_handler_map[event]: + _event_handler_map[event].pop(name) def get_handlers(): return _handlers @@ -277,20 +299,28 @@ def set_eventfilter(func): _eventfilter = func def register_UIHhandler(handler, mainui=False): - if mainui: - global _uiready - _uiready = True bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1 _ui_handlers[_ui_handler_seq] = handler level, debug_domains = bb.msg.constructLogOptions() _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains) + if mainui: + global _uiready + _uiready = _ui_handler_seq return _ui_handler_seq -def unregister_UIHhandler(handlerNum): +def unregister_UIHhandler(handlerNum, mainui=False): + if mainui: + global _uiready + _uiready = False if handlerNum in _ui_handlers: del _ui_handlers[handlerNum] return +def get_uihandler(): + if _uiready is False: + return None + return _uiready + # Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC class UIEventFilter(object): def __init__(self, level, debug_domains): @@ -353,6 +383,12 @@ class OperationProgress(Event): class ConfigParsed(Event): """Configuration Parsing Complete""" +class MultiConfigParsed(Event): + """Multi-Config Parsing Complete""" + def __init__(self, mcdata): + self.mcdata = mcdata + Event.__init__(self) + class RecipeEvent(Event): def __init__(self, fn): self.fn = fn @@ -496,6 +532,28 @@ class NoProvider(Event): def isRuntime(self): return self._runtime + def __str__(self): + msg = '' + if self._runtime: + r = "R" + else: + r = "" + + extra = '' + if not self._reasons: + if self._close_matches: + extra = ". 
Close matches:\n %s" % '\n '.join(self._close_matches) + + if self._dependees: + msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra) + else: + msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra) + if self._reasons: + for reason in self._reasons: + msg += '\n' + reason + return msg + + class MultipleProviders(Event): """Multiple Providers""" @@ -523,6 +581,16 @@ class MultipleProviders(Event): """ return self._candidates + def __str__(self): + msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "", + self._item, + ", ".join(self._candidates)) + rtime = "" + if self._is_runtime: + rtime = "R" + msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item) + return msg + class ParseStarted(OperationStarted): """Recipe parsing for the runqueue has begun""" def __init__(self, total): @@ -616,14 +684,6 @@ class FilesMatchingFound(Event): self._pattern = pattern self._matches = matches -class CoreBaseFilesFound(Event): - """ - Event when a list of appropriate config files has been generated - """ - def __init__(self, paths): - Event.__init__(self) - self._paths = paths - class ConfigFilesFound(Event): """ Event when a list of appropriate config files has been generated @@ -694,19 +754,6 @@ class LogHandler(logging.Handler): record.taskpid = worker_pid return True -class RequestPackageInfo(Event): - """ - Event to request package information - """ - -class PackageInfo(Event): - """ - Package information for GUI - """ - def __init__(self, pkginfolist): - Event.__init__(self) - self._pkginfolist = pkginfolist - class MetadataEvent(Event): """ Generic event that target for OE-Core classes @@ -784,3 +831,10 @@ class NetworkTestFailed(Event): Event to indicate network test has failed """ +class FindSigInfoResult(Event): + """ + Event to return results from findSigInfo command + """ + def __init__(self, result): + 
Event.__init__(self) + self.result = result diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/__init__.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/__init__.py index b853da30b..f70f1b515 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/__init__.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/__init__.py @@ -39,6 +39,7 @@ import errno import bb.persist_data, bb.utils import bb.checksum import bb.process +import bb.event __version__ = "2" _checksum_cache = bb.checksum.FileChecksumCache() @@ -48,11 +49,11 @@ logger = logging.getLogger("BitBake.Fetcher") class BBFetchException(Exception): """Class all fetch exceptions inherit from""" def __init__(self, message): - self.msg = message - Exception.__init__(self, message) + self.msg = message + Exception.__init__(self, message) def __str__(self): - return self.msg + return self.msg class UntrustedUrl(BBFetchException): """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS""" @@ -68,24 +69,24 @@ class UntrustedUrl(BBFetchException): class MalformedUrl(BBFetchException): """Exception raised when encountering an invalid url""" def __init__(self, url, message=''): - if message: - msg = message - else: - msg = "The URL: '%s' is invalid and cannot be interpreted" % url - self.url = url - BBFetchException.__init__(self, msg) - self.args = (url,) + if message: + msg = message + else: + msg = "The URL: '%s' is invalid and cannot be interpreted" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) class FetchError(BBFetchException): """General fetcher exception when something happens incorrectly""" def __init__(self, message, url = None): - if url: + if url: msg = "Fetcher failure for URL: '%s'. 
%s" % (url, message) - else: + else: msg = "Fetcher failure: %s" % message - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) class ChecksumError(FetchError): """Exception when mismatched checksum encountered""" @@ -99,49 +100,56 @@ class NoChecksumError(FetchError): class UnpackError(BBFetchException): """General fetcher exception when something happens incorrectly when unpacking""" def __init__(self, message, url): - msg = "Unpack failure for URL: '%s'. %s" % (url, message) - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) + msg = "Unpack failure for URL: '%s'. %s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) class NoMethodError(BBFetchException): """Exception raised when there is no method to obtain a supplied url or set of urls""" def __init__(self, url): - msg = "Could not find a fetcher which supports the URL: '%s'" % url - self.url = url - BBFetchException.__init__(self, msg) - self.args = (url,) + msg = "Could not find a fetcher which supports the URL: '%s'" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) class MissingParameterError(BBFetchException): """Exception raised when a fetch method is missing a critical parameter in the url""" def __init__(self, missing, url): - msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) - self.url = url - self.missing = missing - BBFetchException.__init__(self, msg) - self.args = (missing, url) + msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) + self.url = url + self.missing = missing + BBFetchException.__init__(self, msg) + self.args = (missing, url) class ParameterError(BBFetchException): """Exception raised when a url cannot be proccessed due to invalid parameters.""" def __init__(self, message, url): - msg = "URL: '%s' has 
invalid parameters. %s" % (url, message) - self.url = url - BBFetchException.__init__(self, msg) - self.args = (message, url) + msg = "URL: '%s' has invalid parameters. %s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) class NetworkAccess(BBFetchException): """Exception raised when network access is disabled but it is required.""" def __init__(self, url, cmd): - msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) - self.url = url - self.cmd = cmd - BBFetchException.__init__(self, msg) - self.args = (url, cmd) + msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) + self.url = url + self.cmd = cmd + BBFetchException.__init__(self, msg) + self.args = (url, cmd) class NonLocalMethod(Exception): def __init__(self): Exception.__init__(self) +class MissingChecksumEvent(bb.event.Event): + def __init__(self, url, md5sum, sha256sum): + self.url = url + self.checksums = {'md5sum': md5sum, + 'sha256sum': sha256sum} + bb.event.Event.__init__(self) + class URI(object): """ @@ -403,8 +411,6 @@ def encodeurl(decoded): type, host, path, user, pswd, p = decoded - if not path: - raise MissingParameterError('path', "encoded from the data %s" % str(decoded)) if not type: raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) url = '%s://' % type @@ -415,17 +421,18 @@ def encodeurl(decoded): url += "@" if host and type != "file": url += "%s" % host - # Standardise path to ensure comparisons work - while '//' in path: - path = path.replace("//", "/") - url += "%s" % urllib.parse.quote(path) + if path: + # Standardise path to ensure comparisons work + while '//' in path: + path = path.replace("//", "/") + url += "%s" % urllib.parse.quote(path) if p: for parm in p: url += 
";%s=%s" % (parm, p[parm]) return url -def uri_replace(ud, uri_find, uri_replace, replacements, d): +def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None): if not ud.url or not uri_find or not uri_replace: logger.error("uri_replace: passed an undefined value, not replacing") return None @@ -455,7 +462,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d): result_decoded[loc][k] = uri_replace_decoded[loc][k] elif (re.match(regexp, uri_decoded[loc])): if not uri_replace_decoded[loc]: - result_decoded[loc] = "" + result_decoded[loc] = "" else: for k in replacements: uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) @@ -464,9 +471,9 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d): if loc == 2: # Handle path manipulations basename = None - if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball: + if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball: # If the source and destination url types differ, must be a mirrortarball mapping - basename = os.path.basename(ud.mirrortarball) + basename = os.path.basename(mirrortarball) # Kill parameters, they make no sense for mirror tarballs uri_decoded[5] = {} elif ud.localpath and ud.method.supports_checksum(ud): @@ -584,6 +591,14 @@ def verify_checksum(ud, d, precomputed={}): ud.sha256_name, sha256data)) raise NoChecksumError('Missing SRC_URI checksum', ud.url) + bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d) + + if strict == "ignore": + return { + _MD5_KEY: md5data, + _SHA256_KEY: sha256data + } + # Log missing sums so user can more easily add them logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n' 'SRC_URI[%s] = "%s"', @@ -733,7 +748,7 @@ def get_srcrev(d, method_name='sortable_revision'): In the multi SCM case, we build a value based on SRCREV_FORMAT which must have been set. 
- The idea here is that we put the string "AUTOINC+" into return value if the revisions are not + The idea here is that we put the string "AUTOINC+" into return value if the revisions are not incremental, other code is then responsible for turning that into an increasing value (if needed) A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if @@ -785,7 +800,7 @@ def get_srcrev(d, method_name='sortable_revision'): format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format) if seenautoinc: - format = "AUTOINC+" + format + format = "AUTOINC+" + format return format @@ -892,45 +907,47 @@ def build_mirroruris(origud, mirrors, ld): replacements["BASENAME"] = origud.path.split("/")[-1] replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') - def adduri(ud, uris, uds, mirrors): + def adduri(ud, uris, uds, mirrors, tarballs): for line in mirrors: try: (find, replace) = line except ValueError: continue - newuri = uri_replace(ud, find, replace, replacements, ld) - if not newuri or newuri in uris or newuri == origud.url: - continue - if not trusted_network(ld, newuri): - logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri)) - continue + for tarball in tarballs: + newuri = uri_replace(ud, find, replace, replacements, ld, tarball) + if not newuri or newuri in uris or newuri == origud.url: + continue - # Create a local copy of the mirrors minus the current line - # this will prevent us from recursively processing the same line - # as well as indirect recursion A -> B -> C -> A - localmirrors = list(mirrors) - localmirrors.remove(line) + if not trusted_network(ld, newuri): + logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri)) + continue + + # Create a local copy of the mirrors minus the current line + # this will prevent us from recursively processing the same line + # as well as indirect recursion A 
-> B -> C -> A + localmirrors = list(mirrors) + localmirrors.remove(line) - try: - newud = FetchData(newuri, ld) - newud.setup_localpath(ld) - except bb.fetch2.BBFetchException as e: - logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) - logger.debug(1, str(e)) try: - # setup_localpath of file:// urls may fail, we should still see - # if mirrors of the url exist - adduri(newud, uris, uds, localmirrors) - except UnboundLocalError: - pass - continue - uris.append(newuri) - uds.append(newud) + newud = FetchData(newuri, ld) + newud.setup_localpath(ld) + except bb.fetch2.BBFetchException as e: + logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) + logger.debug(1, str(e)) + try: + # setup_localpath of file:// urls may fail, we should still see + # if mirrors of the url exist + adduri(newud, uris, uds, localmirrors, tarballs) + except UnboundLocalError: + pass + continue + uris.append(newuri) + uds.append(newud) - adduri(newud, uris, uds, localmirrors) + adduri(newud, uris, uds, localmirrors, tarballs) - adduri(origud, uris, uds, mirrors) + adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None]) return uris, uds @@ -975,8 +992,8 @@ def try_mirror_url(fetch, origud, ud, ld, check = False): # We may be obtaining a mirror tarball which needs further processing by the real fetcher # If that tarball is a local file:// we need to provide a symlink to it dldir = ld.getVar("DL_DIR") - if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \ - and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): + + if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): # Create donestamp in old format to avoid triggering a re-download if ud.donestamp: bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) @@ -993,7 +1010,7 @@ def 
try_mirror_url(fetch, origud, ud, ld, check = False): pass if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld): origud.method.download(origud, ld) - if hasattr(origud.method,"build_mirror_data"): + if hasattr(origud.method, "build_mirror_data"): origud.method.build_mirror_data(origud, ld) return origud.localpath # Otherwise the result is a local file:// and we symlink to it @@ -1015,7 +1032,7 @@ def try_mirror_url(fetch, origud, ud, ld, check = False): except IOError as e: if e.errno in [os.errno.ESTALE]: - logger.warn("Stale Error Observed %s." % ud.url) + logger.warning("Stale Error Observed %s." % ud.url) return False raise @@ -1115,7 +1132,7 @@ def srcrev_internal_helper(ud, d, name): attempts.append("SRCREV") for a in attempts: - srcrev = d.getVar(a) + srcrev = d.getVar(a) if srcrev and srcrev != "INVALID": break @@ -1130,7 +1147,7 @@ def srcrev_internal_helper(ud, d, name): if srcrev == "INVALID" or not srcrev: return parmrev if srcrev != parmrev: - raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please spcify one valid value" % (srcrev, parmrev)) + raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev)) return parmrev if srcrev == "INVALID" or not srcrev: @@ -1190,7 +1207,7 @@ class FetchData(object): self.localfile = "" self.localpath = None self.lockfile = None - self.mirrortarball = None + self.mirrortarballs = [] self.basename = None self.basepath = None (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url)) @@ -1228,7 +1245,7 @@ class FetchData(object): for m in methods: if m.supports(self, d): self.method = m - break + break if not self.method: raise NoMethodError(url) @@ -1263,7 +1280,7 @@ class FetchData(object): elif self.basepath or self.basename: basepath = dldir + os.sep + (self.basepath or self.basename) else: - bb.fatal("Can't determine lock path for url %s" % url) + 
bb.fatal("Can't determine lock path for url %s" % url) self.donestamp = basepath + '.done' self.lockfile = basepath + '.lock' @@ -1326,13 +1343,13 @@ class FetchMethod(object): if os.path.isdir(urldata.localpath) == True: return False if urldata.localpath.find("*") != -1: - return False + return False return True def recommends_checksum(self, urldata): """ - Is the backend on where checksumming is recommended (should warnings + Is the backend on where checksumming is recommended (should warnings be displayed if there is no checksum)? """ return False @@ -1542,6 +1559,14 @@ class FetchMethod(object): key = self._revision_key(ud, d, name) return "%s-%s" % (key, d.getVar("PN") or "") + def latest_versionstring(self, ud, d): + """ + Compute the latest release name like "x.y.x" in "x.y.x+gitHASH" + by searching through the tags output of ls-remote, comparing + versions and returning the highest match as a (version, revision) pair. + """ + return ('', '') + class Fetch(object): def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None): if localonly and cache: @@ -1612,7 +1637,7 @@ class Fetch(object): try: self.d.setVar("BB_NO_NETWORK", network) - + if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d): localpath = ud.localpath elif m.try_premirror(ud, self.d): @@ -1708,9 +1733,8 @@ class Fetch(object): ret = try_mirrors(self, self.d, ud, mirrors, True) if not ret: # Next try checking from the original uri, u - try: - ret = m.checkstatus(self, ud, self.d) - except: + ret = m.checkstatus(self, ud, self.d) + if not ret: # Finally, try checking uri, u, from MIRRORS mirrors = mirror_from_string(self.d.getVar('MIRRORS')) ret = try_mirrors(self, self.d, ud, mirrors, True) @@ -1720,7 +1744,7 @@ class Fetch(object): def unpack(self, root, urls=None): """ - Check all urls exist upstream + Unpack urls to root """ if not urls: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/git.py 
b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/git.py index 7442f8441..5ef8cd69e 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/git.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/git.py @@ -70,11 +70,14 @@ Supported SRC_URI options are: # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +import collections import errno +import fnmatch import os import re +import subprocess +import tempfile import bb -import errno import bb.progress from bb.fetch2 import FetchMethod from bb.fetch2 import runfetchcmd @@ -172,18 +175,66 @@ class Git(FetchMethod): branches = ud.parm.get("branch", "master").split(',') if len(branches) != len(ud.names): raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url) + + ud.cloneflags = "-s -n" + if ud.bareclone: + ud.cloneflags += " --mirror" + + ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1" + ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split() + + depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH") + if depth_default is not None: + try: + depth_default = int(depth_default or 0) + except ValueError: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) + else: + if depth_default < 0: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) + else: + depth_default = 1 + ud.shallow_depths = collections.defaultdict(lambda: depth_default) + + revs_default = d.getVar("BB_GIT_SHALLOW_REVS", True) + ud.shallow_revs = [] ud.branches = {} for pos, name in enumerate(ud.names): branch = branches[pos] ud.branches[name] = branch ud.unresolvedrev[name] = branch + shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name) + if shallow_depth is not None: + try: + shallow_depth = int(shallow_depth or 0) + except ValueError: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, 
shallow_depth)) + else: + if shallow_depth < 0: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth)) + ud.shallow_depths[name] = shallow_depth + + revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name) + if revs is not None: + ud.shallow_revs.extend(revs.split()) + elif revs_default is not None: + ud.shallow_revs.extend(revs_default.split()) + + if (ud.shallow and + not ud.shallow_revs and + all(ud.shallow_depths[n] == 0 for n in ud.names)): + # Shallow disabled for this URL + ud.shallow = False + if ud.usehead: ud.unresolvedrev['default'] = 'HEAD' ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0" - ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0") or ud.rebaseable + write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0" + ud.write_tarballs = write_tarballs != "0" or ud.rebaseable + ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0" ud.setup_revisions(d) @@ -205,13 +256,42 @@ class Git(FetchMethod): if ud.rebaseable: for name in ud.names: gitsrcname = gitsrcname + '_' + ud.revisions[name] - ud.mirrortarball = 'git2_%s.tar.gz' % gitsrcname - ud.fullmirror = os.path.join(d.getVar("DL_DIR"), ud.mirrortarball) - gitdir = d.getVar("GITDIR") or (d.getVar("DL_DIR") + "/git2/") - ud.clonedir = os.path.join(gitdir, gitsrcname) + dl_dir = d.getVar("DL_DIR") + gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/") + ud.clonedir = os.path.join(gitdir, gitsrcname) ud.localfile = ud.clonedir + mirrortarball = 'git2_%s.tar.gz' % gitsrcname + ud.fullmirror = os.path.join(dl_dir, mirrortarball) + ud.mirrortarballs = [mirrortarball] + if ud.shallow: + tarballname = gitsrcname + if ud.bareclone: + tarballname = "%s_bare" % tarballname + + if ud.shallow_revs: + tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs))) + + for name, revision in sorted(ud.revisions.items()): + tarballname = "%s_%s" % (tarballname, 
ud.revisions[name][:7]) + depth = ud.shallow_depths[name] + if depth: + tarballname = "%s-%s" % (tarballname, depth) + + shallow_refs = [] + if not ud.nobranch: + shallow_refs.extend(ud.branches.values()) + if ud.shallow_extra_refs: + shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs) + if shallow_refs: + tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.')) + + fetcher = self.__class__.__name__.lower() + ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname) + ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball) + ud.mirrortarballs.insert(0, ud.shallowtarball) + def localpath(self, ud, d): return ud.clonedir @@ -221,6 +301,8 @@ class Git(FetchMethod): for name in ud.names: if not self._contains_ref(ud, d, name, ud.clonedir): return True + if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow): + return True if ud.write_tarballs and not os.path.exists(ud.fullmirror): return True return False @@ -237,8 +319,16 @@ class Git(FetchMethod): def download(self, ud, d): """Fetch url""" - # If the checkout doesn't exist and the mirror tarball does, extract it - if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror): + no_clone = not os.path.exists(ud.clonedir) + need_update = no_clone or self.need_update(ud, d) + + # A current clone is preferred to either tarball, a shallow tarball is + # preferred to an out of date clone, and a missing clone will use + # either tarball. 
+ if ud.shallow and os.path.exists(ud.fullshallow) and need_update: + ud.localpath = ud.fullshallow + return + elif os.path.exists(ud.fullmirror) and no_clone: bb.utils.mkdirhier(ud.clonedir) runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir) @@ -284,9 +374,21 @@ class Git(FetchMethod): raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name])) def build_mirror_data(self, ud, d): - # Generate a mirror tarball if needed - if ud.write_tarballs and not os.path.exists(ud.fullmirror): - # it's possible that this symlink points to read-only filesystem with PREMIRROR + if ud.shallow and ud.write_shallow_tarballs: + if not os.path.exists(ud.fullshallow): + if os.path.islink(ud.fullshallow): + os.unlink(ud.fullshallow) + tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR')) + shallowclone = os.path.join(tempdir, 'git') + try: + self.clone_shallow_local(ud, shallowclone, d) + + logger.info("Creating tarball of git repository") + runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone) + runfetchcmd("touch %s.done" % ud.fullshallow, d) + finally: + bb.utils.remove(tempdir, recurse=True) + elif ud.write_tarballs and not os.path.exists(ud.fullmirror): if os.path.islink(ud.fullmirror): os.unlink(ud.fullmirror) @@ -294,6 +396,62 @@ class Git(FetchMethod): runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir) runfetchcmd("touch %s.done" % ud.fullmirror, d) + def clone_shallow_local(self, ud, dest, d): + """Clone the repo and make it shallow. 
+ + The upstream url of the new clone isn't set at this time, as it'll be + set correctly when unpacked.""" + runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d) + + to_parse, shallow_branches = [], [] + for name in ud.names: + revision = ud.revisions[name] + depth = ud.shallow_depths[name] + if depth: + to_parse.append('%s~%d^{}' % (revision, depth - 1)) + + # For nobranch, we need a ref, otherwise the commits will be + # removed, and for non-nobranch, we truncate the branch to our + # srcrev, to avoid keeping unnecessary history beyond that. + branch = ud.branches[name] + if ud.nobranch: + ref = "refs/shallow/%s" % name + elif ud.bareclone: + ref = "refs/heads/%s" % branch + else: + ref = "refs/remotes/origin/%s" % branch + + shallow_branches.append(ref) + runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest) + + # Map srcrev+depths to revisions + parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest) + + # Resolve specified revisions + parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest) + shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines() + + # Apply extra ref wildcards + all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd, + d, workdir=dest).splitlines() + for r in ud.shallow_extra_refs: + if not ud.bareclone: + r = r.replace('refs/heads/', 'refs/remotes/origin/') + + if '*' in r: + matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs) + shallow_branches.extend(matches) + else: + shallow_branches.append(r) + + # Make the repository shallow + shallow_cmd = ['git', 'make-shallow', '-s'] + for b in shallow_branches: + shallow_cmd.append('-r') + shallow_cmd.append(b) + shallow_cmd.extend(shallow_revisions) + runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest) + def unpack(self, ud, destdir, d): """ unpack the 
downloaded src to destdir""" @@ -310,11 +468,12 @@ class Git(FetchMethod): if os.path.exists(destdir): bb.utils.prunedir(destdir) - cloneflags = "-s -n" - if ud.bareclone: - cloneflags += " --mirror" + if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)): + bb.utils.mkdirhier(destdir) + runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir) + else: + runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d) - runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, cloneflags, ud.clonedir, destdir), d) repourl = self._get_repo_url(ud) runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir) if not ud.nocheckout: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitannex.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitannex.py index c66c21142..a9b69caab 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitannex.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitannex.py @@ -33,6 +33,11 @@ class GitANNEX(Git): """ return ud.type in ['gitannex'] + def urldata_init(self, ud, d): + super(GitANNEX, self).urldata_init(ud, d) + if ud.shallow: + ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*'] + def uses_annex(self, ud, d, wd): for name in ud.names: try: @@ -55,9 +60,21 @@ class GitANNEX(Git): def download(self, ud, d): Git.download(self, ud, d) - annex = self.uses_annex(ud, d, ud.clonedir) - if annex: - self.update_annex(ud, d, ud.clonedir) + if not ud.shallow or ud.localpath != ud.fullshallow: + if self.uses_annex(ud, d, ud.clonedir): + self.update_annex(ud, d, ud.clonedir) + + def clone_shallow_local(self, ud, dest, d): + super(GitANNEX, self).clone_shallow_local(ud, dest, d) + + try: + runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest) + except bb.fetch.FetchError: + pass + + if self.uses_annex(ud, d, dest): + runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest) + runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), 
d, quiet=True, workdir=dest) def unpack(self, ud, destdir, d): Git.unpack(self, ud, destdir, d) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitsm.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitsm.py index a95584c82..0aff1008e 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitsm.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/gitsm.py @@ -117,14 +117,19 @@ class GitSM(Git): def download(self, ud, d): Git.download(self, ud, d) - submodules = self.uses_submodules(ud, d, ud.clonedir) - if submodules: - self.update_submodules(ud, d) + if not ud.shallow or ud.localpath != ud.fullshallow: + submodules = self.uses_submodules(ud, d, ud.clonedir) + if submodules: + self.update_submodules(ud, d) + + def clone_shallow_local(self, ud, dest, d): + super(GitSM, self).clone_shallow_local(ud, dest, d) + + runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d) def unpack(self, ud, destdir, d): Git.unpack(self, ud, destdir, d) - - submodules = self.uses_submodules(ud, d, ud.destdir) - if submodules: + + if self.uses_submodules(ud, d, ud.destdir): runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir) runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/hg.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/hg.py index b5f268601..d0857e63f 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/hg.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/hg.py @@ -76,8 +76,9 @@ class Hg(FetchMethod): # Create paths to mercurial checkouts hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \ ud.host, ud.path.replace('/', '.')) - ud.mirrortarball = 'hg_%s.tar.gz' % hgsrcname - ud.fullmirror = os.path.join(d.getVar("DL_DIR"), ud.mirrortarball) + mirrortarball = 'hg_%s.tar.gz' % hgsrcname + ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball) + ud.mirrortarballs = 
[mirrortarball] hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/") ud.pkgdir = os.path.join(hgdir, hgsrcname) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/npm.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/npm.py index 73a75fe98..b5f148ca0 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/npm.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/npm.py @@ -91,9 +91,10 @@ class Npm(FetchMethod): ud.prefixdir = prefixdir ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0") - ud.mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version) - ud.mirrortarball = ud.mirrortarball.replace('/', '-') - ud.fullmirror = os.path.join(d.getVar("DL_DIR"), ud.mirrortarball) + mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version) + mirrortarball = mirrortarball.replace('/', '-') + ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball) + ud.mirrortarballs = [mirrortarball] def need_update(self, ud, d): if os.path.exists(ud.localpath): @@ -262,26 +263,27 @@ class Npm(FetchMethod): runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest) return - shwrf = d.getVar('NPM_SHRINKWRAP') - logger.debug(2, "NPM shrinkwrap file is %s" % shwrf) - if shwrf: - try: - with open(shwrf) as datafile: - shrinkobj = json.load(datafile) - except Exception as e: - raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e))) - elif not ud.ignore_checksums: - logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' 
% ud.pkgname) - lckdf = d.getVar('NPM_LOCKDOWN') - logger.debug(2, "NPM lockdown file is %s" % lckdf) - if lckdf: - try: - with open(lckdf) as datafile: - lockdown = json.load(datafile) - except Exception as e: - raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e))) - elif not ud.ignore_checksums: - logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname) + if ud.parm.get("noverify", None) != '1': + shwrf = d.getVar('NPM_SHRINKWRAP') + logger.debug(2, "NPM shrinkwrap file is %s" % shwrf) + if shwrf: + try: + with open(shwrf) as datafile: + shrinkobj = json.load(datafile) + except Exception as e: + raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e))) + elif not ud.ignore_checksums: + logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname) + lckdf = d.getVar('NPM_LOCKDOWN') + logger.debug(2, "NPM lockdown file is %s" % lckdf) + if lckdf: + try: + with open(lckdf) as datafile: + lockdown = json.load(datafile) + except Exception as e: + raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e))) + elif not ud.ignore_checksums: + logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' 
% ud.pkgname) if ('name' not in shrinkobj): self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/repo.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/repo.py index 1be91cc69..c22d9b557 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/repo.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/repo.py @@ -27,6 +27,7 @@ import os import bb from bb.fetch2 import FetchMethod from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger class Repo(FetchMethod): """Class to fetch a module or modules from repo (git) repositories""" diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py index ae0ffa8c9..7c49c2b12 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py @@ -30,6 +30,7 @@ import tempfile import subprocess import os import logging +import errno import bb import bb.progress import urllib.request, urllib.parse, urllib.error @@ -89,13 +90,13 @@ class Wget(FetchMethod): self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate" - def _runwget(self, ud, d, command, quiet): + def _runwget(self, ud, d, command, quiet, workdir=None): progresshandler = WgetProgressHandler(d) logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command)) bb.fetch2.check_network_access(d, command, ud.url) - runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler) + runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir) def download(self, ud, d): """Fetch urls""" @@ -206,8 +207,21 @@ class Wget(FetchMethod): h.request(req.get_method(), req.selector, req.data, headers) except socket.error as err: # XXX what error? # Don't close connection when cache is enabled. 
+ # Instead, try to detect connections that are no longer + # usable (for example, closed unexpectedly) and remove + # them from the cache. if fetch.connection_cache is None: h.close() + elif isinstance(err, OSError) and err.errno == errno.EBADF: + # This happens when the server closes the connection despite the Keep-Alive. + # Apparently urllib then uses the file descriptor, expecting it to be + # connected, when in reality the connection is already gone. + # We let the request fail and expect it to be + # tried once more ("try_again" in check_status()), + # with the dead connection removed from the cache. + # If it still fails, we give up, which can happen for bad + # HTTP proxy settings. + fetch.connection_cache.remove_connection(h.host, h.port) raise urllib.error.URLError(err) else: try: @@ -269,11 +283,6 @@ class Wget(FetchMethod): """ http_error_403 = http_error_405 - """ - Some servers (e.g. FusionForge) returns 406 Not Acceptable when they - actually mean 405 Method Not Allowed. - """ - http_error_406 = http_error_405 class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler): """ @@ -302,7 +311,9 @@ class Wget(FetchMethod): uri = ud.url.split(";")[0] r = urllib.request.Request(uri) r.get_method = lambda: "HEAD" - + # Some servers (FusionForge, as used on Alioth) require that the + # optional Accept header is set. 
+ r.add_header("Accept", "*/*") def add_basic_auth(login_str, request): '''Adds Basic auth to http request, pass in login:password as string''' import base64 @@ -408,17 +419,16 @@ class Wget(FetchMethod): Run fetch checkstatus to get directory information """ f = tempfile.NamedTemporaryFile() + with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f: + agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12" + fetchcmd = self.basecmd + fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'" + try: + self._runwget(ud, d, fetchcmd, True, workdir=workdir) + fetchresult = f.read() + except bb.fetch2.BBFetchException: + fetchresult = "" - agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12" - fetchcmd = self.basecmd - fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'" - try: - self._runwget(ud, d, fetchcmd, True) - fetchresult = f.read() - except bb.fetch2.BBFetchException: - fetchresult = "" - - f.close() return fetchresult def _check_latest_version(self, url, package, package_regex, current_version, ud, d): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/main.py b/import-layers/yocto-poky/bitbake/lib/bb/main.py index 8c948c2c1..7711b290d 100755 --- a/import-layers/yocto-poky/bitbake/lib/bb/main.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/main.py @@ -28,6 +28,8 @@ import logging import optparse import warnings import fcntl +import time +import traceback import bb from bb import event @@ -37,11 +39,17 @@ from bb import ui from bb import server from bb import cookerdata +import bb.server.process +import bb.server.xmlrpcclient + logger = logging.getLogger("BitBake") class BBMainException(Exception): pass +class BBMainFatal(bb.BBHandledException): + pass + def present_options(optionlist): if len(optionlist) > 1: return ' or 
'.join([', '.join(optionlist[:-1]), optionlist[-1]]) @@ -58,9 +66,6 @@ class BitbakeHelpFormatter(optparse.IndentedHelpFormatter): if option.dest == 'ui': valid_uis = list_extension_modules(bb.ui, 'main') option.help = option.help.replace('@CHOICES@', present_options(valid_uis)) - elif option.dest == 'servertype': - valid_server_types = list_extension_modules(bb.server, 'BitBakeServer') - option.help = option.help.replace('@CHOICES@', present_options(valid_server_types)) return optparse.IndentedHelpFormatter.format_option(self, option) @@ -148,11 +153,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters): "failed and anything depending on it cannot be built, as much as " "possible will be built before stopping.") - parser.add_option("-a", "--tryaltconfigs", action="store_true", - dest="tryaltconfigs", default=False, - help="Continue with builds by trying to use alternative providers " - "where possible.") - parser.add_option("-f", "--force", action="store_true", dest="force", default=False, help="Force the specified targets/task to run (invalidating any " "existing stamp file).") @@ -238,11 +238,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters): default=os.environ.get('BITBAKE_UI', 'knotty'), help="The user interface to use (@CHOICES@ - default %default).") - # @CHOICES@ is substituted out by BitbakeHelpFormatter above - parser.add_option("-t", "--servertype", action="store", dest="servertype", - default=["process", "xmlrpc"]["BBSERVER" in os.environ], - help="Choose which server type to use (@CHOICES@ - default %default).") - parser.add_option("", "--token", action="store", dest="xmlrpctoken", default=os.environ.get("BBTOKEN"), help="Specify the connection token to be used when connecting " @@ -258,15 +253,14 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters): help="Run bitbake without a UI, only starting a server " "(cooker) process.") - parser.add_option("", "--foreground", action="store_true", - help="Run bitbake server in 
foreground.") - parser.add_option("-B", "--bind", action="store", dest="bind", default=False, - help="The name/address for the bitbake server to bind to.") + help="The name/address for the bitbake xmlrpc server to bind to.") - parser.add_option("-T", "--idle-timeout", type=int, - default=int(os.environ.get("BBTIMEOUT", "0")), - help="Set timeout to unload bitbake server due to inactivity") + parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout", + default=os.getenv("BB_SERVER_TIMEOUT"), + help="Set timeout to unload bitbake server due to inactivity, " + "set to -1 means no unload, " + "default: Environment variable BB_SERVER_TIMEOUT.") parser.add_option("", "--no-setscene", action="store_true", dest="nosetscene", default=False, @@ -283,7 +277,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters): parser.add_option("-m", "--kill-server", action="store_true", dest="kill_server", default=False, - help="Terminate the remote server.") + help="Terminate any running bitbake server.") parser.add_option("", "--observe-only", action="store_true", dest="observe_only", default=False, @@ -322,70 +316,20 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters): eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S") options.writeeventlog = eventlog - # if BBSERVER says to autodetect, let's do that - if options.remote_server: - port = -1 - if options.remote_server != 'autostart': - host, port = options.remote_server.split(":", 2) + if options.bind: + try: + #Checking that the port is a number and is a ':' delimited value + (host, port) = options.bind.split(':') port = int(port) - # use automatic port if port set to -1, means read it from - # the bitbake.lock file; this is a bit tricky, but we always expect - # to be in the base of the build directory if we need to have a - # chance to start the server later, anyway - if port == -1: - lock_location = "./bitbake.lock" - # we try to read the address at all times; if the 
server is not started, - # we'll try to start it after the first connect fails, below - try: - lf = open(lock_location, 'r') - remotedef = lf.readline() - [host, port] = remotedef.split(":") - port = int(port) - lf.close() - options.remote_server = remotedef - except Exception as e: - if options.remote_server != 'autostart': - raise BBMainException("Failed to read bitbake.lock (%s), invalid port" % str(e)) + except (ValueError,IndexError): + raise BBMainException("FATAL: Malformed host:port bind parameter") + options.xmlrpcinterface = (host, port) + else: + options.xmlrpcinterface = (None, 0) return options, targets[1:] -def start_server(servermodule, configParams, configuration, features): - server = servermodule.BitBakeServer() - single_use = not configParams.server_only and os.getenv('BBSERVER') != 'autostart' - if configParams.bind: - (host, port) = configParams.bind.split(':') - server.initServer((host, int(port)), single_use=single_use, - idle_timeout=configParams.idle_timeout) - configuration.interface = [server.serverImpl.host, server.serverImpl.port] - else: - server.initServer(single_use=single_use) - configuration.interface = [] - - try: - configuration.setServerRegIdleCallback(server.getServerIdleCB()) - - cooker = bb.cooker.BBCooker(configuration, features) - - server.addcooker(cooker) - server.saveConnectionDetails() - except Exception as e: - while hasattr(server, "event_queue"): - import queue - try: - event = server.event_queue.get(block=False) - except (queue.Empty, IOError): - break - if isinstance(event, logging.LogRecord): - logger.handle(event) - raise - if not configParams.foreground: - server.detach() - cooker.shutdown() - cooker.lock.close() - return server - - def bitbake_main(configParams, configuration): # Python multiprocessing requires /dev/shm on Linux @@ -406,45 +350,15 @@ def bitbake_main(configParams, configuration): configuration.setConfigParameters(configParams) - if configParams.server_only: - if configParams.servertype != 
"xmlrpc": - raise BBMainException("FATAL: If '--server-only' is defined, we must set the " - "servertype as 'xmlrpc'.\n") - if not configParams.bind: - raise BBMainException("FATAL: The '--server-only' option requires a name/address " - "to bind to with the -B option.\n") - else: - try: - #Checking that the port is a number - int(configParams.bind.split(":")[1]) - except (ValueError,IndexError): - raise BBMainException( - "FATAL: Malformed host:port bind parameter") - if configParams.remote_server: + if configParams.server_only and configParams.remote_server: raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" % ("the BBSERVER environment variable" if "BBSERVER" in os.environ \ else "the '--remote-server' option")) - elif configParams.foreground: - raise BBMainException("FATAL: The '--foreground' option can only be used " - "with --server-only.\n") - - if configParams.bind and configParams.servertype != "xmlrpc": - raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must " - "set the servertype as 'xmlrpc'.\n") - - if configParams.remote_server and configParams.servertype != "xmlrpc": - raise BBMainException("FATAL: If '--remote-server' is defined, we must " - "set the servertype as 'xmlrpc'.\n") - - if configParams.observe_only and (not configParams.remote_server or configParams.bind): + if configParams.observe_only and not (configParams.remote_server or configParams.bind): raise BBMainException("FATAL: '--observe-only' can only be used by UI clients " "connecting to a server.\n") - if configParams.kill_server and not configParams.remote_server: - raise BBMainException("FATAL: '--kill-server' can only be used to " - "terminate a remote server") - if "BBDEBUG" in os.environ: level = int(os.environ["BBDEBUG"]) if level > configuration.debug: @@ -453,9 +367,13 @@ def bitbake_main(configParams, configuration): bb.msg.init_msgconfig(configParams.verbose, configuration.debug, configuration.debug_domains) - server, 
server_connection, ui_module = setup_bitbake(configParams, configuration) - if server_connection is None and configParams.kill_server: - return 0 + server_connection, ui_module = setup_bitbake(configParams, configuration) + # No server connection + if server_connection is None: + if configParams.status_only: + return 1 + if configParams.kill_server: + return 0 if not configParams.server_only: if configParams.status_only: @@ -463,16 +381,15 @@ def bitbake_main(configParams, configuration): return 0 try: + for event in bb.event.ui_queue: + server_connection.events.queue_event(event) + bb.event.ui_queue = [] + return ui_module.main(server_connection.connection, server_connection.events, configParams) finally: - bb.event.ui_queue = [] server_connection.terminate() else: - print("Bitbake server address: %s, server port: %s" % (server.serverImpl.host, - server.serverImpl.port)) - if configParams.foreground: - server.serverImpl.serve_forever() return 0 return 1 @@ -495,58 +412,93 @@ def setup_bitbake(configParams, configuration, extrafeatures=None): # Collect the feature set for the UI featureset = getattr(ui_module, "featureSet", []) - if configParams.server_only: - for param in ('prefile', 'postfile'): - value = getattr(configParams, param) - if value: - setattr(configuration, "%s_server" % param, value) - param = "%s_server" % param - if extrafeatures: for feature in extrafeatures: if not feature in featureset: featureset.append(feature) - servermodule = import_extension_module(bb.server, - configParams.servertype, - 'BitBakeServer') + server_connection = None + if configParams.remote_server: - if os.getenv('BBSERVER') == 'autostart': - if configParams.remote_server == 'autostart' or \ - not servermodule.check_connection(configParams.remote_server, timeout=2): - configParams.bind = 'localhost:0' - srv = start_server(servermodule, configParams, configuration, featureset) - configParams.remote_server = '%s:%d' % tuple(configuration.interface) - bb.event.ui_queue = [] - # 
we start a stub server that is actually a XMLRPClient that connects to a real server - from bb.server.xmlrpc import BitBakeXMLRPCClient - server = servermodule.BitBakeXMLRPCClient(configParams.observe_only, - configParams.xmlrpctoken) - server.saveConnectionDetails(configParams.remote_server) + # Connect to a remote XMLRPC server + server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset, + configParams.observe_only, configParams.xmlrpctoken) else: - # we start a server with a given configuration - server = start_server(servermodule, configParams, configuration, featureset) + retries = 8 + while retries: + try: + topdir, lock = lockBitbake() + sockname = topdir + "/bitbake.sock" + if lock: + if configParams.status_only or configParams.kill_server: + logger.info("bitbake server is not running.") + lock.close() + return None, None + # we start a server with a given configuration + logger.info("Starting bitbake server...") + # Clear the event queue since we already displayed messages + bb.event.ui_queue = [] + server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset) + + else: + logger.info("Reconnecting to bitbake server...") + if not os.path.exists(sockname): + print("Previous bitbake instance shutting down?, waiting to retry...") + i = 0 + lock = None + # Wait for 5s or until we can get the lock + while not lock and i < 50: + time.sleep(0.1) + _, lock = lockBitbake() + i += 1 + if lock: + bb.utils.unlockfile(lock) + raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?") + if not configParams.server_only: + try: + server_connection = bb.server.process.connectProcessServer(sockname, featureset) + except EOFError: + # The server may have been shutting down but not closed the socket yet. If that happened, + # ignore it. 
+ pass + + if server_connection or configParams.server_only: + break + except BBMainFatal: + raise + except (Exception, bb.server.process.ProcessTimeout) as e: + if not retries: + raise + retries -= 1 + if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)): + logger.info("Retrying server connection...") + else: + logger.info("Retrying server connection... (%s)" % traceback.format_exc()) + if not retries: + bb.fatal("Unable to connect to bitbake server, or start one") + if retries < 5: + time.sleep(5) + + if configParams.kill_server: + server_connection.connection.terminateServer() + server_connection.terminate() bb.event.ui_queue = [] + logger.info("Terminated bitbake server.") + return None, None - if configParams.server_only: - server_connection = None - else: - try: - server_connection = server.establishConnection(featureset) - except Exception as e: - bb.fatal("Could not connect to server %s: %s" % (configParams.remote_server, str(e))) - - if configParams.kill_server: - server_connection.connection.terminateServer() - bb.event.ui_queue = [] - return None, None, None + # Restore the environment in case the UI needs it + for k in cleanedvars: + os.environ[k] = cleanedvars[k] - server_connection.setupEventQueue() + logger.removeHandler(handler) - # Restore the environment in case the UI needs it - for k in cleanedvars: - os.environ[k] = cleanedvars[k] + return server_connection, ui_module - logger.removeHandler(handler) +def lockBitbake(): + topdir = bb.cookerdata.findTopdir() + if not topdir: + bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. 
BBAPTH is unset and/or not in a build directory?") + raise BBMainFatal + lockfile = topdir + "/bitbake.lock" + return topdir, bb.utils.lockfile(lockfile, False, False) - return server, server_connection, ui_module diff --git a/import-layers/yocto-poky/bitbake/lib/bb/msg.py b/import-layers/yocto-poky/bitbake/lib/bb/msg.py index 90b158238..f1723be79 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/msg.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/msg.py @@ -216,3 +216,10 @@ def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers logger.handlers = [console] logger.setLevel(level) return logger + +def has_console_handler(logger): + for handler in logger.handlers: + if isinstance(handler, logging.StreamHandler): + if handler.stream in [sys.stderr, sys.stdout]: + return True + return False diff --git a/import-layers/yocto-poky/bitbake/lib/bb/parse/__init__.py b/import-layers/yocto-poky/bitbake/lib/bb/parse/__init__.py index a2952ecc0..2fc4002db 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/parse/__init__.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/parse/__init__.py @@ -84,6 +84,10 @@ def update_cache(f): logger.debug(1, "Updating mtime cache for %s" % f) update_mtime(f) +def clear_cache(): + global __mtime_cache + __mtime_cache = {} + def mark_dependency(d, f): if f.startswith('./'): f = "%s/%s" % (os.getcwd(), f[2:]) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/BBHandler.py b/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/BBHandler.py index fe918a41f..f89ad2427 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/BBHandler.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/BBHandler.py @@ -144,7 +144,7 @@ def handle(fn, d, include): try: statements.eval(d) except bb.parse.SkipRecipe: - bb.data.setVar("__SKIPPED", True, d) + d.setVar("__SKIPPED", True) if include == 0: return { "" : d } diff --git a/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py 
b/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py index f7d0cf74a..97aa13043 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py @@ -32,7 +32,7 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle __config_regexp__ = re.compile( r""" ^ - (?P<exp>export\s*)? + (?P<exp>export\s+)? (?P<var>[a-zA-Z0-9\-_+.${}/~]+?) (\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])? @@ -69,18 +69,26 @@ def init(data): def supports(fn, d): return fn[-5:] == ".conf" -def include(parentfn, fn, lineno, data, error_out): +def include(parentfn, fns, lineno, data, error_out): """ error_out: A string indicating the verb (e.g. "include", "inherit") to be used in a ParseError that will be raised if the file to be included could not be included. Specify False to avoid raising an error in this case. """ + fns = data.expand(fns) + parentfn = data.expand(parentfn) + + # "include" or "require" accept zero to n space-separated file names to include. + for fn in fns.split(): + include_single_file(parentfn, fn, lineno, data, error_out) + +def include_single_file(parentfn, fn, lineno, data, error_out): + """ + Helper function for include() which does not expand or split its parameters. 
+ """ if parentfn == fn: # prevent infinite recursion return None - fn = data.expand(fn) - parentfn = data.expand(parentfn) - if not os.path.isabs(fn): dname = os.path.dirname(parentfn) bbpath = "%s:%s" % (dname, data.getVar("BBPATH")) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/process.py b/import-layers/yocto-poky/bitbake/lib/bb/process.py index a4a559982..e69697cb6 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/process.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/process.py @@ -94,45 +94,52 @@ def _logged_communicate(pipe, log, input, extrafiles): if data is not None: func(data) + def read_all_pipes(log, rin, outdata, errdata): + rlist = rin + stdoutbuf = b"" + stderrbuf = b"" + + try: + r,w,e = select.select (rlist, [], [], 1) + except OSError as e: + if e.errno != errno.EINTR: + raise + + readextras(r) + + if pipe.stdout in r: + data = stdoutbuf + pipe.stdout.read() + if data is not None and len(data) > 0: + try: + data = data.decode("utf-8") + outdata.append(data) + log.write(data) + log.flush() + stdoutbuf = b"" + except UnicodeDecodeError: + stdoutbuf = data + + if pipe.stderr in r: + data = stderrbuf + pipe.stderr.read() + if data is not None and len(data) > 0: + try: + data = data.decode("utf-8") + errdata.append(data) + log.write(data) + log.flush() + stderrbuf = b"" + except UnicodeDecodeError: + stderrbuf = data + try: + # Read all pipes while the process is open while pipe.poll() is None: - rlist = rin - stdoutbuf = b"" - stderrbuf = b"" - try: - r,w,e = select.select (rlist, [], [], 1) - except OSError as e: - if e.errno != errno.EINTR: - raise - - if pipe.stdout in r: - data = stdoutbuf + pipe.stdout.read() - if data is not None and len(data) > 0: - try: - data = data.decode("utf-8") - outdata.append(data) - log.write(data) - stdoutbuf = b"" - except UnicodeDecodeError: - stdoutbuf = data - - if pipe.stderr in r: - data = stderrbuf + pipe.stderr.read() - if data is not None and len(data) > 0: - try: - data = data.decode("utf-8") - 
errdata.append(data) - log.write(data) - stderrbuf = b"" - except UnicodeDecodeError: - stderrbuf = data - - readextras(r) - - finally: - log.flush() + read_all_pipes(log, rin, outdata, errdata) - readextras([fobj for fobj, _ in extrafiles]) + # Pocess closed, drain all pipes... + read_all_pipes(log, rin, outdata, errdata) + finally: + log.flush() if pipe.stdout is not None: pipe.stdout.close() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py index 7d2ff818e..ae12c2504 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py @@ -1355,12 +1355,7 @@ class RunQueue: logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) if self.state is runQueueFailed: - if not self.rqdata.taskData[''].tryaltconfigs: - raise bb.runqueue.TaskFailure(self.rqexe.failed_tids) - for tid in self.rqexe.failed_tids: - (mc, fn, tn, _) = split_tid_mcfn(tid) - self.rqdata.taskData[mc].fail_fn(fn) - self.rqdata.reset() + raise bb.runqueue.TaskFailure(self.rqexe.failed_tids) if self.state is runQueueComplete: # All done @@ -1839,7 +1834,7 @@ class RunQueueExecuteTasks(RunQueueExecute): Run the tasks in a queue prepared by rqdata.prepare() """ - if self.rqdata.setscenewhitelist and not self.rqdata.setscenewhitelist_checked: + if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked: self.rqdata.setscenewhitelist_checked = True # Check tasks that are going to run against the whitelist @@ -1932,7 +1927,7 @@ class RunQueueExecuteTasks(RunQueueExecute): self.rq.state = runQueueFailed self.stats.taskFailed() return True - self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") + 
self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") self.rq.fakeworker[mc].process.stdin.flush() else: self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>") @@ -2254,7 +2249,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): self.scenequeue_updatecounters(task) def check_taskfail(self, task): - if self.rqdata.setscenewhitelist: + if self.rqdata.setscenewhitelist is not None: realtask = task.split('_setscene')[0] (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] @@ -2372,7 +2367,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): self.rq.scenequeue_covered = self.scenequeue_covered self.rq.scenequeue_notcovered = self.scenequeue_notcovered - logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered)) + logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered))) self.rq.state = runQueueRunInit @@ -2488,6 +2483,9 @@ class runQueueTaskFailed(runQueueEvent): runQueueEvent.__init__(self, task, stats, rq) self.exitcode = exitcode + def __str__(self): + return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode) + class sceneQueueTaskFailed(sceneQueueEvent): """ Event notifying a setscene task failed @@ -2496,6 +2494,9 @@ class sceneQueueTaskFailed(sceneQueueEvent): sceneQueueEvent.__init__(self, task, stats, rq) self.exitcode = exitcode + def __str__(self): + return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode) + class sceneQueueComplete(sceneQueueEvent): """ Event when all the sceneQueue tasks are complete @@ -2602,7 +2603,7 @@ def get_setscene_enforce_whitelist(d): def 
check_setscene_enforce_whitelist(pn, taskname, whitelist): import fnmatch - if whitelist: + if whitelist is not None: item = '%s:%s' % (pn, taskname) for whitelist_item in whitelist: if fnmatch.fnmatch(item, whitelist_item): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/server/__init__.py b/import-layers/yocto-poky/bitbake/lib/bb/server/__init__.py index 538a633fe..5a3fba968 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/server/__init__.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/server/__init__.py @@ -18,82 +18,4 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -""" Base code for Bitbake server process -Have a common base for that all Bitbake server classes ensures a consistent -approach to the interface, and minimize risks associated with code duplication. - -""" - -""" BaseImplServer() the base class for all XXServer() implementations. - - These classes contain the actual code that runs the server side, i.e. - listens for the commands and executes them. Although these implementations - contain all the data of the original bitbake command, i.e the cooker instance, - they may well run on a different process or even machine. - -""" - -class BaseImplServer(): - def __init__(self): - self._idlefuns = {} - - def addcooker(self, cooker): - self.cooker = cooker - - def register_idle_function(self, function, data): - """Register a function to be called while the server is idle""" - assert hasattr(function, '__call__') - self._idlefuns[function] = data - - - -""" BitBakeBaseServerConnection class is the common ancestor to all - BitBakeServerConnection classes. - - These classes control the remote server. The only command currently - implemented is the terminate() command. 
- -""" - -class BitBakeBaseServerConnection(): - def __init__(self, serverImpl): - pass - - def terminate(self): - pass - - def setupEventQueue(self): - pass - - -""" BitBakeBaseServer class is the common ancestor to all Bitbake servers - - Derive this class in order to implement a BitBakeServer which is the - controlling stub for the actual server implementation - -""" -class BitBakeBaseServer(object): - def initServer(self): - self.serverImpl = None # we ensure a runtime crash if not overloaded - self.connection = None - return - - def addcooker(self, cooker): - self.cooker = cooker - self.serverImpl.addcooker(cooker) - - def getServerIdleCB(self): - return self.serverImpl.register_idle_function - - def saveConnectionDetails(self): - return - - def detach(self): - return - - def establishConnection(self, featureset): - raise "Must redefine the %s.establishConnection()" % self.__class__.__name__ - - def endSession(self): - self.connection.terminate() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/server/process.py b/import-layers/yocto-poky/bitbake/lib/bb/server/process.py index c3c1450a5..3d31355fd 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/server/process.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/server/process.py @@ -22,125 +22,245 @@ import bb import bb.event -import itertools import logging import multiprocessing +import threading +import array import os -import signal import sys import time import select -from queue import Empty -from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager - -from . 
import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer +import socket +import subprocess +import errno +import re +import datetime +import bb.server.xmlrpcserver +from bb import daemonize +from multiprocessing import queues logger = logging.getLogger('BitBake') -class ServerCommunicator(): - def __init__(self, connection, event_handle, server): - self.connection = connection - self.event_handle = event_handle - self.server = server - - def runCommand(self, command): - # @todo try/except - self.connection.send(command) - - if not self.server.is_alive(): - raise SystemExit - - while True: - # don't let the user ctrl-c while we're waiting for a response - try: - for idx in range(0,4): # 0, 1, 2, 3 - if self.connection.poll(5): - return self.connection.recv() - else: - bb.warn("Timeout while attempting to communicate with bitbake server") - bb.fatal("Gave up; Too many tries: timeout while attempting to communicate with bitbake server") - except KeyboardInterrupt: - pass - - def getEventHandle(self): - return self.event_handle.value - -class EventAdapter(): - """ - Adapter to wrap our event queue since the caller (bb.event) expects to - call a send() method, but our actual queue only has put() - """ - def __init__(self, queue): - self.queue = queue - - def send(self, event): - try: - self.queue.put(event) - except Exception as err: - print("EventAdapter puked: %s" % str(err)) - +class ProcessTimeout(SystemExit): + pass -class ProcessServer(Process, BaseImplServer): +class ProcessServer(multiprocessing.Process): profile_filename = "profile.log" profile_processed_filename = "profile.log.processed" - def __init__(self, command_channel, event_queue, featurelist): - BaseImplServer.__init__(self) - Process.__init__(self) - self.command_channel = command_channel - self.event_queue = event_queue - self.event = EventAdapter(event_queue) - self.featurelist = featurelist + def __init__(self, lock, sock, sockname): + multiprocessing.Process.__init__(self) + 
self.command_channel = False + self.command_channel_reply = False self.quit = False self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore. self.next_heartbeat = time.time() - self.quitin, self.quitout = Pipe() - self.event_handle = multiprocessing.Value("i") + self.event_handle = None + self.haveui = False + self.lastui = False + self.xmlrpc = False + + self._idlefuns = {} + + self.bitbake_lock = lock + self.sock = sock + self.sockname = sockname + + def register_idle_function(self, function, data): + """Register a function to be called while the server is idle""" + assert hasattr(function, '__call__') + self._idlefuns[function] = data def run(self): - for event in bb.event.ui_queue: - self.event_queue.put(event) - self.event_handle.value = bb.event.register_UIHhandler(self, True) + + if self.xmlrpcinterface[0]: + self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self) + + print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port)) heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT') if heartbeat_event: try: self.heartbeat_seconds = float(heartbeat_event) except: - # Throwing an exception here causes bitbake to hang. - # Just warn about the invalid setting and continue bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event) - bb.cooker.server_main(self.cooker, self.main) + + self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT') + try: + if self.timeout: + self.timeout = float(self.timeout) + except: + bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' 
% self.timeout) + + + try: + self.bitbake_lock.seek(0) + self.bitbake_lock.truncate() + if self.xmlrpc: + self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port)) + else: + self.bitbake_lock.write("%s\n" % (os.getpid())) + self.bitbake_lock.flush() + except Exception as e: + print("Error writing to lock file: %s" % str(e)) + pass + + if self.cooker.configuration.profile: + try: + import cProfile as profile + except: + import profile + prof = profile.Profile() + + ret = profile.Profile.runcall(prof, self.main) + + prof.dump_stats("profile.log") + bb.utils.process_profilelog("profile.log") + print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed") + + else: + ret = self.main() + + return ret def main(self): - # Ignore SIGINT within the server, as all SIGINT handling is done by - # the UI and communicated to us - self.quitin.close() - signal.signal(signal.SIGINT, signal.SIG_IGN) + self.cooker.pre_serve() + bb.utils.set_process_name("Cooker") + + ready = [] + + self.controllersock = False + fds = [self.sock] + if self.xmlrpc: + fds.append(self.xmlrpc) + print("Entering server connection loop") + + def disconnect_client(self, fds): + if not self.haveui: + return + print("Disconnecting Client") + fds.remove(self.controllersock) + fds.remove(self.command_channel) + bb.event.unregister_UIHhandler(self.event_handle, True) + self.command_channel_reply.writer.close() + self.event_writer.writer.close() + del self.event_writer + self.controllersock.close() + self.controllersock = False + self.haveui = False + self.lastui = time.time() + self.cooker.clientComplete() + if self.timeout is None: + print("No timeout, exiting.") + self.quit = True + while not self.quit: - try: - if self.command_channel.poll(): - command = self.command_channel.recv() - self.runCommand(command) - if self.quitout.poll(): - self.quitout.recv() - self.quit = True - try: - self.runCommand(["stateForceShutdown"]) - except: - 
pass + if self.sock in ready: + self.controllersock, address = self.sock.accept() + if self.haveui: + print("Dropping connection attempt as we have a UI %s" % (str(ready))) + self.controllersock.close() + else: + print("Accepting %s" % (str(ready))) + fds.append(self.controllersock) + if self.controllersock in ready: + try: + print("Connecting Client") + ui_fds = recvfds(self.controllersock, 3) + + # Where to write events to + writer = ConnectionWriter(ui_fds[0]) + self.event_handle = bb.event.register_UIHhandler(writer, True) + self.event_writer = writer + + # Where to read commands from + reader = ConnectionReader(ui_fds[1]) + fds.append(reader) + self.command_channel = reader + + # Where to send command return values to + writer = ConnectionWriter(ui_fds[2]) + self.command_channel_reply = writer - self.idle_commands(.1, [self.command_channel, self.quitout]) - except Exception: - logger.exception('Running command %s', command) + self.haveui = True + + except (EOFError, OSError): + disconnect_client(self, fds) + + if not self.timeout == -1.0 and not self.haveui and self.lastui and self.timeout and \ + (self.lastui + self.timeout) < time.time(): + print("Server timeout, exiting.") + self.quit = True - self.event_queue.close() - bb.event.unregister_UIHhandler(self.event_handle.value) - self.command_channel.close() - self.cooker.shutdown(True) - self.quitout.close() + if self.command_channel in ready: + try: + command = self.command_channel.get() + except EOFError: + # Client connection shutting down + ready = [] + disconnect_client(self, fds) + continue + if command[0] == "terminateServer": + self.quit = True + continue + try: + print("Running command %s" % command) + self.command_channel_reply.send(self.cooker.command.runCommand(command)) + except Exception as e: + logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e))) + + if self.xmlrpc in ready: + self.xmlrpc.handle_requests() + + ready = self.idle_commands(.1, fds) + 
+ print("Exiting") + # Remove the socket file so we don't get any more connections to avoid races + os.unlink(self.sockname) + self.sock.close() + + try: + self.cooker.shutdown(True) + except: + pass + + self.cooker.post_serve() + + # Finally release the lockfile but warn about other processes holding it open + lock = self.bitbake_lock + lockfile = lock.name + lock.close() + lock = None + + while not lock: + with bb.utils.timeout(3): + lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True) + if not lock: + # Some systems may not have lsof available + procs = None + try: + procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT) + except OSError as e: + if e.errno != errno.ENOENT: + raise + if procs is None: + # Fall back to fuser if lsof is unavailable + try: + procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock" + if procs: + msg += ":\n%s" % str(procs) + print(msg) + return + # We hold the lock so we can remove the file (hide stale pid data) + bb.utils.remove(lockfile) + bb.utils.unlockfile(lock) def idle_commands(self, delay, fds=None): nextsleep = delay @@ -186,109 +306,317 @@ class ProcessServer(Process, BaseImplServer): nextsleep = self.next_heartbeat - now if nextsleep is not None: - select.select(fds,[],[],nextsleep) + if self.xmlrpc: + nextsleep = self.xmlrpc.get_timeout(nextsleep) + try: + return select.select(fds,[],[],nextsleep)[0] + except InterruptedError: + # Ignore EINTR + return [] + else: + return select.select(fds,[],[],0)[0] + + +class ServerCommunicator(): + def __init__(self, connection, recv): + self.connection = connection + self.recv = recv def runCommand(self, command): - """ - Run a cooker command on the server - """ - self.command_channel.send(self.cooker.command.runCommand(command)) - - def stop(self): - 
self.quitin.send("quit") - self.quitin.close() - -class BitBakeProcessServerConnection(BitBakeBaseServerConnection): - def __init__(self, serverImpl, ui_channel, event_queue): - self.procserver = serverImpl - self.ui_channel = ui_channel - self.event_queue = event_queue - self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle, self.procserver) - self.events = self.event_queue - self.terminated = False - - def sigterm_terminate(self): - bb.error("UI received SIGTERM") - self.terminate() + self.connection.send(command) + if not self.recv.poll(30): + raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server") + return self.recv.get() + + def updateFeatureSet(self, featureset): + _, error = self.runCommand(["setFeatures", featureset]) + if error: + logger.error("Unable to set the cooker to the correct featureset: %s" % error) + raise BaseException(error) + + def getEventHandle(self): + handle, error = self.runCommand(["getUIHandlerNum"]) + if error: + logger.error("Unable to get UI Handler Number: %s" % error) + raise BaseException(error) + + return handle + + def terminateServer(self): + self.connection.send(['terminateServer']) + return + +class BitBakeProcessServerConnection(object): + def __init__(self, ui_channel, recv, eq, sock): + self.connection = ServerCommunicator(ui_channel, recv) + self.events = eq + # Save sock so it doesn't get gc'd for the life of our connection + self.socket_connection = sock def terminate(self): - if self.terminated: - return - self.terminated = True - def flushevents(): - while True: - try: - event = self.event_queue.get(block=False) - except (Empty, IOError): - break - if isinstance(event, logging.LogRecord): - logger.handle(event) - - self.procserver.stop() - - while self.procserver.is_alive(): - flushevents() - self.procserver.join(0.1) - - self.ui_channel.close() - self.event_queue.close() - self.event_queue.setexit() - # XXX: Call explicity close in _writer to avoid - # fd leakage 
because isn't called on Queue.close() - self.event_queue._writer.close() - -# Wrap Queue to provide API which isn't server implementation specific -class ProcessEventQueue(multiprocessing.queues.Queue): - def __init__(self, maxsize): - multiprocessing.queues.Queue.__init__(self, maxsize, ctx=multiprocessing.get_context()) - self.exit = False - bb.utils.set_process_name("ProcessEQueue") - - def setexit(self): - self.exit = True - - def waitEvent(self, timeout): - if self.exit: - return self.getEvent() + self.socket_connection.close() + self.connection.connection.close() + self.connection.recv.close() + return + +class BitBakeServer(object): + start_log_format = '--- Starting bitbake server pid %s at %s ---' + start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f' + + def __init__(self, lock, sockname, configuration, featureset): + + self.configuration = configuration + self.featureset = featureset + self.sockname = sockname + self.bitbake_lock = lock + self.readypipe, self.readypipein = os.pipe() + + # Create server control socket + if os.path.exists(sockname): + os.unlink(sockname) + + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + # AF_UNIX has path length issues so chdir here to workaround + cwd = os.getcwd() + logfile = os.path.join(cwd, "bitbake-cookerdaemon.log") + try: - if not self.server.is_alive(): - return self.getEvent() - return self.get(True, timeout) - except Empty: - return None + os.chdir(os.path.dirname(sockname)) + self.sock.bind(os.path.basename(sockname)) + finally: + os.chdir(cwd) + self.sock.listen(1) + + os.set_inheritable(self.sock.fileno(), True) + startdatetime = datetime.datetime.now() + bb.daemonize.createDaemon(self._startServer, logfile) + self.sock.close() + self.bitbake_lock.close() + + ready = ConnectionReader(self.readypipe) + r = ready.poll(30) + if r: + r = ready.get() + if not r or r != "ready": + ready.close() + bb.error("Unable to start bitbake server") + if os.path.exists(logfile): + logstart_re = 
re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)')) + started = False + lines = [] + with open(logfile, "r") as f: + for line in f: + if started: + lines.append(line) + else: + res = logstart_re.match(line.rstrip()) + if res: + ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format) + if ldatetime >= startdatetime: + started = True + lines.append(line) + if lines: + if len(lines) > 10: + bb.error("Last 10 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-10:]))) + else: + bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines))) + raise SystemExit(1) + ready.close() + os.close(self.readypipein) + + def _startServer(self): + print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format))) + server = ProcessServer(self.bitbake_lock, self.sock, self.sockname) + self.configuration.setServerRegIdleCallback(server.register_idle_function) + writer = ConnectionWriter(self.readypipein) + try: + self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset) + writer.send("ready") + except: + writer.send("fail") + raise + finally: + os.close(self.readypipein) + server.cooker = self.cooker + server.server_timeout = self.configuration.server_timeout + server.xmlrpcinterface = self.configuration.xmlrpcinterface + print("Started bitbake server pid %d" % os.getpid()) + server.start() + +def connectProcessServer(sockname, featureset): + # Connect to socket + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + # AF_UNIX has path length issues so chdir here to workaround + cwd = os.getcwd() + + try: + os.chdir(os.path.dirname(sockname)) + sock.connect(os.path.basename(sockname)) + finally: + os.chdir(cwd) + + readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None + eq = command_chan_recv = command_chan = None + + try: + + # Send an fd for the remote to write events to + readfd, writefd = os.pipe() + eq = 
BBUIEventQueue(readfd) + # Send an fd for the remote to recieve commands from + readfd1, writefd1 = os.pipe() + command_chan = ConnectionWriter(writefd1) + # Send an fd for the remote to write commands results to + readfd2, writefd2 = os.pipe() + command_chan_recv = ConnectionReader(readfd2) + + sendfds(sock, [writefd, readfd1, writefd2]) + + server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock) + + # Close the ends of the pipes we won't use + for i in [writefd, readfd1, writefd2]: + os.close(i) + + server_connection.connection.updateFeatureSet(featureset) + + except (Exception, SystemExit) as e: + if command_chan_recv: + command_chan_recv.close() + if command_chan: + command_chan.close() + for i in [writefd, readfd1, writefd2]: + try: + os.close(i) + except OSError: + pass + sock.close() + raise + + return server_connection + +def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + +def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + assert len(a) % 256 == msg[0] + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + +class BBUIEventQueue: + def __init__(self, readfd): + + self.eventQueue = [] + self.eventQueueLock = threading.Lock() + self.eventQueueNotify = threading.Event() + + self.reader = 
ConnectionReader(readfd) + + self.t = threading.Thread() + self.t.setDaemon(True) + self.t.run = self.startCallbackHandler + self.t.start() def getEvent(self): - try: - if not self.server.is_alive(): - self.setexit() - return self.get(False) - except Empty: - if self.exit: - sys.exit(1) + self.eventQueueLock.acquire() + + if len(self.eventQueue) == 0: + self.eventQueueLock.release() return None -class BitBakeServer(BitBakeBaseServer): - def initServer(self, single_use=True): - # establish communication channels. We use bidirectional pipes for - # ui <--> server command/response pairs - # and a queue for server -> ui event notifications - # - self.ui_channel, self.server_channel = Pipe() - self.event_queue = ProcessEventQueue(0) - self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None) - self.event_queue.server = self.serverImpl - - def detach(self): - self.serverImpl.start() - return + item = self.eventQueue.pop(0) - def establishConnection(self, featureset): + if len(self.eventQueue) == 0: + self.eventQueueNotify.clear() - self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue) + self.eventQueueLock.release() + return item - _, error = self.connection.connection.runCommand(["setFeatures", featureset]) - if error: - logger.error("Unable to set the cooker to the correct featureset: %s" % error) - raise BaseException(error) - signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate()) - return self.connection + def waitEvent(self, delay): + self.eventQueueNotify.wait(delay) + return self.getEvent() + + def queue_event(self, event): + self.eventQueueLock.acquire() + self.eventQueue.append(event) + self.eventQueueNotify.set() + self.eventQueueLock.release() + + def send_event(self, event): + self.queue_event(pickle.loads(event)) + + def startCallbackHandler(self): + bb.utils.set_process_name("UIEventQueue") + while True: + try: + self.reader.wait() + event = self.reader.get() + 
self.queue_event(event) + except EOFError: + # Easiest way to exit is to close the file descriptor to cause an exit + break + self.reader.close() + +class ConnectionReader(object): + + def __init__(self, fd): + self.reader = multiprocessing.connection.Connection(fd, writable=False) + self.rlock = multiprocessing.Lock() + + def wait(self, timeout=None): + return multiprocessing.connection.wait([self.reader], timeout) + + def poll(self, timeout=None): + return self.reader.poll(timeout) + + def get(self): + with self.rlock: + res = self.reader.recv_bytes() + return multiprocessing.reduction.ForkingPickler.loads(res) + + def fileno(self): + return self.reader.fileno() + + def close(self): + return self.reader.close() + + +class ConnectionWriter(object): + + def __init__(self, fd): + self.writer = multiprocessing.connection.Connection(fd, readable=False) + self.wlock = multiprocessing.Lock() + # Why bb.event needs this I have no idea + self.event = self + + def send(self, obj): + obj = multiprocessing.reduction.ForkingPickler.dumps(obj) + with self.wlock: + self.writer.send_bytes(obj) + + def fileno(self): + return self.writer.fileno() + + def close(self): + return self.writer.close() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpc.py b/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpc.py deleted file mode 100644 index a06007f5a..000000000 --- a/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpc.py +++ /dev/null @@ -1,422 +0,0 @@ -# -# BitBake XMLRPC Server -# -# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer -# Copyright (C) 2006 - 2008 Richard Purdie -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -""" - This module implements an xmlrpc server for BitBake. - - Use this by deriving a class from BitBakeXMLRPCServer and then adding - methods which you want to "export" via XMLRPC. If the methods have the - prefix xmlrpc_, then registering those function will happen automatically, - if not, you need to call register_function. - - Use register_idle_function() to add a function which the xmlrpc server - calls from within server_forever when no requests are pending. Make sure - that those functions are non-blocking or else you will introduce latency - in the server's main loop. -""" - -import os -import sys - -import hashlib -import time -import socket -import signal -import threading -import pickle -import inspect -import select -import http.client -import xmlrpc.client -from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler - -import bb -from bb import daemonize -from bb.ui import uievent -from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer - -DEBUG = False - -class BBTransport(xmlrpc.client.Transport): - def __init__(self, timeout): - self.timeout = timeout - self.connection_token = None - xmlrpc.client.Transport.__init__(self) - - # Modified from default to pass timeout to HTTPConnection - def make_connection(self, host): - #return an existing connection if possible. This allows - #HTTP/1.1 keep-alive. 
- if self._connection and host == self._connection[0]: - return self._connection[1] - - # create a HTTP connection object from a host descriptor - chost, self._extra_headers, x509 = self.get_host_info(host) - #store the host argument along with the connection object - self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout) - return self._connection[1] - - def set_connection_token(self, token): - self.connection_token = token - - def send_content(self, h, body): - if self.connection_token: - h.putheader("Bitbake-token", self.connection_token) - xmlrpc.client.Transport.send_content(self, h, body) - -def _create_server(host, port, timeout = 60): - t = BBTransport(timeout) - s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True) - return s, t - -def check_connection(remote, timeout): - try: - host, port = remote.split(":") - port = int(port) - except Exception as e: - bb.warn("Failed to read remote definition (%s)" % str(e)) - raise e - - server, _transport = _create_server(host, port, timeout) - try: - ret, err = server.runCommand(['getVariable', 'TOPDIR']) - if err or not ret: - return False - except ConnectionError: - return False - return True - -class BitBakeServerCommands(): - - def __init__(self, server): - self.server = server - self.has_client = False - - def registerEventHandler(self, host, port): - """ - Register a remote UI Event Handler - """ - s, t = _create_server(host, port) - - # we don't allow connections if the cooker is running - if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]): - return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.cooker.state) - - self.event_handle = bb.event.register_UIHhandler(s, True) - return self.event_handle, 'OK' - - def unregisterEventHandler(self, handlerNum): - """ - Unregister a remote UI Event Handler - """ - return bb.event.unregister_UIHhandler(handlerNum) - - def runCommand(self, command): 
- """ - Run a cooker command on the server - """ - return self.cooker.command.runCommand(command, self.server.readonly) - - def getEventHandle(self): - return self.event_handle - - def terminateServer(self): - """ - Trigger the server to quit - """ - self.server.quit = True - print("Server (cooker) exiting") - return - - def addClient(self): - if self.has_client: - return None - token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest() - self.server.set_connection_token(token) - self.has_client = True - return token - - def removeClient(self): - if self.has_client: - self.server.set_connection_token(None) - self.has_client = False - if self.server.single_use: - self.server.quit = True - -# This request handler checks if the request has a "Bitbake-token" header -# field (this comes from the client side) and compares it with its internal -# "Bitbake-token" field (this comes from the server). If the two are not -# equal, it is assumed that a client is trying to connect to the server -# while another client is connected to the server. In this case, a 503 error -# ("service unavailable") is returned to the client. 
-class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): - def __init__(self, request, client_address, server): - self.server = server - SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server) - - def do_POST(self): - try: - remote_token = self.headers["Bitbake-token"] - except: - remote_token = None - if remote_token != self.server.connection_token and remote_token != "observer": - self.report_503() - else: - if remote_token == "observer": - self.server.readonly = True - else: - self.server.readonly = False - SimpleXMLRPCRequestHandler.do_POST(self) - - def report_503(self): - self.send_response(503) - response = 'No more client allowed' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(bytes(response, 'utf-8')) - - -class XMLRPCProxyServer(BaseImplServer): - """ not a real working server, but a stub for a proxy server connection - - """ - def __init__(self, host, port, use_builtin_types=True): - self.host = host - self.port = port - -class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer): - # remove this when you're done with debugging - # allow_reuse_address = True - - def __init__(self, interface, single_use=False, idle_timeout=0): - """ - Constructor - """ - BaseImplServer.__init__(self) - self.single_use = single_use - # Use auto port configuration - if (interface[1] == -1): - interface = (interface[0], 0) - SimpleXMLRPCServer.__init__(self, interface, - requestHandler=BitBakeXMLRPCRequestHandler, - logRequests=False, allow_none=True) - self.host, self.port = self.socket.getsockname() - self.connection_token = None - #self.register_introspection_functions() - self.commands = BitBakeServerCommands(self) - self.autoregister_all_functions(self.commands, "") - self.interface = interface - self.time = time.time() - self.idle_timeout = idle_timeout - if idle_timeout: - self.register_idle_function(self.handle_idle_timeout, self) - - def 
addcooker(self, cooker): - BaseImplServer.addcooker(self, cooker) - self.commands.cooker = cooker - - def autoregister_all_functions(self, context, prefix): - """ - Convenience method for registering all functions in the scope - of this class that start with a common prefix - """ - methodlist = inspect.getmembers(context, inspect.ismethod) - for name, method in methodlist: - if name.startswith(prefix): - self.register_function(method, name[len(prefix):]) - - def handle_idle_timeout(self, server, data, abort): - if not abort: - if time.time() - server.time > server.idle_timeout: - server.quit = True - print("Server idle timeout expired") - return [] - - def serve_forever(self): - # Start the actual XMLRPC server - bb.cooker.server_main(self.cooker, self._serve_forever) - - def _serve_forever(self): - """ - Serve Requests. Overloaded to honor a quit command - """ - self.quit = False - while not self.quit: - fds = [self] - nextsleep = 0.1 - for function, data in list(self._idlefuns.items()): - retval = None - try: - retval = function(self, data, False) - if retval is False: - del self._idlefuns[function] - elif retval is True: - nextsleep = 0 - elif isinstance(retval, float): - if (retval < nextsleep): - nextsleep = retval - else: - fds = fds + retval - except SystemExit: - raise - except: - import traceback - traceback.print_exc() - if retval == None: - # the function execute failed; delete it - del self._idlefuns[function] - pass - - socktimeout = self.socket.gettimeout() or nextsleep - socktimeout = min(socktimeout, nextsleep) - # Mirror what BaseServer handle_request would do - try: - fd_sets = select.select(fds, [], [], socktimeout) - if fd_sets[0] and self in fd_sets[0]: - if self.idle_timeout: - self.time = time.time() - self._handle_request_noblock() - except IOError: - # we ignore interrupted calls - pass - - # Tell idle functions we're exiting - for function, data in list(self._idlefuns.items()): - try: - retval = function(self, data, True) - except: - pass 
- self.server_close() - return - - def set_connection_token(self, token): - self.connection_token = token - -class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection): - def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = None): - self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port) - self.clientinfo = clientinfo - self.serverImpl = serverImpl - self.observer_only = observer_only - if featureset: - self.featureset = featureset - else: - self.featureset = [] - - def connect(self, token = None): - if token is None: - if self.observer_only: - token = "observer" - else: - token = self.connection.addClient() - - if token is None: - return None - - self.transport.set_connection_token(token) - return self - - def setupEventQueue(self): - self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo) - for event in bb.event.ui_queue: - self.events.queue_event(event) - - _, error = self.connection.runCommand(["setFeatures", self.featureset]) - if error: - # disconnect the client, we can't make the setFeature work - self.connection.removeClient() - # no need to log it here, the error shall be sent to the client - raise BaseException(error) - - def removeClient(self): - if not self.observer_only: - self.connection.removeClient() - - def terminate(self): - # Don't wait for server indefinitely - import socket - socket.setdefaulttimeout(2) - try: - self.events.system_quit() - except: - pass - try: - self.connection.removeClient() - except: - pass - -class BitBakeServer(BitBakeBaseServer): - def initServer(self, interface = ("localhost", 0), - single_use = False, idle_timeout=0): - self.interface = interface - self.serverImpl = XMLRPCServer(interface, single_use, idle_timeout) - - def detach(self): - daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log") - del self.cooker - - def establishConnection(self, featureset): - self.connection = 
BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset) - return self.connection.connect() - - def set_connection_token(self, token): - self.connection.transport.set_connection_token(token) - -class BitBakeXMLRPCClient(BitBakeBaseServer): - - def __init__(self, observer_only = False, token = None): - self.token = token - - self.observer_only = observer_only - # if we need extra caches, just tell the server to load them all - pass - - def saveConnectionDetails(self, remote): - self.remote = remote - - def establishConnection(self, featureset): - # The format of "remote" must be "server:port" - try: - [host, port] = self.remote.split(":") - port = int(port) - except Exception as e: - bb.warn("Failed to read remote definition (%s)" % str(e)) - raise e - - # We need our IP for the server connection. We get the IP - # by trying to connect with the server - try: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect((host, port)) - ip = s.getsockname()[0] - s.close() - except Exception as e: - bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e))) - raise e - try: - self.serverImpl = XMLRPCProxyServer(host, port, use_builtin_types=True) - self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset) - return self.connection.connect(self.token) - except Exception as e: - bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e))) - raise e - - def endSession(self): - self.connection.removeClient() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcclient.py b/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcclient.py new file mode 100644 index 000000000..4661a9e5a --- /dev/null +++ b/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcclient.py @@ -0,0 +1,154 @@ +# +# BitBake XMLRPC Client Interface +# +# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer +# Copyright (C) 2006 - 2008 Richard Purdie +# +# This program is free software; you 
can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import sys + +import socket +import http.client +import xmlrpc.client + +import bb +from bb.ui import uievent + +class BBTransport(xmlrpc.client.Transport): + def __init__(self, timeout): + self.timeout = timeout + self.connection_token = None + xmlrpc.client.Transport.__init__(self) + + # Modified from default to pass timeout to HTTPConnection + def make_connection(self, host): + #return an existing connection if possible. This allows + #HTTP/1.1 keep-alive. 
+ if self._connection and host == self._connection[0]: + return self._connection[1] + + # create a HTTP connection object from a host descriptor + chost, self._extra_headers, x509 = self.get_host_info(host) + #store the host argument along with the connection object + self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout) + return self._connection[1] + + def set_connection_token(self, token): + self.connection_token = token + + def send_content(self, h, body): + if self.connection_token: + h.putheader("Bitbake-token", self.connection_token) + xmlrpc.client.Transport.send_content(self, h, body) + +def _create_server(host, port, timeout = 60): + t = BBTransport(timeout) + s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True) + return s, t + +def check_connection(remote, timeout): + try: + host, port = remote.split(":") + port = int(port) + except Exception as e: + bb.warn("Failed to read remote definition (%s)" % str(e)) + raise e + + server, _transport = _create_server(host, port, timeout) + try: + ret, err = server.runCommand(['getVariable', 'TOPDIR']) + if err or not ret: + return False + except ConnectionError: + return False + return True + +class BitBakeXMLRPCServerConnection(object): + def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None): + self.connection, self.transport = _create_server(host, port) + self.clientinfo = clientinfo + self.observer_only = observer_only + if featureset: + self.featureset = featureset + else: + self.featureset = [] + + self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo) + + _, error = self.connection.runCommand(["setFeatures", self.featureset]) + if error: + # disconnect the client, we can't make the setFeature work + self.connection.removeClient() + # no need to log it here, the error shall be sent to the client + raise BaseException(error) + + def connect(self, token = None): 
+ if token is None: + if self.observer_only: + token = "observer" + else: + token = self.connection.addClient() + + if token is None: + return None + + self.transport.set_connection_token(token) + return self + + def removeClient(self): + if not self.observer_only: + self.connection.removeClient() + + def terminate(self): + # Don't wait for server indefinitely + socket.setdefaulttimeout(2) + try: + self.events.system_quit() + except: + pass + try: + self.connection.removeClient() + except: + pass + +def connectXMLRPC(remote, featureset, observer_only = False, token = None): + # The format of "remote" must be "server:port" + try: + [host, port] = remote.split(":") + port = int(port) + except Exception as e: + bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e))) + raise e + + # We need our IP for the server connection. We get the IP + # by trying to connect with the server + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect((host, port)) + ip = s.getsockname()[0] + s.close() + except Exception as e: + bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e))) + raise e + try: + connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset) + return connection.connect(token) + except Exception as e: + bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e))) + raise e + + + diff --git a/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcserver.py b/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcserver.py new file mode 100644 index 000000000..875b1282e --- /dev/null +++ b/import-layers/yocto-poky/bitbake/lib/bb/server/xmlrpcserver.py @@ -0,0 +1,158 @@ +# +# BitBake XMLRPC Server Interface +# +# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer +# Copyright (C) 2006 - 2008 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software 
Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import sys + +import hashlib +import time +import inspect +from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler + +import bb + +# This request handler checks if the request has a "Bitbake-token" header +# field (this comes from the client side) and compares it with its internal +# "Bitbake-token" field (this comes from the server). If the two are not +# equal, it is assumed that a client is trying to connect to the server +# while another client is connected to the server. In this case, a 503 error +# ("service unavailable") is returned to the client. 
+class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler): + def __init__(self, request, client_address, server): + self.server = server + SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server) + + def do_POST(self): + try: + remote_token = self.headers["Bitbake-token"] + except: + remote_token = None + if 0 and remote_token != self.server.connection_token and remote_token != "observer": + self.report_503() + else: + if remote_token == "observer": + self.server.readonly = True + else: + self.server.readonly = False + SimpleXMLRPCRequestHandler.do_POST(self) + + def report_503(self): + self.send_response(503) + response = 'No more client allowed' + self.send_header("Content-type", "text/plain") + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(bytes(response, 'utf-8')) + +class BitBakeXMLRPCServer(SimpleXMLRPCServer): + # remove this when you're done with debugging + # allow_reuse_address = True + + def __init__(self, interface, cooker, parent): + # Use auto port configuration + if (interface[1] == -1): + interface = (interface[0], 0) + SimpleXMLRPCServer.__init__(self, interface, + requestHandler=BitBakeXMLRPCRequestHandler, + logRequests=False, allow_none=True) + self.host, self.port = self.socket.getsockname() + self.interface = interface + + self.connection_token = None + self.commands = BitBakeXMLRPCServerCommands(self) + self.register_functions(self.commands, "") + + self.cooker = cooker + self.parent = parent + + + def register_functions(self, context, prefix): + """ + Convenience method for registering all functions in the scope + of this class that start with a common prefix + """ + methodlist = inspect.getmembers(context, inspect.ismethod) + for name, method in methodlist: + if name.startswith(prefix): + self.register_function(method, name[len(prefix):]) + + def get_timeout(self, delay): + socktimeout = self.socket.gettimeout() or delay + return min(socktimeout, delay) + + def 
handle_requests(self): + self._handle_request_noblock() + +class BitBakeXMLRPCServerCommands(): + + def __init__(self, server): + self.server = server + self.has_client = False + + def registerEventHandler(self, host, port): + """ + Register a remote UI Event Handler + """ + s, t = bb.server.xmlrpcclient._create_server(host, port) + + # we don't allow connections if the cooker is running + if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]): + return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state) + + self.event_handle = bb.event.register_UIHhandler(s, True) + return self.event_handle, 'OK' + + def unregisterEventHandler(self, handlerNum): + """ + Unregister a remote UI Event Handler + """ + ret = bb.event.unregister_UIHhandler(handlerNum, True) + self.event_handle = None + return ret + + def runCommand(self, command): + """ + Run a cooker command on the server + """ + return self.server.cooker.command.runCommand(command, self.server.readonly) + + def getEventHandle(self): + return self.event_handle + + def terminateServer(self): + """ + Trigger the server to quit + """ + self.server.parent.quit = True + print("XMLRPC Server triggering exit") + return + + def addClient(self): + if self.server.parent.haveui: + return None + token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest() + self.server.connection_token = token + self.server.parent.haveui = True + return token + + def removeClient(self): + if self.server.parent.haveui: + self.server.connection_token = None + self.server.parent.haveui = False + diff --git a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py index f71190ad4..5ef82d7be 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py @@ -69,6 +69,10 @@ class SignatureGenerator(object): def set_taskdata(self, data): self.runtaskdeps, self.taskhash, 
self.file_checksum_values, self.taints, self.basehash = data + def reset(self, data): + self.__init__(data) + + class SignatureGeneratorBasic(SignatureGenerator): """ """ diff --git a/import-layers/yocto-poky/bitbake/lib/bb/taskdata.py b/import-layers/yocto-poky/bitbake/lib/bb/taskdata.py index 8c96a5628..0ea6c0bfd 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/taskdata.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/taskdata.py @@ -47,7 +47,7 @@ class TaskData: """ BitBake Task Data implementation """ - def __init__(self, abort = True, tryaltconfigs = False, skiplist = None, allowincomplete = False): + def __init__(self, abort = True, skiplist = None, allowincomplete = False): self.build_targets = {} self.run_targets = {} @@ -66,7 +66,6 @@ class TaskData: self.failed_fns = [] self.abort = abort - self.tryaltconfigs = tryaltconfigs self.allowincomplete = allowincomplete self.skiplist = skiplist diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tests/event.py b/import-layers/yocto-poky/bitbake/lib/bb/tests/event.py new file mode 100644 index 000000000..c7eb1fe44 --- /dev/null +++ b/import-layers/yocto-poky/bitbake/lib/bb/tests/event.py @@ -0,0 +1,377 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake Tests for the Event implementation (event.py) +# +# Copyright (C) 2017 Intel Corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# + +import unittest +import bb +import logging +import bb.compat +import bb.event +import importlib +import threading +import time +import pickle +from unittest.mock import Mock +from unittest.mock import call + + +class EventQueueStub(): + """ Class used as specification for UI event handler queue stub objects """ + def __init__(self): + return + + def send(self, event): + return + + +class PickleEventQueueStub(): + """ Class used as specification for UI event handler queue stub objects + with sendpickle method """ + def __init__(self): + return + + def sendpickle(self, pickled_event): + return + + +class UIClientStub(): + """ Class used as specification for UI event handler stub objects """ + def __init__(self): + self.event = None + + +class EventHandlingTest(unittest.TestCase): + """ Event handling test class """ + _threadlock_test_calls = [] + + def setUp(self): + self._test_process = Mock() + ui_client1 = UIClientStub() + ui_client2 = UIClientStub() + self._test_ui1 = Mock(wraps=ui_client1) + self._test_ui2 = Mock(wraps=ui_client2) + importlib.reload(bb.event) + + def _create_test_handlers(self): + """ Method used to create a test handler ordered dictionary """ + test_handlers = bb.compat.OrderedDict() + test_handlers["handler1"] = self._test_process.handler1 + test_handlers["handler2"] = self._test_process.handler2 + return test_handlers + + def test_class_handlers(self): + """ Test set_class_handlers and get_class_handlers methods """ + test_handlers = self._create_test_handlers() + bb.event.set_class_handlers(test_handlers) + self.assertEqual(test_handlers, + bb.event.get_class_handlers()) + + def test_handlers(self): + """ Test set_handlers and get_handlers """ + test_handlers = self._create_test_handlers() + 
bb.event.set_handlers(test_handlers) + self.assertEqual(test_handlers, + bb.event.get_handlers()) + + def test_clean_class_handlers(self): + """ Test clean_class_handlers method """ + cleanDict = bb.compat.OrderedDict() + self.assertEqual(cleanDict, + bb.event.clean_class_handlers()) + + def test_register(self): + """ Test register method for class handlers """ + result = bb.event.register("handler", self._test_process.handler) + self.assertEqual(result, bb.event.Registered) + handlers_dict = bb.event.get_class_handlers() + self.assertIn("handler", handlers_dict) + + def test_already_registered(self): + """ Test detection of an already registed class handler """ + bb.event.register("handler", self._test_process.handler) + handlers_dict = bb.event.get_class_handlers() + self.assertIn("handler", handlers_dict) + result = bb.event.register("handler", self._test_process.handler) + self.assertEqual(result, bb.event.AlreadyRegistered) + + def test_register_from_string(self): + """ Test register method receiving code in string """ + result = bb.event.register("string_handler", " return True") + self.assertEqual(result, bb.event.Registered) + handlers_dict = bb.event.get_class_handlers() + self.assertIn("string_handler", handlers_dict) + + def test_register_with_mask(self): + """ Test register method with event masking """ + mask = ["bb.event.OperationStarted", + "bb.event.OperationCompleted"] + result = bb.event.register("event_handler", + self._test_process.event_handler, + mask) + self.assertEqual(result, bb.event.Registered) + handlers_dict = bb.event.get_class_handlers() + self.assertIn("event_handler", handlers_dict) + + def test_remove(self): + """ Test remove method for class handlers """ + test_handlers = self._create_test_handlers() + bb.event.set_class_handlers(test_handlers) + count = len(test_handlers) + bb.event.remove("handler1", None) + test_handlers = bb.event.get_class_handlers() + self.assertEqual(len(test_handlers), count - 1) + with 
self.assertRaises(KeyError): + bb.event.remove("handler1", None) + + def test_execute_handler(self): + """ Test execute_handler method for class handlers """ + mask = ["bb.event.OperationProgress"] + result = bb.event.register("event_handler", + self._test_process.event_handler, + mask) + self.assertEqual(result, bb.event.Registered) + event = bb.event.OperationProgress(current=10, total=100) + bb.event.execute_handler("event_handler", + self._test_process.event_handler, + event, + None) + self._test_process.event_handler.assert_called_once_with(event) + + def test_fire_class_handlers(self): + """ Test fire_class_handlers method """ + mask = ["bb.event.OperationStarted"] + result = bb.event.register("event_handler1", + self._test_process.event_handler1, + mask) + self.assertEqual(result, bb.event.Registered) + result = bb.event.register("event_handler2", + self._test_process.event_handler2, + "*") + self.assertEqual(result, bb.event.Registered) + event1 = bb.event.OperationStarted() + event2 = bb.event.OperationCompleted(total=123) + bb.event.fire_class_handlers(event1, None) + bb.event.fire_class_handlers(event2, None) + bb.event.fire_class_handlers(event2, None) + expected_event_handler1 = [call(event1)] + expected_event_handler2 = [call(event1), + call(event2), + call(event2)] + self.assertEqual(self._test_process.event_handler1.call_args_list, + expected_event_handler1) + self.assertEqual(self._test_process.event_handler2.call_args_list, + expected_event_handler2) + + def test_change_handler_event_mapping(self): + """ Test changing the event mapping for class handlers """ + event1 = bb.event.OperationStarted() + event2 = bb.event.OperationCompleted(total=123) + + # register handler for all events + result = bb.event.register("event_handler1", + self._test_process.event_handler1, + "*") + self.assertEqual(result, bb.event.Registered) + bb.event.fire_class_handlers(event1, None) + bb.event.fire_class_handlers(event2, None) + expected = [call(event1), 
call(event2)] + self.assertEqual(self._test_process.event_handler1.call_args_list, + expected) + + # unregister handler and register it only for OperationStarted + result = bb.event.remove("event_handler1", + self._test_process.event_handler1) + mask = ["bb.event.OperationStarted"] + result = bb.event.register("event_handler1", + self._test_process.event_handler1, + mask) + self.assertEqual(result, bb.event.Registered) + bb.event.fire_class_handlers(event1, None) + bb.event.fire_class_handlers(event2, None) + expected = [call(event1), call(event2), call(event1)] + self.assertEqual(self._test_process.event_handler1.call_args_list, + expected) + + # unregister handler and register it only for OperationCompleted + result = bb.event.remove("event_handler1", + self._test_process.event_handler1) + mask = ["bb.event.OperationCompleted"] + result = bb.event.register("event_handler1", + self._test_process.event_handler1, + mask) + self.assertEqual(result, bb.event.Registered) + bb.event.fire_class_handlers(event1, None) + bb.event.fire_class_handlers(event2, None) + expected = [call(event1), call(event2), call(event1), call(event2)] + self.assertEqual(self._test_process.event_handler1.call_args_list, + expected) + + def test_register_UIHhandler(self): + """ Test register_UIHhandler method """ + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + + def test_UIHhandler_already_registered(self): + """ Test registering an UIHhandler already existing """ + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 2) + + def test_unregister_UIHhandler(self): + """ Test unregister_UIHhandler method """ + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + result = bb.event.unregister_UIHhandler(1) + self.assertIs(result, None) + + def 
test_fire_ui_handlers(self): + """ Test fire_ui_handlers method """ + self._test_ui1.event = Mock(spec_set=EventQueueStub) + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + self._test_ui2.event = Mock(spec_set=PickleEventQueueStub) + result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) + self.assertEqual(result, 2) + event1 = bb.event.OperationStarted() + bb.event.fire_ui_handlers(event1, None) + expected = [call(event1)] + self.assertEqual(self._test_ui1.event.send.call_args_list, + expected) + expected = [call(pickle.dumps(event1))] + self.assertEqual(self._test_ui2.event.sendpickle.call_args_list, + expected) + + def test_fire(self): + """ Test fire method used to trigger class and ui event handlers """ + mask = ["bb.event.ConfigParsed"] + result = bb.event.register("event_handler1", + self._test_process.event_handler1, + mask) + + self._test_ui1.event = Mock(spec_set=EventQueueStub) + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + + event1 = bb.event.ConfigParsed() + bb.event.fire(event1, None) + expected = [call(event1)] + self.assertEqual(self._test_process.event_handler1.call_args_list, + expected) + self.assertEqual(self._test_ui1.event.send.call_args_list, + expected) + + def test_fire_from_worker(self): + """ Test fire_from_worker method """ + self._test_ui1.event = Mock(spec_set=EventQueueStub) + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + event1 = bb.event.ConfigParsed() + bb.event.fire_from_worker(event1, None) + expected = [call(event1)] + self.assertEqual(self._test_ui1.event.send.call_args_list, + expected) + + def test_print_ui_queue(self): + """ Test print_ui_queue method """ + event1 = bb.event.OperationStarted() + event2 = bb.event.OperationCompleted(total=123) + bb.event.fire(event1, None) + bb.event.fire(event2, None) + logger = logging.getLogger("BitBake") + 
logger.addHandler(bb.event.LogHandler()) + logger.info("Test info LogRecord") + logger.warning("Test warning LogRecord") + with self.assertLogs("BitBake", level="INFO") as cm: + bb.event.print_ui_queue() + self.assertEqual(cm.output, + ["INFO:BitBake:Test info LogRecord", + "WARNING:BitBake:Test warning LogRecord"]) + + def _set_threadlock_test_mockups(self): + """ Create UI event handler mockups used in enable and disable + threadlock tests """ + def ui1_event_send(event): + if type(event) is bb.event.ConfigParsed: + self._threadlock_test_calls.append("w1_ui1") + if type(event) is bb.event.OperationStarted: + self._threadlock_test_calls.append("w2_ui1") + time.sleep(2) + + def ui2_event_send(event): + if type(event) is bb.event.ConfigParsed: + self._threadlock_test_calls.append("w1_ui2") + if type(event) is bb.event.OperationStarted: + self._threadlock_test_calls.append("w2_ui2") + time.sleep(2) + + self._threadlock_test_calls = [] + self._test_ui1.event = EventQueueStub() + self._test_ui1.event.send = ui1_event_send + result = bb.event.register_UIHhandler(self._test_ui1, mainui=True) + self.assertEqual(result, 1) + self._test_ui2.event = EventQueueStub() + self._test_ui2.event.send = ui2_event_send + result = bb.event.register_UIHhandler(self._test_ui2, mainui=True) + self.assertEqual(result, 2) + + def _set_and_run_threadlock_test_workers(self): + """ Create and run the workers used to trigger events in enable and + disable threadlock tests """ + worker1 = threading.Thread(target=self._thread_lock_test_worker1) + worker2 = threading.Thread(target=self._thread_lock_test_worker2) + worker1.start() + time.sleep(1) + worker2.start() + worker1.join() + worker2.join() + + def _thread_lock_test_worker1(self): + """ First worker used to fire the ConfigParsed event for enable and + disable threadlocks tests """ + bb.event.fire(bb.event.ConfigParsed(), None) + + def _thread_lock_test_worker2(self): + """ Second worker used to fire the OperationStarted event for enable + 
and disable threadlocks tests """ + bb.event.fire(bb.event.OperationStarted(), None) + + def test_enable_threadlock(self): + """ Test enable_threadlock method """ + self._set_threadlock_test_mockups() + bb.event.enable_threadlock() + self._set_and_run_threadlock_test_workers() + # Calls to UI handlers should be in order as all the registered + # handlers for the event coming from the first worker should be + # called before processing the event from the second worker. + self.assertEqual(self._threadlock_test_calls, + ["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"]) + + def test_disable_threadlock(self): + """ Test disable_threadlock method """ + self._set_threadlock_test_mockups() + bb.event.disable_threadlock() + self._set_and_run_threadlock_test_workers() + # Calls to UI handlers should be intertwined together. Thanks to the + # delay in the registered handlers for the event coming from the first + # worker, the event coming from the second worker starts being + # processed before finishing handling the first worker event. 
+ self.assertEqual(self._threadlock_test_calls, + ["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"]) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tests/fetch.py b/import-layers/yocto-poky/bitbake/lib/bb/tests/fetch.py index 5a8d89285..7d7c5d7ff 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/tests/fetch.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/tests/fetch.py @@ -28,6 +28,11 @@ from bb.fetch2 import URI from bb.fetch2 import FetchMethod import bb +def skipIfNoNetwork(): + if os.environ.get("BB_SKIP_NETTESTS") == "yes": + return unittest.skip("Network tests being skipped") + return lambda f: f + class URITest(unittest.TestCase): test_uris = { "http://www.google.com/index.html" : { @@ -518,141 +523,153 @@ class FetcherLocalTest(FetcherTest): self.fetchUnpack(['file://a;subdir=/bin/sh']) class FetcherNetworkTest(FetcherTest): + @skipIfNoNetwork() + def test_fetch(self): + fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) + fetcher.download() + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892) + self.d.setVar("BB_NO_NETWORK", "1") + fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) + fetcher.download() + fetcher.unpack(self.unpackdir) + self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9) + self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9) - if os.environ.get("BB_SKIP_NETTESTS") == "yes": - print("Unset BB_SKIP_NETTESTS to run network tests") - else: - def test_fetch(self): - fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) - 
fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892) - self.d.setVar("BB_NO_NETWORK", "1") - fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9) - self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9) - - def test_fetch_mirror(self): - self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - def test_fetch_mirror_of_mirror(self): - self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - def test_fetch_file_mirror_of_mirror(self): - self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake") - fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - os.mkdir(self.dldir + "/some2where") - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - def test_fetch_premirror(self): - self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake") - fetcher = 
bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) - fetcher.download() - self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) - - def gitfetcher(self, url1, url2): - def checkrevision(self, fetcher): - fetcher.unpack(self.unpackdir) - revision = bb.process.run("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git")[0].strip() - self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5") - - self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1") - self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5") - fetcher = bb.fetch.Fetch([url1], self.d) - fetcher.download() - checkrevision(self, fetcher) - # Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works - bb.utils.prunedir(self.dldir + "/git2/") - bb.utils.prunedir(self.unpackdir) - self.d.setVar("BB_NO_NETWORK", "1") - fetcher = bb.fetch.Fetch([url2], self.d) - fetcher.download() - checkrevision(self, fetcher) - - def test_gitfetch(self): - url1 = url2 = "git://git.openembedded.org/bitbake" - self.gitfetcher(url1, url2) - - def test_gitfetch_goodsrcrev(self): - # SRCREV is set but matches rev= parameter - url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5" - self.gitfetcher(url1, url2) - - def test_gitfetch_badsrcrev(self): - # SRCREV is set but does not match rev= parameter - url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5" - self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2) - - def test_gitfetch_tagandrev(self): - # SRCREV is set but does not match rev= parameter - url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5" - self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2) - - def test_gitfetch_localusehead(self): - # Create dummy local Git repo - src_dir = 
tempfile.mkdtemp(dir=self.tempdir, - prefix='gitfetch_localusehead_') - src_dir = os.path.abspath(src_dir) - bb.process.run("git init", cwd=src_dir) - bb.process.run("git commit --allow-empty -m'Dummy commit'", - cwd=src_dir) - # Use other branch than master - bb.process.run("git checkout -b my-devel", cwd=src_dir) - bb.process.run("git commit --allow-empty -m'Dummy commit 2'", - cwd=src_dir) - stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir) - orig_rev = stdout[0].strip() - - # Fetch and check revision - self.d.setVar("SRCREV", "AUTOINC") - url = "git://" + src_dir + ";protocol=file;usehead=1" - fetcher = bb.fetch.Fetch([url], self.d) - fetcher.download() - fetcher.unpack(self.unpackdir) - stdout = bb.process.run("git rev-parse HEAD", - cwd=os.path.join(self.unpackdir, 'git')) - unpack_rev = stdout[0].strip() - self.assertEqual(orig_rev, unpack_rev) - - def test_gitfetch_remoteusehead(self): - url = "git://git.openembedded.org/bitbake;usehead=1" - self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url) - - def test_gitfetch_premirror(self): - url1 = "git://git.openembedded.org/bitbake" - url2 = "git://someserver.org/bitbake" - self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n") - self.gitfetcher(url1, url2) - - def test_gitfetch_premirror2(self): - url1 = url2 = "git://someserver.org/bitbake" - self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n") - self.gitfetcher(url1, url2) - - def test_gitfetch_premirror3(self): - realurl = "git://git.openembedded.org/bitbake" - dummyurl = "git://someserver.org/bitbake" - self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git") - os.chdir(self.tempdir) - bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True) - self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir)) - self.gitfetcher(dummyurl, dummyurl) - - def test_git_submodule(self): 
- fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d) - fetcher.download() - # Previous cwd has been deleted - os.chdir(os.path.dirname(self.unpackdir)) + @skipIfNoNetwork() + def test_fetch_mirror(self): + self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake") + fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) + fetcher.download() + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) + + @skipIfNoNetwork() + def test_fetch_mirror_of_mirror(self): + self.d.setVar("MIRRORS", "http://.*/.* http://invalid2.yoctoproject.org/ \n http://invalid2.yoctoproject.org/.* http://downloads.yoctoproject.org/releases/bitbake") + fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) + fetcher.download() + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) + + @skipIfNoNetwork() + def test_fetch_file_mirror_of_mirror(self): + self.d.setVar("MIRRORS", "http://.*/.* file:///some1where/ \n file:///some1where/.* file://some2where/ \n file://some2where/.* http://downloads.yoctoproject.org/releases/bitbake") + fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) + os.mkdir(self.dldir + "/some2where") + fetcher.download() + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) + + @skipIfNoNetwork() + def test_fetch_premirror(self): + self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake") + fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d) + fetcher.download() + self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749) + + @skipIfNoNetwork() + def gitfetcher(self, url1, url2): + def checkrevision(self, fetcher): 
fetcher.unpack(self.unpackdir) + revision = bb.process.run("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git")[0].strip() + self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5") + + self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1") + self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5") + fetcher = bb.fetch.Fetch([url1], self.d) + fetcher.download() + checkrevision(self, fetcher) + # Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works + bb.utils.prunedir(self.dldir + "/git2/") + bb.utils.prunedir(self.unpackdir) + self.d.setVar("BB_NO_NETWORK", "1") + fetcher = bb.fetch.Fetch([url2], self.d) + fetcher.download() + checkrevision(self, fetcher) + + @skipIfNoNetwork() + def test_gitfetch(self): + url1 = url2 = "git://git.openembedded.org/bitbake" + self.gitfetcher(url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_goodsrcrev(self): + # SRCREV is set but matches rev= parameter + url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5" + self.gitfetcher(url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_badsrcrev(self): + # SRCREV is set but does not match rev= parameter + url1 = url2 = "git://git.openembedded.org/bitbake;rev=dead05b0b4ba0959fe0624d2a4885d7b70426da5" + self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_tagandrev(self): + # SRCREV is set but does not match rev= parameter + url1 = url2 = "git://git.openembedded.org/bitbake;rev=270a05b0b4ba0959fe0624d2a4885d7b70426da5;tag=270a05b0b4ba0959fe0624d2a4885d7b70426da5" + self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_localusehead(self): + # Create dummy local Git repo + src_dir = tempfile.mkdtemp(dir=self.tempdir, + prefix='gitfetch_localusehead_') + src_dir = os.path.abspath(src_dir) + bb.process.run("git init", cwd=src_dir) + bb.process.run("git commit 
--allow-empty -m'Dummy commit'", + cwd=src_dir) + # Use other branch than master + bb.process.run("git checkout -b my-devel", cwd=src_dir) + bb.process.run("git commit --allow-empty -m'Dummy commit 2'", + cwd=src_dir) + stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir) + orig_rev = stdout[0].strip() + + # Fetch and check revision + self.d.setVar("SRCREV", "AUTOINC") + url = "git://" + src_dir + ";protocol=file;usehead=1" + fetcher = bb.fetch.Fetch([url], self.d) + fetcher.download() + fetcher.unpack(self.unpackdir) + stdout = bb.process.run("git rev-parse HEAD", + cwd=os.path.join(self.unpackdir, 'git')) + unpack_rev = stdout[0].strip() + self.assertEqual(orig_rev, unpack_rev) + + @skipIfNoNetwork() + def test_gitfetch_remoteusehead(self): + url = "git://git.openembedded.org/bitbake;usehead=1" + self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url) + + @skipIfNoNetwork() + def test_gitfetch_premirror(self): + url1 = "git://git.openembedded.org/bitbake" + url2 = "git://someserver.org/bitbake" + self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n") + self.gitfetcher(url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_premirror2(self): + url1 = url2 = "git://someserver.org/bitbake" + self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n") + self.gitfetcher(url1, url2) + + @skipIfNoNetwork() + def test_gitfetch_premirror3(self): + realurl = "git://git.openembedded.org/bitbake" + dummyurl = "git://someserver.org/bitbake" + self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git") + os.chdir(self.tempdir) + bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True) + self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir)) + self.gitfetcher(dummyurl, dummyurl) + + @skipIfNoNetwork() + def test_git_submodule(self): + fetcher = 
bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d) + fetcher.download() + # Previous cwd has been deleted + os.chdir(os.path.dirname(self.unpackdir)) + fetcher.unpack(self.unpackdir) class TrustedNetworksTest(FetcherTest): @@ -782,32 +799,32 @@ class FetchLatestVersionTest(FetcherTest): ("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html", "http://download.oracle.com/otn/berkeley-db/(?P<name>db-)(?P<pver>((\d+[\.\-_]*)+))\.tar\.gz") : "6.1.19", } - if os.environ.get("BB_SKIP_NETTESTS") == "yes": - print("Unset BB_SKIP_NETTESTS to run network tests") - else: - def test_git_latest_versionstring(self): - for k, v in self.test_git_uris.items(): - self.d.setVar("PN", k[0]) - self.d.setVar("SRCREV", k[2]) - self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3]) - ud = bb.fetch2.FetchData(k[1], self.d) - pupver= ud.method.latest_versionstring(ud, self.d) - verstring = pupver[0] - self.assertTrue(verstring, msg="Could not find upstream version") - r = bb.utils.vercmp_string(v, verstring) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) - - def test_wget_latest_versionstring(self): - for k, v in self.test_wget_uris.items(): - self.d.setVar("PN", k[0]) - self.d.setVar("UPSTREAM_CHECK_URI", k[2]) - self.d.setVar("UPSTREAM_CHECK_REGEX", k[3]) - ud = bb.fetch2.FetchData(k[1], self.d) - pupver = ud.method.latest_versionstring(ud, self.d) - verstring = pupver[0] - self.assertTrue(verstring, msg="Could not find upstream version") - r = bb.utils.vercmp_string(v, verstring) - self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) + + @skipIfNoNetwork() + def test_git_latest_versionstring(self): + for k, v in self.test_git_uris.items(): + self.d.setVar("PN", k[0]) + self.d.setVar("SRCREV", k[2]) + 
self.d.setVar("UPSTREAM_CHECK_GITTAGREGEX", k[3]) + ud = bb.fetch2.FetchData(k[1], self.d) + pupver= ud.method.latest_versionstring(ud, self.d) + verstring = pupver[0] + self.assertTrue(verstring, msg="Could not find upstream version") + r = bb.utils.vercmp_string(v, verstring) + self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) + + @skipIfNoNetwork() + def test_wget_latest_versionstring(self): + for k, v in self.test_wget_uris.items(): + self.d.setVar("PN", k[0]) + self.d.setVar("UPSTREAM_CHECK_URI", k[2]) + self.d.setVar("UPSTREAM_CHECK_REGEX", k[3]) + ud = bb.fetch2.FetchData(k[1], self.d) + pupver = ud.method.latest_versionstring(ud, self.d) + verstring = pupver[0] + self.assertTrue(verstring, msg="Could not find upstream version") + r = bb.utils.vercmp_string(v, verstring) + self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring)) class FetchCheckStatusTest(FetcherTest): @@ -820,37 +837,636 @@ class FetchCheckStatusTest(FetcherTest): "https://yoctoproject.org/documentation", "http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz", "http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz", - "ftp://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz", - "ftp://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz", - "ftp://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz", + "ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz", + "http://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz", + "https://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz", + "https://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz", # GitHub releases are hosted on Amazon S3, which doesn't support HEAD "https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz" ] - if os.environ.get("BB_SKIP_NETTESTS") == "yes": - print("Unset BB_SKIP_NETTESTS to run network tests") - else: - - def test_wget_checkstatus(self): - fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d) - for u in self.test_wget_uris: + @skipIfNoNetwork() + 
def test_wget_checkstatus(self): + fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d) + for u in self.test_wget_uris: + with self.subTest(url=u): ud = fetch.ud[u] m = ud.method ret = m.checkstatus(fetch, ud, self.d) self.assertTrue(ret, msg="URI %s, can't check status" % (u)) + @skipIfNoNetwork() + def test_wget_checkstatus_connection_cache(self): + from bb.fetch2 import FetchConnectionCache - def test_wget_checkstatus_connection_cache(self): - from bb.fetch2 import FetchConnectionCache - - connection_cache = FetchConnectionCache() - fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d, - connection_cache = connection_cache) + connection_cache = FetchConnectionCache() + fetch = bb.fetch2.Fetch(self.test_wget_uris, self.d, + connection_cache = connection_cache) - for u in self.test_wget_uris: + for u in self.test_wget_uris: + with self.subTest(url=u): ud = fetch.ud[u] m = ud.method ret = m.checkstatus(fetch, ud, self.d) self.assertTrue(ret, msg="URI %s, can't check status" % (u)) - connection_cache.close_connections() + connection_cache.close_connections() + + +class GitMakeShallowTest(FetcherTest): + bitbake_dir = os.path.join(os.path.dirname(os.path.join(__file__)), '..', '..', '..') + make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow') + + def setUp(self): + FetcherTest.setUp(self) + self.gitdir = os.path.join(self.tempdir, 'gitshallow') + bb.utils.mkdirhier(self.gitdir) + bb.process.run('git init', cwd=self.gitdir) + + def assertRefs(self, expected_refs): + actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines() + full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs).splitlines() + self.assertEqual(sorted(full_expected), sorted(actual_refs)) + + def assertRevCount(self, expected_count, args=None): + if args is None: + args = ['HEAD'] + revs = self.git(['rev-list'] + args) + actual_count = len(revs.splitlines()) + self.assertEqual(expected_count, actual_count, msg='Object count `%d` is not the 
expected `%d`' % (actual_count, expected_count)) + + def git(self, cmd): + if isinstance(cmd, str): + cmd = 'git ' + cmd + else: + cmd = ['git'] + cmd + return bb.process.run(cmd, cwd=self.gitdir)[0] + + def make_shallow(self, args=None): + if args is None: + args = ['HEAD'] + return bb.process.run([self.make_shallow_path] + args, cwd=self.gitdir) + + def add_empty_file(self, path, msg=None): + if msg is None: + msg = path + open(os.path.join(self.gitdir, path), 'w').close() + self.git(['add', path]) + self.git(['commit', '-m', msg, path]) + + def test_make_shallow_single_branch_no_merge(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2) + self.make_shallow() + self.assertRevCount(1) + + def test_make_shallow_single_branch_one_merge(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('checkout -b a_branch') + self.add_empty_file('c') + self.git('checkout master') + self.add_empty_file('d') + self.git('merge --no-ff --no-edit a_branch') + self.git('branch -d a_branch') + self.add_empty_file('e') + self.assertRevCount(6) + self.make_shallow(['HEAD~2']) + self.assertRevCount(5) + + def test_make_shallow_at_merge(self): + self.add_empty_file('a') + self.git('checkout -b a_branch') + self.add_empty_file('b') + self.git('checkout master') + self.git('merge --no-ff --no-edit a_branch') + self.git('branch -d a_branch') + self.assertRevCount(3) + self.make_shallow() + self.assertRevCount(1) + + def test_make_shallow_annotated_tag(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('tag -a -m a_tag a_tag') + self.assertRevCount(2) + self.make_shallow(['a_tag']) + self.assertRevCount(1) + + def test_make_shallow_multi_ref(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('checkout -b a_branch') + self.add_empty_file('c') + self.git('checkout master') + self.add_empty_file('d') + self.git('checkout -b a_branch_2') + self.add_empty_file('a_tag') + self.git('tag a_tag') + 
self.git('checkout master') + self.git('branch -D a_branch_2') + self.add_empty_file('e') + self.assertRevCount(6, ['--all']) + self.make_shallow() + self.assertRevCount(5, ['--all']) + + def test_make_shallow_multi_ref_trim(self): + self.add_empty_file('a') + self.git('checkout -b a_branch') + self.add_empty_file('c') + self.git('checkout master') + self.assertRevCount(1) + self.assertRevCount(2, ['--all']) + self.assertRefs(['master', 'a_branch']) + self.make_shallow(['-r', 'master', 'HEAD']) + self.assertRevCount(1, ['--all']) + self.assertRefs(['master']) + + def test_make_shallow_noop(self): + self.add_empty_file('a') + self.assertRevCount(1) + self.make_shallow() + self.assertRevCount(1) + + @skipIfNoNetwork() + def test_make_shallow_bitbake(self): + self.git('remote add origin https://github.com/openembedded/bitbake') + self.git('fetch --tags origin') + orig_revs = len(self.git('rev-list --all').splitlines()) + self.make_shallow(['refs/tags/1.10.0']) + self.assertRevCount(orig_revs - 1746, ['--all']) + +class GitShallowTest(FetcherTest): + def setUp(self): + FetcherTest.setUp(self) + self.gitdir = os.path.join(self.tempdir, 'git') + self.srcdir = os.path.join(self.tempdir, 'gitsource') + + bb.utils.mkdirhier(self.srcdir) + self.git('init', cwd=self.srcdir) + self.d.setVar('WORKDIR', self.tempdir) + self.d.setVar('S', self.gitdir) + self.d.delVar('PREMIRRORS') + self.d.delVar('MIRRORS') + + uri = 'git://%s;protocol=file;subdir=${S}' % self.srcdir + self.d.setVar('SRC_URI', uri) + self.d.setVar('SRCREV', '${AUTOREV}') + self.d.setVar('AUTOREV', '${@bb.fetch2.get_autorev(d)}') + + self.d.setVar('BB_GIT_SHALLOW', '1') + self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '0') + self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1') + + def assertRefs(self, expected_refs, cwd=None): + if cwd is None: + cwd = self.gitdir + actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines() + full_expected = self.git(['rev-parse', '--symbolic-full-name'] + 
expected_refs, cwd=cwd).splitlines() + self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs))) + + def assertRevCount(self, expected_count, args=None, cwd=None): + if args is None: + args = ['HEAD'] + if cwd is None: + cwd = self.gitdir + revs = self.git(['rev-list'] + args, cwd=cwd) + actual_count = len(revs.splitlines()) + self.assertEqual(expected_count, actual_count, msg='Object count `%d` is not the expected `%d`' % (actual_count, expected_count)) + + def git(self, cmd, cwd=None): + if isinstance(cmd, str): + cmd = 'git ' + cmd + else: + cmd = ['git'] + cmd + if cwd is None: + cwd = self.gitdir + return bb.process.run(cmd, cwd=cwd)[0] + + def add_empty_file(self, path, cwd=None, msg=None): + if msg is None: + msg = path + if cwd is None: + cwd = self.srcdir + open(os.path.join(cwd, path), 'w').close() + self.git(['add', path], cwd) + self.git(['commit', '-m', msg, path], cwd) + + def fetch(self, uri=None): + if uri is None: + uris = self.d.getVar('SRC_URI', True).split() + uri = uris[0] + d = self.d + else: + d = self.d.createCopy() + d.setVar('SRC_URI', uri) + uri = d.expand(uri) + uris = [uri] + + fetcher = bb.fetch2.Fetch(uris, d) + fetcher.download() + ud = fetcher.ud[uri] + return fetcher, ud + + def fetch_and_unpack(self, uri=None): + fetcher, ud = self.fetch(uri) + fetcher.unpack(self.d.getVar('WORKDIR')) + assert os.path.exists(self.d.getVar('S')) + return fetcher, ud + + def fetch_shallow(self, uri=None, disabled=False, keepclone=False): + """Fetch a uri, generating a shallow tarball, then unpack using it""" + fetcher, ud = self.fetch_and_unpack(uri) + assert os.path.exists(ud.clonedir), 'Git clone in DLDIR (%s) does not exist for uri %s' % (ud.clonedir, uri) + + # Confirm that the unpacked repo is unshallow + if not disabled: + assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0])) + + # fetch and unpack, from the shallow tarball + bb.utils.remove(self.gitdir, recurse=True) + bb.utils.remove(ud.clonedir, recurse=True) + 
+ # confirm that the unpacked repo is used when no git clone or git + # mirror tarball is available + fetcher, ud = self.fetch_and_unpack(uri) + if not disabled: + assert os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is not shallow' % self.gitdir + else: + assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')), 'Unpacked git repository at %s is shallow' % self.gitdir + return fetcher, ud + + def test_shallow_disabled(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW', '0') + self.fetch_shallow(disabled=True) + self.assertRevCount(2) + + def test_shallow_nobranch(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + srcrev = self.git('rev-parse HEAD', cwd=self.srcdir).strip() + self.d.setVar('SRCREV', srcrev) + uri = self.d.getVar('SRC_URI', True).split()[0] + uri = '%s;nobranch=1;bare=1' % uri + + self.fetch_shallow(uri) + self.assertRevCount(1) + + # shallow refs are used to ensure the srcrev sticks around when we + # have no other branches referencing it + self.assertRefs(['refs/shallow/default']) + + def test_shallow_default_depth_1(self): + # Create initial git repo + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.fetch_shallow() + self.assertRevCount(1) + + def test_shallow_depth_0_disables(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.fetch_shallow(disabled=True) + self.assertRevCount(2) + + def test_shallow_depth_default_override(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '2') + self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '1') + self.fetch_shallow() + self.assertRevCount(1) + + def 
test_shallow_depth_default_override_disable(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.add_empty_file('c') + self.assertRevCount(3, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '2') + self.fetch_shallow() + self.assertRevCount(2) + + def test_current_shallow_out_of_date_clone(self): + # Create initial git repo + self.add_empty_file('a') + self.add_empty_file('b') + self.add_empty_file('c') + self.assertRevCount(3, cwd=self.srcdir) + + # Clone and generate mirror tarball + fetcher, ud = self.fetch() + + # Ensure we have a current mirror tarball, but an out of date clone + self.git('update-ref refs/heads/master refs/heads/master~1', cwd=ud.clonedir) + self.assertRevCount(2, cwd=ud.clonedir) + + # Fetch and unpack, from the current tarball, not the out of date clone + bb.utils.remove(self.gitdir, recurse=True) + fetcher, ud = self.fetch() + fetcher.unpack(self.d.getVar('WORKDIR')) + self.assertRevCount(1) + + def test_shallow_single_branch_no_merge(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.fetch_shallow() + self.assertRevCount(1) + assert os.path.exists(os.path.join(self.gitdir, 'a')) + assert os.path.exists(os.path.join(self.gitdir, 'b')) + + def test_shallow_no_dangling(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.assertRevCount(2, cwd=self.srcdir) + + self.fetch_shallow() + self.assertRevCount(1) + assert not self.git('fsck --dangling') + + def test_shallow_srcrev_branch_truncation(self): + self.add_empty_file('a') + self.add_empty_file('b') + b_commit = self.git('rev-parse HEAD', cwd=self.srcdir).rstrip() + self.add_empty_file('c') + self.assertRevCount(3, cwd=self.srcdir) + + self.d.setVar('SRCREV', b_commit) + self.fetch_shallow() + + # The 'c' commit was removed entirely, and 'a' was removed from history + self.assertRevCount(1, ['--all']) + self.assertEqual(self.git('rev-parse 
HEAD').strip(), b_commit) + assert os.path.exists(os.path.join(self.gitdir, 'a')) + assert os.path.exists(os.path.join(self.gitdir, 'b')) + assert not os.path.exists(os.path.join(self.gitdir, 'c')) + + def test_shallow_ref_pruning(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('branch a_branch', cwd=self.srcdir) + self.assertRefs(['master', 'a_branch'], cwd=self.srcdir) + self.assertRevCount(2, cwd=self.srcdir) + + self.fetch_shallow() + + self.assertRefs(['master', 'origin/master']) + self.assertRevCount(1) + + def test_shallow_submodules(self): + self.add_empty_file('a') + self.add_empty_file('b') + + smdir = os.path.join(self.tempdir, 'gitsubmodule') + bb.utils.mkdirhier(smdir) + self.git('init', cwd=smdir) + self.add_empty_file('asub', cwd=smdir) + + self.git('submodule init', cwd=self.srcdir) + self.git('submodule add file://%s' % smdir, cwd=self.srcdir) + self.git('submodule update', cwd=self.srcdir) + self.git('commit -m submodule -a', cwd=self.srcdir) + + uri = 'gitsm://%s;protocol=file;subdir=${S}' % self.srcdir + fetcher, ud = self.fetch_shallow(uri) + + self.assertRevCount(1) + assert './.git/modules/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0] + assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule')) + + if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH').split(':')): + def test_shallow_annex(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('annex init', cwd=self.srcdir) + open(os.path.join(self.srcdir, 'c'), 'w').close() + self.git('annex add c', cwd=self.srcdir) + self.git('commit -m annex-c -a', cwd=self.srcdir) + bb.process.run('chmod u+w -R %s' % os.path.join(self.srcdir, '.git', 'annex')) + + uri = 'gitannex://%s;protocol=file;subdir=${S}' % self.srcdir + fetcher, ud = self.fetch_shallow(uri) + + self.assertRevCount(1) + assert './.git/annex/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, 
ud.mirrortarballs[0]))[0] + assert os.path.exists(os.path.join(self.gitdir, 'c')) + + def test_shallow_multi_one_uri(self): + # Create initial git repo + self.add_empty_file('a') + self.add_empty_file('b') + self.git('checkout -b a_branch', cwd=self.srcdir) + self.add_empty_file('c') + self.add_empty_file('d') + self.git('checkout master', cwd=self.srcdir) + self.git('tag v0.0 a_branch', cwd=self.srcdir) + self.add_empty_file('e') + self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir) + self.add_empty_file('f') + self.assertRevCount(7, cwd=self.srcdir) + + uri = self.d.getVar('SRC_URI', True).split()[0] + uri = '%s;branch=master,a_branch;name=master,a_branch' % uri + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') + self.d.setVar('SRCREV_master', '${AUTOREV}') + self.d.setVar('SRCREV_a_branch', '${AUTOREV}') + + self.fetch_shallow(uri) + + self.assertRevCount(5) + self.assertRefs(['master', 'origin/master', 'origin/a_branch']) + + def test_shallow_multi_one_uri_depths(self): + # Create initial git repo + self.add_empty_file('a') + self.add_empty_file('b') + self.git('checkout -b a_branch', cwd=self.srcdir) + self.add_empty_file('c') + self.add_empty_file('d') + self.git('checkout master', cwd=self.srcdir) + self.add_empty_file('e') + self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir) + self.add_empty_file('f') + self.assertRevCount(7, cwd=self.srcdir) + + uri = self.d.getVar('SRC_URI', True).split()[0] + uri = '%s;branch=master,a_branch;name=master,a_branch' % uri + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.d.setVar('BB_GIT_SHALLOW_DEPTH_master', '3') + self.d.setVar('BB_GIT_SHALLOW_DEPTH_a_branch', '1') + self.d.setVar('SRCREV_master', '${AUTOREV}') + self.d.setVar('SRCREV_a_branch', '${AUTOREV}') + + self.fetch_shallow(uri) + + self.assertRevCount(4, ['--all']) + self.assertRefs(['master', 'origin/master', 'origin/a_branch']) + + def test_shallow_clone_preferred_over_shallow(self): + 
self.add_empty_file('a') + self.add_empty_file('b') + + # Fetch once to generate the shallow tarball + fetcher, ud = self.fetch() + assert os.path.exists(os.path.join(self.dldir, ud.mirrortarballs[0])) + + # Fetch and unpack with both the clonedir and shallow tarball available + bb.utils.remove(self.gitdir, recurse=True) + fetcher, ud = self.fetch_and_unpack() + + # The unpacked tree should *not* be shallow + self.assertRevCount(2) + assert not os.path.exists(os.path.join(self.gitdir, '.git', 'shallow')) + + def test_shallow_mirrors(self): + self.add_empty_file('a') + self.add_empty_file('b') + + # Fetch once to generate the shallow tarball + fetcher, ud = self.fetch() + mirrortarball = ud.mirrortarballs[0] + assert os.path.exists(os.path.join(self.dldir, mirrortarball)) + + # Set up the mirror + mirrordir = os.path.join(self.tempdir, 'mirror') + bb.utils.mkdirhier(mirrordir) + self.d.setVar('PREMIRRORS', 'git://.*/.* file://%s/\n' % mirrordir) + + os.rename(os.path.join(self.dldir, mirrortarball), + os.path.join(mirrordir, mirrortarball)) + + # Fetch from the mirror + bb.utils.remove(self.dldir, recurse=True) + bb.utils.remove(self.gitdir, recurse=True) + self.fetch_and_unpack() + self.assertRevCount(1) + + def test_shallow_invalid_depth(self): + self.add_empty_file('a') + self.add_empty_file('b') + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '-12') + with self.assertRaises(bb.fetch2.FetchError): + self.fetch() + + def test_shallow_invalid_depth_default(self): + self.add_empty_file('a') + self.add_empty_file('b') + + self.d.setVar('BB_GIT_SHALLOW_DEPTH_default', '-12') + with self.assertRaises(bb.fetch2.FetchError): + self.fetch() + + def test_shallow_extra_refs(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('branch a_branch', cwd=self.srcdir) + self.assertRefs(['master', 'a_branch'], cwd=self.srcdir) + self.assertRevCount(2, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/a_branch') + self.fetch_shallow() + + 
self.assertRefs(['master', 'origin/master', 'origin/a_branch']) + self.assertRevCount(1) + + def test_shallow_extra_refs_wildcard(self): + self.add_empty_file('a') + self.add_empty_file('b') + self.git('branch a_branch', cwd=self.srcdir) + self.git('tag v1.0', cwd=self.srcdir) + self.assertRefs(['master', 'a_branch', 'v1.0'], cwd=self.srcdir) + self.assertRevCount(2, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*') + self.fetch_shallow() + + self.assertRefs(['master', 'origin/master', 'v1.0']) + self.assertRevCount(1) + + def test_shallow_missing_extra_refs(self): + self.add_empty_file('a') + self.add_empty_file('b') + + self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/heads/foo') + with self.assertRaises(bb.fetch2.FetchError): + self.fetch() + + def test_shallow_missing_extra_refs_wildcard(self): + self.add_empty_file('a') + self.add_empty_file('b') + + self.d.setVar('BB_GIT_SHALLOW_EXTRA_REFS', 'refs/tags/*') + self.fetch() + + def test_shallow_remove_revs(self): + # Create initial git repo + self.add_empty_file('a') + self.add_empty_file('b') + self.git('checkout -b a_branch', cwd=self.srcdir) + self.add_empty_file('c') + self.add_empty_file('d') + self.git('checkout master', cwd=self.srcdir) + self.git('tag v0.0 a_branch', cwd=self.srcdir) + self.add_empty_file('e') + self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir) + self.git('branch -d a_branch', cwd=self.srcdir) + self.add_empty_file('f') + self.assertRevCount(7, cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') + + self.fetch_shallow() + + self.assertRevCount(5) + + def test_shallow_invalid_revs(self): + self.add_empty_file('a') + self.add_empty_file('b') + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0') + + with self.assertRaises(bb.fetch2.FetchError): + self.fetch() + + @skipIfNoNetwork() + def test_bitbake(self): + self.git('remote add --mirror=fetch origin 
git://github.com/openembedded/bitbake', cwd=self.srcdir) + self.git('config core.bare true', cwd=self.srcdir) + self.git('fetch', cwd=self.srcdir) + + self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0') + # Note that the 1.10.0 tag is annotated, so this also tests + # reference of an annotated vs unannotated tag + self.d.setVar('BB_GIT_SHALLOW_REVS', '1.10.0') + + self.fetch_shallow() + + # Confirm that the history of 1.10.0 was removed + orig_revs = len(self.git('rev-list master', cwd=self.srcdir).splitlines()) + revs = len(self.git('rev-list master').splitlines()) + self.assertNotEqual(orig_revs, revs) + self.assertRefs(['master', 'origin/master']) + self.assertRevCount(orig_revs - 1758) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tests/parse.py b/import-layers/yocto-poky/bitbake/lib/bb/tests/parse.py index ab6ca9031..8f16ba4f4 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/tests/parse.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/tests/parse.py @@ -83,7 +83,28 @@ unset B[flag] self.assertEqual(d.getVar("A"), None) self.assertEqual(d.getVarFlag("A","flag"), None) self.assertEqual(d.getVar("B"), "2") - + + exporttest = """ +A = "a" +export B = "b" +export C +exportD = "d" +""" + + def test_parse_exports(self): + f = self.parsehelper(self.exporttest) + d = bb.parse.handle(f.name, self.d)[''] + self.assertEqual(d.getVar("A"), "a") + self.assertIsNone(d.getVarFlag("A", "export")) + self.assertEqual(d.getVar("B"), "b") + self.assertEqual(d.getVarFlag("B", "export"), 1) + self.assertIsNone(d.getVar("C")) + self.assertEqual(d.getVarFlag("C", "export"), 1) + self.assertIsNone(d.getVar("D")) + self.assertIsNone(d.getVarFlag("D", "export")) + self.assertEqual(d.getVar("exportD"), "d") + self.assertIsNone(d.getVarFlag("exportD", "export")) + overridetest = """ RRECOMMENDS_${PN} = "a" diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py index 928333a50..fa95f6329 100644 --- 
a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py @@ -2,6 +2,7 @@ # # Copyright (C) 2012-2017 Intel Corporation # Copyright (C) 2011 Mentor Graphics Corporation +# Copyright (C) 2006-2012 Richard Purdie # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as @@ -54,6 +55,7 @@ class TinfoilCommandFailed(Exception): """Exception raised when run_command fails""" class TinfoilDataStoreConnector: + """Connector object used to enable access to datastore objects via tinfoil""" def __init__(self, tinfoil, dsindex): self.tinfoil = tinfoil @@ -172,6 +174,14 @@ class TinfoilCookerAdapter: attrvalue = self.tinfoil.run_command('getBbFilePriority') or {} elif name == 'pkg_dp': attrvalue = self.tinfoil.run_command('getDefaultPreference') or {} + elif name == 'fn_provides': + attrvalue = self.tinfoil.run_command('getRecipeProvides') or {} + elif name == 'packages': + attrvalue = self.tinfoil.run_command('getRecipePackages') or {} + elif name == 'packages_dynamic': + attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic') or {} + elif name == 'rproviders': + attrvalue = self.tinfoil.run_command('getRProviders') or {} else: raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) @@ -208,19 +218,119 @@ class TinfoilCookerAdapter: return self.tinfoil.find_best_provider(pn) +class TinfoilRecipeInfo: + """ + Provides a convenient representation of the cached information for a single recipe. + Some attributes are set on construction, others are read on-demand (which internally + may result in a remote procedure call to the bitbake server the first time). + Note that only information which is cached is available through this object - if + you need other variable values you will need to parse the recipe using + Tinfoil.parse_recipe(). 
+ """ + def __init__(self, recipecache, d, pn, fn, fns): + self._recipecache = recipecache + self._d = d + self.pn = pn + self.fn = fn + self.fns = fns + self.inherit_files = recipecache.inherits[fn] + self.depends = recipecache.deps[fn] + (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn] + self._cached_packages = None + self._cached_rprovides = None + self._cached_packages_dynamic = None + + def __getattr__(self, name): + if name == 'alternates': + return [x for x in self.fns if x != self.fn] + elif name == 'rdepends': + return self._recipecache.rundeps[self.fn] + elif name == 'rrecommends': + return self._recipecache.runrecs[self.fn] + elif name == 'provides': + return self._recipecache.fn_provides[self.fn] + elif name == 'packages': + if self._cached_packages is None: + self._cached_packages = [] + for pkg, fns in self._recipecache.packages.items(): + if self.fn in fns: + self._cached_packages.append(pkg) + return self._cached_packages + elif name == 'packages_dynamic': + if self._cached_packages_dynamic is None: + self._cached_packages_dynamic = [] + for pkg, fns in self._recipecache.packages_dynamic.items(): + if self.fn in fns: + self._cached_packages_dynamic.append(pkg) + return self._cached_packages_dynamic + elif name == 'rprovides': + if self._cached_rprovides is None: + self._cached_rprovides = [] + for pkg, fns in self._recipecache.rproviders.items(): + if self.fn in fns: + self._cached_rprovides.append(pkg) + return self._cached_rprovides + else: + raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name)) + def inherits(self, only_recipe=False): + """ + Get the inherited classes for a recipe. Returns the class names only. + Parameters: + only_recipe: True to return only the classes inherited by the recipe + itself, False to return all classes inherited within + the context for the recipe (which includes globally + inherited classes). 
+ """ + if only_recipe: + global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')] + else: + global_inherit = [] + for clsfile in self.inherit_files: + if only_recipe and clsfile in global_inherit: + continue + clsname = os.path.splitext(os.path.basename(clsfile))[0] + yield clsname + def __str__(self): + return '%s' % self.pn + + class Tinfoil: + """ + Tinfoil - an API for scripts and utilities to query + BitBake internals and perform build operations. + """ def __init__(self, output=sys.stdout, tracking=False, setup_logging=True): + """ + Create a new tinfoil object. + Parameters: + output: specifies where console output should be sent. Defaults + to sys.stdout. + tracking: True to enable variable history tracking, False to + disable it (default). Enabling this has a minor + performance impact so typically it isn't enabled + unless you need to query variable history. + setup_logging: True to setup a logger so that things like + bb.warn() will work immediately and timeout warnings + are visible; False to let BitBake do this itself. 
+ """ self.logger = logging.getLogger('BitBake') self.config_data = None self.cooker = None self.tracking = tracking self.ui_module = None self.server_connection = None + self.recipes_parsed = False + self.quiet = 0 + self.oldhandlers = self.logger.handlers[:] if setup_logging: # This is the *client-side* logger, nothing to do with # logging messages from the server bb.msg.logger_create('BitBake', output) + self.localhandlers = [] + for handler in self.logger.handlers: + if handler not in self.oldhandlers: + self.localhandlers.append(handler) def __enter__(self): return self @@ -228,19 +338,61 @@ class Tinfoil: def __exit__(self, type, value, traceback): self.shutdown() - def prepare(self, config_only=False, config_params=None, quiet=0): + def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None): + """ + Prepares the underlying BitBake system to be used via tinfoil. + This function must be called prior to calling any of the other + functions in the API. + NOTE: if you call prepare() you must absolutely call shutdown() + before your code terminates. You can use a "with" block to ensure + this happens e.g. + + with bb.tinfoil.Tinfoil() as tinfoil: + tinfoil.prepare() + ... + + Parameters: + config_only: True to read only the configuration and not load + the cache / parse recipes. This is useful if you just + want to query the value of a variable at the global + level or you want to do anything else that doesn't + involve knowing anything about the recipes in the + current configuration. False loads the cache / parses + recipes. + config_params: optionally specify your own configuration + parameters. If not specified an instance of + TinfoilConfigParameters will be created internally. + quiet: quiet level controlling console output - equivalent + to bitbake's -q/--quiet option. Default of 0 gives + the same output level as normal bitbake execution. 
+ extra_features: extra features to be added to the feature + set requested from the server. See + CookerFeatures._feature_list for possible + features. + """ + self.quiet = quiet + if self.tracking: extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING] else: extrafeatures = [] + if extra_features: + extrafeatures += extra_features + if not config_params: config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet) cookerconfig = CookerConfiguration() cookerconfig.setConfigParameters(config_params) - server, self.server_connection, ui_module = setup_bitbake(config_params, + if not config_only: + # Disable local loggers because the UI module is going to set up its own + for handler in self.localhandlers: + self.logger.handlers.remove(handler) + self.localhandlers = [] + + self.server_connection, ui_module = setup_bitbake(config_params, cookerconfig, extrafeatures) @@ -266,6 +418,7 @@ class Tinfoil: self.run_command('parseConfiguration') else: self.run_actions(config_params) + self.recipes_parsed = True self.config_data = bb.data.init() connector = TinfoilDataStoreConnector(self, None) @@ -285,7 +438,13 @@ class Tinfoil: def parseRecipes(self): """ - Force a parse of all recipes. Normally you should specify + Legacy function - use parse_recipes() instead. + """ + self.parse_recipes() + + def parse_recipes(self): + """ + Load information on all recipes. Normally you should specify config_only=False when calling prepare() instead of using this function; this function is designed for situations where you need to initialise Tinfoil and use it with config_only=True first and @@ -293,6 +452,7 @@ class Tinfoil: """ config_params = TinfoilConfigParameters(config_only=False) self.run_actions(config_params) + self.recipes_parsed = True def run_command(self, command, *params): """ @@ -339,9 +499,16 @@ class Tinfoil: return self.server_connection.events.waitEvent(timeout) def get_overlayed_recipes(self): + """ + Find recipes which are overlayed (i.e. 
where recipes exist in multiple layers) + """ return defaultdict(list, self.run_command('getOverlayedRecipes')) def get_skipped_recipes(self): + """ + Find recipes which were skipped (i.e. SkipRecipe was raised + during parsing). + """ return OrderedDict(self.run_command('getSkippedRecipes')) def get_all_providers(self): @@ -374,8 +541,77 @@ class Tinfoil: return best[3] def get_file_appends(self, fn): + """ + Find the bbappends for a recipe file + """ return self.run_command('getFileAppends', fn) + def all_recipes(self, mc='', sort=True): + """ + Enable iterating over all recipes in the current configuration. + Returns an iterator over TinfoilRecipeInfo objects created on demand. + Parameters: + mc: The multiconfig, default of '' uses the main configuration. + sort: True to sort recipes alphabetically (default), False otherwise + """ + recipecache = self.cooker.recipecaches[mc] + if sort: + recipes = sorted(recipecache.pkg_pn.items()) + else: + recipes = recipecache.pkg_pn.items() + for pn, fns in recipes: + prov = self.find_best_provider(pn) + recipe = TinfoilRecipeInfo(recipecache, + self.config_data, + pn=pn, + fn=prov[3], + fns=fns) + yield recipe + + def all_recipe_files(self, mc='', variants=True, preferred_only=False): + """ + Enable iterating over all recipe files in the current configuration. + Returns an iterator over file paths. + Parameters: + mc: The multiconfig, default of '' uses the main configuration. 
+ variants: True to include variants of recipes created through + BBCLASSEXTEND (default) or False to exclude them + preferred_only: True to include only the preferred recipe where + multiple exist providing the same PN, False to list + all recipes + """ + recipecache = self.cooker.recipecaches[mc] + if preferred_only: + files = [] + for pn in recipecache.pkg_pn.keys(): + prov = self.find_best_provider(pn) + files.append(prov[3]) + else: + files = recipecache.pkg_fn.keys() + for fn in sorted(files): + if not variants and fn.startswith('virtual:'): + continue + yield fn + + + def get_recipe_info(self, pn, mc=''): + """ + Get information on a specific recipe in the current configuration by name (PN). + Returns a TinfoilRecipeInfo object created on demand. + Parameters: + mc: The multiconfig, default of '' uses the main configuration. + """ + recipecache = self.cooker.recipecaches[mc] + prov = self.find_best_provider(pn) + fn = prov[3] + actual_pn = recipecache.pkg_fn[fn] + recipe = TinfoilRecipeInfo(recipecache, + self.config_data, + pn=actual_pn, + fn=fn, + fns=recipecache.pkg_pn[actual_pn]) + return recipe + def parse_recipe(self, pn): """ Parse the specified recipe and return a datastore object @@ -399,26 +635,199 @@ class Tinfoil: specify config_data then you cannot use a virtual specification for fn. 
""" - if appends and appendlist == []: - appends = False - if config_data: - dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data) - dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr) - else: - dscon = self.run_command('parseRecipeFile', fn, appends, appendlist) - if dscon: - return self._reconvert_type(dscon, 'DataStoreConnectionHandle') - else: - return None + if self.tracking: + # Enable history tracking just for the parse operation + self.run_command('enableDataTracking') + try: + if appends and appendlist == []: + appends = False + if config_data: + dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data) + dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr) + else: + dscon = self.run_command('parseRecipeFile', fn, appends, appendlist) + if dscon: + return self._reconvert_type(dscon, 'DataStoreConnectionHandle') + else: + return None + finally: + if self.tracking: + self.run_command('disableDataTracking') - def build_file(self, buildfile, task): + def build_file(self, buildfile, task, internal=True): """ Runs the specified task for just a single recipe (i.e. no dependencies). - This is equivalent to bitbake -b, except no warning will be printed. + This is equivalent to bitbake -b, except with the default internal=True + no warning about dependencies will be produced, normal info messages + from the runqueue will be silenced and BuildInit, BuildStarted and + BuildCompleted events will not be fired. """ - return self.run_command('buildFile', buildfile, task, True) + return self.run_command('buildFile', buildfile, task, internal) + + def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None): + """ + Builds the specified targets. This is equivalent to a normal invocation + of bitbake. Has built-in event handling which is enabled by default and + can be extended if needed. + Parameters: + targets: + One or more targets to build. 
Can be a list or a + space-separated string. + task: + The task to run; if None then the value of BB_DEFAULT_TASK + will be used. Default None. + handle_events: + True to handle events in a similar way to normal bitbake + invocation with knotty; False to return immediately (on the + assumption that the caller will handle the events instead). + Default True. + extra_events: + An optional list of events to add to the event mask (if + handle_events=True). If you add events here you also need + to specify a callback function in event_callback that will + handle the additional events. Default None. + event_callback: + An optional function taking a single parameter which + will be called first upon receiving any event (if + handle_events=True) so that the caller can override or + extend the event handling. Default None. + """ + if isinstance(targets, str): + targets = targets.split() + if not task: + task = self.config_data.getVar('BB_DEFAULT_TASK') + + if handle_events: + # A reasonable set of default events matching up with those we handle below + eventmask = [ + 'bb.event.BuildStarted', + 'bb.event.BuildCompleted', + 'logging.LogRecord', + 'bb.event.NoProvider', + 'bb.command.CommandCompleted', + 'bb.command.CommandFailed', + 'bb.build.TaskStarted', + 'bb.build.TaskFailed', + 'bb.build.TaskSucceeded', + 'bb.build.TaskFailedSilent', + 'bb.build.TaskProgress', + 'bb.runqueue.runQueueTaskStarted', + 'bb.runqueue.sceneQueueTaskStarted', + 'bb.event.ProcessStarted', + 'bb.event.ProcessProgress', + 'bb.event.ProcessFinished', + ] + if extra_events: + eventmask.extend(extra_events) + ret = self.set_event_mask(eventmask) + + includelogs = self.config_data.getVar('BBINCLUDELOGS') + loglines = self.config_data.getVar('BBINCLUDELOGS_LINES') + + ret = self.run_command('buildTargets', targets, task) + if handle_events: + result = False + # Borrowed from knotty, instead somewhat hackily we use the helper + # as the object to store "shutdown" on + helper = 
bb.ui.uihelper.BBUIHelper() + # We set up logging optionally in the constructor so now we need to + # grab the handlers to pass to TerminalFilter + console = None + errconsole = None + for handler in self.logger.handlers: + if isinstance(handler, logging.StreamHandler): + if handler.stream == sys.stdout: + console = handler + elif handler.stream == sys.stderr: + errconsole = handler + format_str = "%(levelname)s: %(message)s" + format = bb.msg.BBLogFormatter(format_str) + helper.shutdown = 0 + parseprogress = None + termfilter = bb.ui.knotty.TerminalFilter(helper, helper, console, errconsole, format, quiet=self.quiet) + try: + while True: + try: + event = self.wait_event(0.25) + if event: + if event_callback and event_callback(event): + continue + if helper.eventHandler(event): + if isinstance(event, bb.build.TaskFailedSilent): + logger.warning("Logfile for failed setscene task is %s" % event.logfile) + elif isinstance(event, bb.build.TaskFailed): + bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter) + continue + if isinstance(event, bb.event.ProcessStarted): + if self.quiet > 1: + continue + parseprogress = bb.ui.knotty.new_progress(event.processname, event.total) + parseprogress.start(False) + continue + if isinstance(event, bb.event.ProcessProgress): + if self.quiet > 1: + continue + if parseprogress: + parseprogress.update(event.progress) + else: + bb.warn("Got ProcessProgress event for someting that never started?") + continue + if isinstance(event, bb.event.ProcessFinished): + if self.quiet > 1: + continue + if parseprogress: + parseprogress.finish() + parseprogress = None + continue + if isinstance(event, bb.command.CommandCompleted): + result = True + break + if isinstance(event, bb.command.CommandFailed): + self.logger.error(str(event)) + result = False + break + if isinstance(event, logging.LogRecord): + if event.taskpid == 0 or event.levelno > logging.INFO: + self.logger.handle(event) + continue + if isinstance(event, 
bb.event.NoProvider): + self.logger.error(str(event)) + result = False + break + + elif helper.shutdown > 1: + break + termfilter.updateFooter() + except KeyboardInterrupt: + termfilter.clearFooter() + if helper.shutdown == 1: + print("\nSecond Keyboard Interrupt, stopping...\n") + ret = self.run_command("stateForceShutdown") + if ret and ret[2]: + self.logger.error("Unable to cleanly stop: %s" % ret[2]) + elif helper.shutdown == 0: + print("\nKeyboard Interrupt, closing down...\n") + interrupted = True + ret = self.run_command("stateShutdown") + if ret and ret[2]: + self.logger.error("Unable to cleanly shutdown: %s" % ret[2]) + helper.shutdown = helper.shutdown + 1 + termfilter.clearFooter() + finally: + termfilter.finish() + if helper.failed_tasks: + result = False + return result + else: + return ret def shutdown(self): + """ + Shut down tinfoil. Disconnects from the server and gracefully + releases any associated resources. You must call this function if + prepare() has been called, or use a with... block when you create + the tinfoil object which will ensure that it gets called. 
+ """ if self.server_connection: self.run_command('clientComplete') _server_connections.remove(self.server_connection) @@ -426,6 +835,12 @@ class Tinfoil: self.server_connection.terminate() self.server_connection = None + # Restore logging handlers to how it looked when we started + if self.oldhandlers: + for handler in self.logger.handlers: + if handler not in self.oldhandlers: + self.logger.handlers.remove(handler) + def _reconvert_type(self, obj, origtypename): """ Convert an object back to the right type, in the case diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py index e451c630d..524a5b094 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py @@ -719,7 +719,11 @@ class ORMWrapper(object): def save_build_package_information(self, build_obj, package_info, recipes, built_package): - # assert isinstance(build_obj, Build) + # assert isinstance(build_obj, Build) + + if not 'PN' in package_info.keys(): + # no package data to save (e.g. 
'OPKGN'="lib64-*"|"lib32-*") + return None # create and save the object pname = package_info['PKG'] diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/knotty.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/knotty.py index 82aa7c464..fa88e6ccd 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/knotty.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/knotty.py @@ -207,8 +207,10 @@ class TerminalFilter(object): self.interactive = False bb.note("Unable to use interactive mode for this terminal, using fallback") return - console.addFilter(InteractConsoleLogFilter(self, format)) - errconsole.addFilter(InteractConsoleLogFilter(self, format)) + if console: + console.addFilter(InteractConsoleLogFilter(self, format)) + if errconsole: + errconsole.addFilter(InteractConsoleLogFilter(self, format)) self.main_progress = None @@ -310,6 +312,32 @@ class TerminalFilter(object): fd = sys.stdin.fileno() self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup) +def print_event_log(event, includelogs, loglines, termfilter): + # FIXME refactor this out further + logfile = event.logfile + if logfile and os.path.exists(logfile): + termfilter.clearFooter() + bb.error("Logfile of failure stored in: %s" % logfile) + if includelogs and not event.errprinted: + print("Log data follows:") + f = open(logfile, "r") + lines = [] + while True: + l = f.readline() + if l == '': + break + l = l.rstrip() + if loglines: + lines.append(' | %s' % l) + if len(lines) > int(loglines): + lines.pop(0) + else: + print('| %s' % l) + f.close() + if lines: + for line in lines: + print(line) + def _log_settings_from_server(server, observe_only): # Get values of variables which control our output includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"]) @@ -342,6 +370,9 @@ _evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.Lo def main(server, eventHandler, params, tf = TerminalFilter): + if not params.observe_only: + params.updateToServer(server, 
os.environ.copy()) + includelogs, loglines, consolelogfile = _log_settings_from_server(server, params.observe_only) if sys.stdin.isatty() and sys.stdout.isatty(): @@ -365,8 +396,9 @@ def main(server, eventHandler, params, tf = TerminalFilter): bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr) console.setFormatter(format) errconsole.setFormatter(format) - logger.addHandler(console) - logger.addHandler(errconsole) + if not bb.msg.has_console_handler(logger): + logger.addHandler(console) + logger.addHandler(errconsole) bb.utils.set_process_name("KnottyUI") @@ -395,7 +427,6 @@ def main(server, eventHandler, params, tf = TerminalFilter): universe = False if not params.observe_only: params.updateFromServer(server) - params.updateToServer(server, os.environ.copy()) cmdline = params.parseActions() if not cmdline: print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.") @@ -471,11 +502,11 @@ def main(server, eventHandler, params, tf = TerminalFilter): continue # Prefix task messages with recipe/task - if event.taskpid in helper.running_tasks: + if event.taskpid in helper.running_tasks and event.levelno != format.PLAIN: taskinfo = helper.running_tasks[event.taskpid] event.msg = taskinfo['title'] + ': ' + event.msg if hasattr(event, 'fn'): - event.msg = event.fn + ': ' + event.msg + event.msg = event.fn + ': ' + event.msg logger.handle(event) continue @@ -484,29 +515,7 @@ def main(server, eventHandler, params, tf = TerminalFilter): continue if isinstance(event, bb.build.TaskFailed): return_value = 1 - logfile = event.logfile - if logfile and os.path.exists(logfile): - termfilter.clearFooter() - bb.error("Logfile of failure stored in: %s" % logfile) - if includelogs and not event.errprinted: - print("Log data follows:") - f = open(logfile, "r") - lines = [] - while True: - l = f.readline() - if l == '': - break - l = l.rstrip() - if loglines: - lines.append(' | %s' % l) - if len(lines) > int(loglines): - 
lines.pop(0) - else: - print('| %s' % l) - f.close() - if lines: - for line in lines: - print(line) + print_event_log(event, includelogs, loglines, termfilter) if isinstance(event, bb.build.TaskBase): logger.info(event._message) continue @@ -559,7 +568,7 @@ def main(server, eventHandler, params, tf = TerminalFilter): return_value = event.exitcode if event.error: errors = errors + 1 - logger.error("Command execution failed: %s", event.error) + logger.error(str(event)) main.shutdown = 2 continue if isinstance(event, bb.command.CommandExit): @@ -570,39 +579,16 @@ def main(server, eventHandler, params, tf = TerminalFilter): main.shutdown = 2 continue if isinstance(event, bb.event.MultipleProviders): - logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "", - event._item, - ", ".join(event._candidates)) - rtime = "" - if event._is_runtime: - rtime = "R" - logger.info("consider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, event._item)) + logger.info(str(event)) continue if isinstance(event, bb.event.NoProvider): - if event._runtime: - r = "R" - else: - r = "" - - extra = '' - if not event._reasons: - if event._close_matches: - extra = ". 
Close matches:\n %s" % '\n '.join(event._close_matches) - # For universe builds, only show these as warnings, not errors - h = logger.warning if not universe: return_value = 1 errors = errors + 1 - h = logger.error - - if event._dependees: - h("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s", r, event._item, ", ".join(event._dependees), r, extra) + logger.error(str(event)) else: - h("Nothing %sPROVIDES '%s'%s", r, event._item, extra) - if event._reasons: - for reason in event._reasons: - h("%s", reason) + logger.warning(str(event)) continue if isinstance(event, bb.runqueue.sceneQueueTaskStarted): @@ -624,13 +610,11 @@ def main(server, eventHandler, params, tf = TerminalFilter): if isinstance(event, bb.runqueue.runQueueTaskFailed): return_value = 1 taskfailures.append(event.taskstring) - logger.error("Task (%s) failed with exit code '%s'", - event.taskstring, event.exitcode) + logger.error(str(event)) continue if isinstance(event, bb.runqueue.sceneQueueTaskFailed): - logger.warning("Setscene task (%s) failed with exit code '%s' - real task will be run instead", - event.taskstring, event.exitcode) + logger.warning(str(event)) continue if isinstance(event, bb.event.DepTreeGenerated): @@ -663,6 +647,7 @@ def main(server, eventHandler, params, tf = TerminalFilter): bb.event.MetadataEvent, bb.event.StampUpdate, bb.event.ConfigParsed, + bb.event.MultiConfigParsed, bb.event.RecipeParsed, bb.event.RecipePreFinalise, bb.runqueue.runQueueEvent, diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/ncurses.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/ncurses.py index ca845a32a..8690c529c 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/ncurses.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/ncurses.py @@ -315,7 +315,7 @@ class NCursesUI: # also allow them to now exit with a single ^C shutdown = 2 if isinstance(event, bb.command.CommandFailed): - mw.appendText("Command execution failed: %s" % event.error) + mw.appendText(str(event)) 
time.sleep(2) exitflag = True if isinstance(event, bb.command.CommandExit): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/taskexp.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/taskexp.py index 9d14ecefa..0e8e9d4cf 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/taskexp.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/taskexp.py @@ -63,7 +63,9 @@ class PackageReverseDepView(Gtk.TreeView): self.current = None self.filter_model = model.filter_new() self.filter_model.set_visible_func(self._filter) - self.set_model(self.filter_model) + self.sort_model = self.filter_model.sort_new_with_model() + self.sort_model.set_sort_column_id(COL_DEP_PARENT, Gtk.SortType.ASCENDING) + self.set_model(self.sort_model) self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT)) def _filter(self, model, iter, data): @@ -286,23 +288,7 @@ def main(server, eventHandler, params): continue if isinstance(event, bb.event.NoProvider): - if event._runtime: - r = "R" - else: - r = "" - - extra = '' - if not event._reasons: - if event._close_matches: - extra = ". 
Close matches:\n %s" % '\n '.join(event._close_matches) - - if event._dependees: - print("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, event._item, ", ".join(event._dependees), r, extra)) - else: - print("Nothing %sPROVIDES '%s'%s" % (r, event._item, extra)) - if event._reasons: - for reason in event._reasons: - print(reason) + print(str(event)) _, error = server.runCommand(["stateShutdown"]) if error: @@ -310,7 +296,7 @@ def main(server, eventHandler, params): break if isinstance(event, bb.command.CommandFailed): - print("Command execution failed: %s" % event.error) + print(str(event)) return event.exitcode if isinstance(event, bb.command.CommandExit): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/toasterui.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/toasterui.py index 71f04fa5c..88cec3759 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/toasterui.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/toasterui.py @@ -320,29 +320,13 @@ def main(server, eventHandler, params): if isinstance(event, bb.event.CacheLoadCompleted): continue if isinstance(event, bb.event.MultipleProviders): - logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "", - event._item, - ", ".join(event._candidates)) - logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item) + logger.info(str(event)) continue if isinstance(event, bb.event.NoProvider): errors = errors + 1 - if event._runtime: - r = "R" - else: - r = "" - - if event._dependees: - text = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)" % (r, event._item, ", ".join(event._dependees), r) - else: - text = "Nothing %sPROVIDES '%s'" % (r, event._item) - + text = str(event) logger.error(text) - if event._reasons: - for reason in event._reasons: - logger.error("%s", reason) - text += reason buildinfohelper.store_log_error(text) continue @@ -364,8 +348,7 @@ def main(server, eventHandler, 
params): if isinstance(event, bb.runqueue.runQueueTaskFailed): buildinfohelper.update_and_store_task(event) taskfailures.append(event.taskstring) - logger.error("Task (%s) failed with exit code '%s'", - event.taskstring, event.exitcode) + logger.error(str(event)) continue if isinstance(event, (bb.runqueue.sceneQueueTaskCompleted, bb.runqueue.sceneQueueTaskFailed)): @@ -382,7 +365,7 @@ def main(server, eventHandler, params): if isinstance(event, bb.command.CommandFailed): errors += 1 errorcode = 1 - logger.error("Command execution failed: %s", event.error) + logger.error(str(event)) elif isinstance(event, bb.event.BuildCompleted): buildinfohelper.scan_image_artifacts() buildinfohelper.clone_required_sdk_artifacts() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py index 113fcedea..963c1ea2d 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py @@ -61,6 +61,9 @@ class BBUIHelper: self.running_tasks[event.pid]['progress'] = event.progress self.running_tasks[event.pid]['rate'] = event.rate self.needUpdate = True + else: + return False + return True def getTasks(self): self.needUpdate = False diff --git a/import-layers/yocto-poky/bitbake/lib/bb/utils.py b/import-layers/yocto-poky/bitbake/lib/bb/utils.py index 6a44db57d..c540b49cf 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/utils.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/utils.py @@ -771,13 +771,14 @@ def movefile(src, dest, newmtime = None, sstat = None): return None renamefailed = 1 + # os.rename needs to know the dest path ending with file name + # so append the file name to a path only if it's a dir specified + srcfname = os.path.basename(src) + destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \ + else dest + if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]: try: - # os.rename needs to know the dest path ending with file name - # so append the 
file name to a path only if it's a dir specified - srcfname = os.path.basename(src) - destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \ - else dest os.rename(src, destpath) renamefailed = 0 except Exception as e: @@ -791,8 +792,8 @@ def movefile(src, dest, newmtime = None, sstat = None): didcopy = 0 if stat.S_ISREG(sstat[stat.ST_MODE]): try: # For safety copy then move it over. - shutil.copyfile(src, dest + "#new") - os.rename(dest + "#new", dest) + shutil.copyfile(src, destpath + "#new") + os.rename(destpath + "#new", destpath) didcopy = 1 except Exception as e: print('movefile: copy', src, '->', dest, 'failed.', e) @@ -813,9 +814,9 @@ def movefile(src, dest, newmtime = None, sstat = None): return None if newmtime: - os.utime(dest, (newmtime, newmtime)) + os.utime(destpath, (newmtime, newmtime)) else: - os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) + os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) newmtime = sstat[stat.ST_MTIME] return newmtime @@ -1502,7 +1503,7 @@ def export_proxies(d): def load_plugins(logger, plugins, pluginpath): def load_plugin(name): - logger.debug('Loading plugin %s' % name) + logger.debug(1, 'Loading plugin %s' % name) fp, pathname, description = imp.find_module(name, [pluginpath]) try: return imp.load_module(name, fp, pathname, description) @@ -1510,7 +1511,7 @@ def load_plugins(logger, plugins, pluginpath): if fp: fp.close() - logger.debug('Loading plugins from %s...' % pluginpath) + logger.debug(1, 'Loading plugins from %s...' % pluginpath) expanded = (glob.glob(os.path.join(pluginpath, '*' + ext)) for ext in python_extensions) |