From eb8dc40360f0cfef56fb6947cc817a547d6d9bc6 Mon Sep 17 00:00:00 2001
From: Dave Cobbley
Date: Tue, 14 Aug 2018 10:05:37 -0700
Subject: [Subtree] Removing import-layers directory

As part of the move to subtrees, need to bring all the import layers
content to the top level.

Change-Id: I4a163d10898cbc6e11c27f776f60e1a470049d8f
Signed-off-by: Dave Cobbley
Signed-off-by: Brad Bishop
---
 poky/bitbake/lib/bb/COW.py | 319 +++
 poky/bitbake/lib/bb/__init__.py | 144 ++
 poky/bitbake/lib/bb/build.py | 913 +++++++
 poky/bitbake/lib/bb/cache.py | 891 +++++++
 poky/bitbake/lib/bb/cache_extra.py | 75 +
 poky/bitbake/lib/bb/checksum.py | 134 +
 poky/bitbake/lib/bb/codeparser.py | 476 ++++
 poky/bitbake/lib/bb/command.py | 765 ++++++
 poky/bitbake/lib/bb/compat.py | 6 +
 poky/bitbake/lib/bb/cooker.py | 2161 ++++++++++++++++
 poky/bitbake/lib/bb/cookerdata.py | 434 ++++
 poky/bitbake/lib/bb/daemonize.py | 82 +
 poky/bitbake/lib/bb/data.py | 403 +++
 poky/bitbake/lib/bb/data_smart.py | 1037 ++++++++
 poky/bitbake/lib/bb/event.py | 831 ++++++
 poky/bitbake/lib/bb/exceptions.py | 91 +
 poky/bitbake/lib/bb/fetch2/__init__.py | 1864 ++++++++++++++
 poky/bitbake/lib/bb/fetch2/bzr.py | 139 +
 poky/bitbake/lib/bb/fetch2/clearcase.py | 260 ++
 poky/bitbake/lib/bb/fetch2/cvs.py | 172 ++
 poky/bitbake/lib/bb/fetch2/git.py | 664 +++++
 poky/bitbake/lib/bb/fetch2/gitannex.py | 91 +
 poky/bitbake/lib/bb/fetch2/gitsm.py | 135 +
 poky/bitbake/lib/bb/fetch2/hg.py | 270 ++
 poky/bitbake/lib/bb/fetch2/local.py | 119 +
 poky/bitbake/lib/bb/fetch2/npm.py | 309 +++
 poky/bitbake/lib/bb/fetch2/osc.py | 132 +
 poky/bitbake/lib/bb/fetch2/perforce.py | 209 ++
 poky/bitbake/lib/bb/fetch2/repo.py | 97 +
 poky/bitbake/lib/bb/fetch2/s3.py | 98 +
 poky/bitbake/lib/bb/fetch2/sftp.py | 125 +
 poky/bitbake/lib/bb/fetch2/ssh.py | 125 +
 poky/bitbake/lib/bb/fetch2/svn.py | 193 ++
 poky/bitbake/lib/bb/fetch2/wget.py | 626 +++++
 poky/bitbake/lib/bb/main.py | 508 ++++
 poky/bitbake/lib/bb/methodpool.py | 40 +
 poky/bitbake/lib/bb/monitordisk.py | 268 ++
 poky/bitbake/lib/bb/msg.py | 225 ++
 poky/bitbake/lib/bb/namedtuple_with_abc.py | 255 ++
 poky/bitbake/lib/bb/parse/__init__.py | 175 ++
 poky/bitbake/lib/bb/parse/ast.py | 442 ++++
 poky/bitbake/lib/bb/parse/parse_py/BBHandler.py | 251 ++
 poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py | 210 ++
 poky/bitbake/lib/bb/parse/parse_py/__init__.py | 33 +
 poky/bitbake/lib/bb/persist_data.py | 214 ++
 poky/bitbake/lib/bb/process.py | 179 ++
 poky/bitbake/lib/bb/progress.py | 276 ++
 poky/bitbake/lib/bb/providers.py | 430 ++++
 poky/bitbake/lib/bb/pysh/__init__.py | 0
 poky/bitbake/lib/bb/pysh/builtin.py | 710 ++++++
 poky/bitbake/lib/bb/pysh/interp.py | 1367 ++++++++++
 poky/bitbake/lib/bb/pysh/lsprof.py | 116 +
 poky/bitbake/lib/bb/pysh/pysh.py | 167 ++
 poky/bitbake/lib/bb/pysh/pyshlex.py | 888 +++++++
 poky/bitbake/lib/bb/pysh/pyshyacc.py | 779 ++++++
 poky/bitbake/lib/bb/pysh/sherrors.py | 41 +
 poky/bitbake/lib/bb/pysh/subprocess_fix.py | 77 +
 poky/bitbake/lib/bb/remotedata.py | 116 +
 poky/bitbake/lib/bb/runqueue.py | 2682 ++++++++++++++++++++
 poky/bitbake/lib/bb/server/__init__.py | 21 +
 poky/bitbake/lib/bb/server/process.py | 624 +++++
 poky/bitbake/lib/bb/server/xmlrpcclient.py | 154 ++
 poky/bitbake/lib/bb/server/xmlrpcserver.py | 158 ++
 poky/bitbake/lib/bb/siggen.py | 729 ++++++
 poky/bitbake/lib/bb/taskdata.py | 578 +++++
 poky/bitbake/lib/bb/tests/__init__.py | 0
 poky/bitbake/lib/bb/tests/codeparser.py | 428 ++++
 poky/bitbake/lib/bb/tests/cow.py | 136 +
 poky/bitbake/lib/bb/tests/data.py | 607 +++++
 poky/bitbake/lib/bb/tests/event.py | 986 +++++++
 poky/bitbake/lib/bb/tests/fetch.py | 1573 ++++++++++++
 poky/bitbake/lib/bb/tests/parse.py | 185 ++
 poky/bitbake/lib/bb/tests/utils.py | 603 +++++
 poky/bitbake/lib/bb/tinfoil.py | 900 +++++++
 poky/bitbake/lib/bb/ui/__init__.py | 17 +
 poky/bitbake/lib/bb/ui/buildinfohelper.py | 2002 +++++++++++++++
 .../lib/bb/ui/icons/images/images_display.png | Bin 0 -> 6898 bytes
 .../lib/bb/ui/icons/images/images_hover.png | Bin 0 -> 7051 bytes
 .../lib/bb/ui/icons/indicators/add-hover.png | Bin 0 -> 1212 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/add.png | Bin 0 -> 1176 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/alert.png | Bin 0 -> 3954 bytes
 .../lib/bb/ui/icons/indicators/confirmation.png | Bin 0 -> 5789 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/denied.png | Bin 0 -> 3955 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/error.png | Bin 0 -> 6482 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/info.png | Bin 0 -> 3311 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/issues.png | Bin 0 -> 4549 bytes
 .../bitbake/lib/bb/ui/icons/indicators/refresh.png | Bin 0 -> 5250 bytes
 .../lib/bb/ui/icons/indicators/remove-hover.png | Bin 0 -> 2809 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/remove.png | Bin 0 -> 1971 bytes
 poky/bitbake/lib/bb/ui/icons/indicators/tick.png | Bin 0 -> 4563 bytes
 poky/bitbake/lib/bb/ui/icons/info/info_display.png | Bin 0 -> 4117 bytes
 poky/bitbake/lib/bb/ui/icons/info/info_hover.png | Bin 0 -> 4167 bytes
 .../lib/bb/ui/icons/layers/layers_display.png | Bin 0 -> 4840 bytes
 .../lib/bb/ui/icons/layers/layers_hover.png | Bin 0 -> 5257 bytes
 .../lib/bb/ui/icons/packages/packages_display.png | Bin 0 -> 7011 bytes
 .../lib/bb/ui/icons/packages/packages_hover.png | Bin 0 -> 7121 bytes
 .../lib/bb/ui/icons/recipe/recipe_display.png | Bin 0 -> 4723 bytes
 .../lib/bb/ui/icons/recipe/recipe_hover.png | Bin 0 -> 4866 bytes
 .../lib/bb/ui/icons/settings/settings_display.png | Bin 0 -> 6076 bytes
 .../lib/bb/ui/icons/settings/settings_hover.png | Bin 0 -> 6269 bytes
 .../bb/ui/icons/templates/templates_display.png | Bin 0 -> 5651 bytes
 .../lib/bb/ui/icons/templates/templates_hover.png | Bin 0 -> 5791 bytes
 poky/bitbake/lib/bb/ui/knotty.py | 728 ++++++
 poky/bitbake/lib/bb/ui/ncurses.py | 373 +++
 poky/bitbake/lib/bb/ui/taskexp.py | 328 +++
 poky/bitbake/lib/bb/ui/toasterui.py | 487 ++++
 poky/bitbake/lib/bb/ui/uievent.py | 161 ++
 poky/bitbake/lib/bb/ui/uihelper.py | 70 +
 poky/bitbake/lib/bb/utils.py | 1539 +++++++++
 109 files changed, 38226 insertions(+)
 create mode 100644 poky/bitbake/lib/bb/COW.py
 create mode 100644 poky/bitbake/lib/bb/__init__.py
 create mode 100644 poky/bitbake/lib/bb/build.py
 create mode 100644 poky/bitbake/lib/bb/cache.py
 create mode 100644 poky/bitbake/lib/bb/cache_extra.py
 create mode 100644 poky/bitbake/lib/bb/checksum.py
 create mode 100644 poky/bitbake/lib/bb/codeparser.py
 create mode 100644 poky/bitbake/lib/bb/command.py
 create mode 100644 poky/bitbake/lib/bb/compat.py
 create mode 100644 poky/bitbake/lib/bb/cooker.py
 create mode 100644 poky/bitbake/lib/bb/cookerdata.py
 create mode 100644 poky/bitbake/lib/bb/daemonize.py
 create mode 100644 poky/bitbake/lib/bb/data.py
 create mode 100644 poky/bitbake/lib/bb/data_smart.py
 create mode 100644 poky/bitbake/lib/bb/event.py
 create mode 100644 poky/bitbake/lib/bb/exceptions.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/__init__.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/bzr.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/clearcase.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/cvs.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/git.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/gitannex.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/gitsm.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/hg.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/local.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/npm.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/osc.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/perforce.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/repo.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/s3.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/sftp.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/ssh.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/svn.py
 create mode 100644 poky/bitbake/lib/bb/fetch2/wget.py
 create mode 100755 poky/bitbake/lib/bb/main.py
 create mode 100644 poky/bitbake/lib/bb/methodpool.py
 create mode 100644 poky/bitbake/lib/bb/monitordisk.py
 create mode 100644 poky/bitbake/lib/bb/msg.py
 create mode 100644 poky/bitbake/lib/bb/namedtuple_with_abc.py
 create mode 100644 poky/bitbake/lib/bb/parse/__init__.py
 create mode 100644 poky/bitbake/lib/bb/parse/ast.py
 create mode 100644 poky/bitbake/lib/bb/parse/parse_py/BBHandler.py
 create mode 100644 poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py
 create mode 100644 poky/bitbake/lib/bb/parse/parse_py/__init__.py
 create mode 100644 poky/bitbake/lib/bb/persist_data.py
 create mode 100644 poky/bitbake/lib/bb/process.py
 create mode 100644 poky/bitbake/lib/bb/progress.py
 create mode 100644 poky/bitbake/lib/bb/providers.py
 create mode 100644 poky/bitbake/lib/bb/pysh/__init__.py
 create mode 100644 poky/bitbake/lib/bb/pysh/builtin.py
 create mode 100644 poky/bitbake/lib/bb/pysh/interp.py
 create mode 100644 poky/bitbake/lib/bb/pysh/lsprof.py
 create mode 100644 poky/bitbake/lib/bb/pysh/pysh.py
 create mode 100644 poky/bitbake/lib/bb/pysh/pyshlex.py
 create mode 100644 poky/bitbake/lib/bb/pysh/pyshyacc.py
 create mode 100644 poky/bitbake/lib/bb/pysh/sherrors.py
 create mode 100644 poky/bitbake/lib/bb/pysh/subprocess_fix.py
 create mode 100644 poky/bitbake/lib/bb/remotedata.py
 create mode 100644 poky/bitbake/lib/bb/runqueue.py
 create mode 100644 poky/bitbake/lib/bb/server/__init__.py
 create mode 100644 poky/bitbake/lib/bb/server/process.py
 create mode 100644 poky/bitbake/lib/bb/server/xmlrpcclient.py
 create mode 100644 poky/bitbake/lib/bb/server/xmlrpcserver.py
 create mode 100644 poky/bitbake/lib/bb/siggen.py
 create mode 100644 poky/bitbake/lib/bb/taskdata.py
 create mode 100644 poky/bitbake/lib/bb/tests/__init__.py
 create mode 100644 poky/bitbake/lib/bb/tests/codeparser.py
 create mode 100644 poky/bitbake/lib/bb/tests/cow.py
 create mode 100644 poky/bitbake/lib/bb/tests/data.py
 create mode 100644 poky/bitbake/lib/bb/tests/event.py
 create mode 100644 poky/bitbake/lib/bb/tests/fetch.py
 create mode 100644 poky/bitbake/lib/bb/tests/parse.py
 create mode 100644 poky/bitbake/lib/bb/tests/utils.py
 create mode 100644 poky/bitbake/lib/bb/tinfoil.py
 create mode 100644 poky/bitbake/lib/bb/ui/__init__.py
 create mode 100644 poky/bitbake/lib/bb/ui/buildinfohelper.py
 create mode 100644 poky/bitbake/lib/bb/ui/icons/images/images_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/images/images_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/add-hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/add.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/alert.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/confirmation.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/denied.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/error.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/info.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/issues.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/refresh.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/remove-hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/remove.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/indicators/tick.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/info/info_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/info/info_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/layers/layers_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/layers/layers_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/packages/packages_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/packages/packages_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/recipe/recipe_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/recipe/recipe_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/settings/settings_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/settings/settings_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/templates/templates_display.png
 create mode 100644 poky/bitbake/lib/bb/ui/icons/templates/templates_hover.png
 create mode 100644 poky/bitbake/lib/bb/ui/knotty.py
 create mode 100644 poky/bitbake/lib/bb/ui/ncurses.py
 create mode 100644 poky/bitbake/lib/bb/ui/taskexp.py
 create mode 100644 poky/bitbake/lib/bb/ui/toasterui.py
 create mode 100644 poky/bitbake/lib/bb/ui/uievent.py
 create mode 100644 poky/bitbake/lib/bb/ui/uihelper.py
 create mode 100644 poky/bitbake/lib/bb/utils.py

(limited to 'poky/bitbake/lib/bb')

diff --git a/poky/bitbake/lib/bb/COW.py b/poky/bitbake/lib/bb/COW.py
new file mode 100644
index 0000000000..bec6208096
--- /dev/null
+++ b/poky/bitbake/lib/bb/COW.py
@@ -0,0 +1,319 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
+#
+# Copyright (C) 2006 Tim Ansell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#Please Note:
+# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
+# Assign a file to __warn__ to get warnings about slow operations.
+#
+
+
+import copy
+import types
+ImmutableTypes = (
+    bool,
+    complex,
+    float,
+    int,
+    tuple,
+    frozenset,
+    str
+)
+
+MUTABLE = "__mutable__"
+
+class COWMeta(type):
+    pass
+
+class COWDictMeta(COWMeta):
+    __warn__ = False
+    __hasmutable__ = False
+    __marker__ = tuple()
+
+    def __str__(cls):
+        # FIXME: I have magic numbers!
+ return "" % (cls.__count__, len(cls.__dict__) - 3) + __repr__ = __str__ + + def cow(cls): + class C(cls): + __count__ = cls.__count__ + 1 + return C + copy = cow + __call__ = cow + + def __setitem__(cls, key, value): + if value is not None and not isinstance(value, ImmutableTypes): + if not isinstance(value, COWMeta): + cls.__hasmutable__ = True + key += MUTABLE + setattr(cls, key, value) + + def __getmutable__(cls, key, readonly=False): + nkey = key + MUTABLE + try: + return cls.__dict__[nkey] + except KeyError: + pass + + value = getattr(cls, nkey) + if readonly: + return value + + if not cls.__warn__ is False and not isinstance(value, COWMeta): + print("Warning: Doing a copy because %s is a mutable type." % key, file=cls.__warn__) + try: + value = value.copy() + except AttributeError as e: + value = copy.copy(value) + setattr(cls, nkey, value) + return value + + __getmarker__ = [] + def __getreadonly__(cls, key, default=__getmarker__): + """\ + Get a value (even if mutable) which you promise not to change. + """ + return cls.__getitem__(key, default, True) + + def __getitem__(cls, key, default=__getmarker__, readonly=False): + try: + try: + value = getattr(cls, key) + except AttributeError: + value = cls.__getmutable__(key, readonly) + + # This is for values which have been deleted + if value is cls.__marker__: + raise AttributeError("key %s does not exist." % key) + + return value + except AttributeError as e: + if not default is cls.__getmarker__: + return default + + raise KeyError(str(e)) + + def __delitem__(cls, key): + cls.__setitem__(key, cls.__marker__) + + def __revertitem__(cls, key): + if key not in cls.__dict__: + key += MUTABLE + delattr(cls, key) + + def __contains__(cls, key): + return cls.has_key(key) + + def has_key(cls, key): + value = cls.__getreadonly__(key, cls.__marker__) + if value is cls.__marker__: + return False + return True + + def iter(cls, type, readonly=False): + for key in dir(cls): + if key.startswith("__"): + continue + + if key.endswith(MUTABLE): + key = key[:-len(MUTABLE)] + + if type == "keys": + yield key + + try: + if readonly: + value = cls.__getreadonly__(key) + else: + value = cls[key] + except KeyError: + continue + + if type == "values": + yield value + if type == "items": + yield (key, value) + raise StopIteration() + + def iterkeys(cls): + return cls.iter("keys") + def itervalues(cls, readonly=False): + if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: + print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + return cls.iter("values", readonly) + def iteritems(cls, readonly=False): + if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: + print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + return cls.iter("items", readonly) + +class COWSetMeta(COWDictMeta): + def __str__(cls): + # FIXME: I have magic numbers! + return "" % (cls.__count__, len(cls.__dict__) -3) + __repr__ = __str__ + + def cow(cls): + class C(cls): + __count__ = cls.__count__ + 1 + return C + + def add(cls, value): + COWDictMeta.__setitem__(cls, repr(hash(value)), value) + + def remove(cls, value): + COWDictMeta.__delitem__(cls, repr(hash(value))) + + def __in__(cls, value): + return repr(hash(value)) in COWDictMeta + + def iterkeys(cls): + raise TypeError("sets don't have keys") + + def iteritems(cls): + raise TypeError("sets don't have 'items'") + +# These are the actual classes you use! 
+class COWDictBase(object, metaclass = COWDictMeta): + __count__ = 0 + +class COWSetBase(object, metaclass = COWSetMeta): + __count__ = 0 + +if __name__ == "__main__": + import sys + COWDictBase.__warn__ = sys.stderr + a = COWDictBase() + print("a", a) + + a['a'] = 'a' + a['b'] = 'b' + a['dict'] = {} + + b = a.copy() + print("b", b) + b['c'] = 'b' + + print() + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(): + print(x) + print() + + b['dict']['a'] = 'b' + b['a'] = 'c' + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(): + print(x) + print() + + try: + b['dict2'] + except KeyError as e: + print("Okay!") + + a['set'] = COWSetBase() + a['set'].add("o1") + a['set'].add("o1") + a['set'].add("o2") + + print("a", a) + for x in a['set'].itervalues(): + print(x) + print("--") + print("b", b) + for x in b['set'].itervalues(): + print(x) + print() + + b['set'].add('o3') + + print("a", a) + for x in a['set'].itervalues(): + print(x) + print("--") + print("b", b) + for x in b['set'].itervalues(): + print(x) + print() + + a['set2'] = set() + a['set2'].add("o1") + a['set2'].add("o1") + a['set2'].add("o2") + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + del b['b'] + try: + print(b['b']) + except KeyError: + print("Yay! deleted key raises error") + + if 'b' in b: + print("Boo!") + else: + print("Yay - has_key with delete works!") + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + b.__revertitem__('b') + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + b.__revertitem__('dict') + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() diff --git a/poky/bitbake/lib/bb/__init__.py b/poky/bitbake/lib/bb/__init__.py new file mode 100644 index 0000000000..d24adb8eac --- /dev/null +++ b/poky/bitbake/lib/bb/__init__.py @@ -0,0 +1,144 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake Build System Python Library +# +# Copyright (C) 2003 Holger Schurig +# Copyright (C) 2003, 2004 Chris Larson +# +# Based on Gentoo's portage.py. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +__version__ = "1.38.0" + +import sys +if sys.version_info < (3, 4, 0): + raise RuntimeError("Sorry, python 3.4.0 or later is required for this version of bitbake") + + +class BBHandledException(Exception): + """ + The big dilemma for generic bitbake code is what information to give the user + when an exception occurs. 
Any exception inheriting this base exception class + has already provided information to the user via some 'fired' message type such as + an explicitly fired event using bb.fire, or a bb.error message. If bitbake + encounters an exception derived from this class, no backtrace or other information + will be given to the user, its assumed the earlier event provided the relevant information. + """ + pass + +import os +import logging + + +class NullHandler(logging.Handler): + def emit(self, record): + pass + +Logger = logging.getLoggerClass() +class BBLogger(Logger): + def __init__(self, name): + if name.split(".")[0] == "BitBake": + self.debug = self.bbdebug + Logger.__init__(self, name) + + def bbdebug(self, level, msg, *args, **kwargs): + return self.log(logging.DEBUG - level + 1, msg, *args, **kwargs) + + def plain(self, msg, *args, **kwargs): + return self.log(logging.INFO + 1, msg, *args, **kwargs) + + def verbose(self, msg, *args, **kwargs): + return self.log(logging.INFO - 1, msg, *args, **kwargs) + +logging.raiseExceptions = False +logging.setLoggerClass(BBLogger) + +logger = logging.getLogger("BitBake") +logger.addHandler(NullHandler()) +logger.setLevel(logging.DEBUG - 2) + +mainlogger = logging.getLogger("BitBake.Main") + +# This has to be imported after the setLoggerClass, as the import of bb.msg +# can result in construction of the various loggers. +import bb.msg + +from bb import fetch2 as fetch +sys.modules['bb.fetch'] = sys.modules['bb.fetch2'] + +# Messaging convenience functions +def plain(*args): + mainlogger.plain(''.join(args)) + +def debug(lvl, *args): + if isinstance(lvl, str): + mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl) + args = (lvl,) + args + lvl = 1 + mainlogger.debug(lvl, ''.join(args)) + +def note(*args): + mainlogger.info(''.join(args)) + +def warn(*args): + mainlogger.warning(''.join(args)) + +def error(*args, **kwargs): + mainlogger.error(''.join(args), extra=kwargs) + +def fatal(*args, **kwargs): + mainlogger.critical(''.join(args), extra=kwargs) + raise BBHandledException() + +def deprecated(func, name=None, advice=""): + """This is a decorator which can be used to mark functions + as deprecated. It will result in a warning being emitted + when the function is used.""" + import warnings + + if advice: + advice = ": %s" % advice + if name is None: + name = func.__name__ + + def newFunc(*args, **kwargs): + warnings.warn("Call to deprecated function %s%s." 
% (name, + advice), + category=DeprecationWarning, + stacklevel=2) + return func(*args, **kwargs) + newFunc.__name__ = func.__name__ + newFunc.__doc__ = func.__doc__ + newFunc.__dict__.update(func.__dict__) + return newFunc + +# For compatibility +def deprecate_import(current, modulename, fromlist, renames = None): + """Import objects from one module into another, wrapping them with a DeprecationWarning""" + import sys + + module = __import__(modulename, fromlist = fromlist) + for position, objname in enumerate(fromlist): + obj = getattr(module, objname) + newobj = deprecated(obj, "{0}.{1}".format(current, objname), + "Please use {0}.{1} instead".format(modulename, objname)) + if renames: + newname = renames[position] + else: + newname = objname + + setattr(sys.modules[current], newname, newobj) + diff --git a/poky/bitbake/lib/bb/build.py b/poky/bitbake/lib/bb/build.py new file mode 100644 index 0000000000..4631abdde5 --- /dev/null +++ b/poky/bitbake/lib/bb/build.py @@ -0,0 +1,913 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake 'Build' implementation +# +# Core code for function execution and task handling in the +# BitBake build tools. +# +# Copyright (C) 2003, 2004 Chris Larson +# +# Based on Gentoo's portage.py. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import sys +import logging +import shlex +import glob +import time +import stat +import bb +import bb.msg +import bb.process +import bb.progress +from bb import data, event, utils + +bblogger = logging.getLogger('BitBake') +logger = logging.getLogger('BitBake.Build') + +NULL = open(os.devnull, 'r+') + +__mtime_cache = {} + +def cached_mtime_noerror(f): + if f not in __mtime_cache: + try: + __mtime_cache[f] = os.stat(f)[stat.ST_MTIME] + except OSError: + return 0 + return __mtime_cache[f] + +def reset_cache(): + global __mtime_cache + __mtime_cache = {} + +# When we execute a Python function, we'd like certain things +# in all namespaces, hence we add them to __builtins__. +# If we do not do this and use the exec globals, they will +# not be available to subfunctions. 
+if hasattr(__builtins__, '__setitem__'): + builtins = __builtins__ +else: + builtins = __builtins__.__dict__ + +builtins['bb'] = bb +builtins['os'] = os + +class FuncFailed(Exception): + def __init__(self, name = None, logfile = None): + self.logfile = logfile + self.name = name + if name: + self.msg = 'Function failed: %s' % name + else: + self.msg = "Function failed" + + def __str__(self): + if self.logfile and os.path.exists(self.logfile): + msg = ("%s (log file is located at %s)" % + (self.msg, self.logfile)) + else: + msg = self.msg + return msg + +class TaskBase(event.Event): + """Base class for task events""" + + def __init__(self, t, logfile, d): + self._task = t + self._package = d.getVar("PF") + self._mc = d.getVar("BB_CURRENT_MC") + self.taskfile = d.getVar("FILE") + self.taskname = self._task + self.logfile = logfile + self.time = time.time() + event.Event.__init__(self) + self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName()) + + def getTask(self): + return self._task + + def setTask(self, task): + self._task = task + + def getDisplayName(self): + return bb.event.getName(self)[4:] + + task = property(getTask, setTask, None, "task property") + +class TaskStarted(TaskBase): + """Task execution started""" + def __init__(self, t, logfile, taskflags, d): + super(TaskStarted, self).__init__(t, logfile, d) + self.taskflags = taskflags + +class TaskSucceeded(TaskBase): + """Task execution completed""" + +class TaskFailed(TaskBase): + """Task execution failed""" + + def __init__(self, task, logfile, metadata, errprinted = False): + self.errprinted = errprinted + super(TaskFailed, self).__init__(task, logfile, metadata) + +class TaskFailedSilent(TaskBase): + """Task execution failed (silently)""" + def getDisplayName(self): + # Don't need to tell the user it was silent + return "Failed" + +class TaskInvalid(TaskBase): + + def __init__(self, task, metadata): + super(TaskInvalid, self).__init__(task, None, metadata) + self._message = "No such task '%s'" % task + +class TaskProgress(event.Event): + """ + Task made some progress that could be reported to the user, usually in + the form of a progress bar or similar. + NOTE: this class does not inherit from TaskBase since it doesn't need + to - it's fired within the task context itself, so we don't have any of + the context information that you do in the case of the other events. + The event PID can be used to determine which task it came from. + The progress value is normally 0-100, but can also be negative + indicating that progress has been made but we aren't able to determine + how much. + The rate is optional, this is simply an extra string to display to the + user if specified. + """ + def __init__(self, progress, rate=None): + self.progress = progress + self.rate = rate + event.Event.__init__(self) + + +class LogTee(object): + def __init__(self, logger, outfile): + self.outfile = outfile + self.logger = logger + self.name = self.outfile.name + + def write(self, string): + self.logger.plain(string) + self.outfile.write(string) + + def __enter__(self): + self.outfile.__enter__() + return self + + def __exit__(self, *excinfo): + self.outfile.__exit__(*excinfo) + + def __repr__(self): + return ''.format(self.name) + def flush(self): + self.outfile.flush() + +# +# pythonexception allows the python exceptions generated to be raised +# as the real exceptions (not FuncFailed) and without a backtrace at the +# origin of the failure. 
+# +def exec_func(func, d, dirs = None, pythonexception=False): + """Execute a BB 'function'""" + + try: + oldcwd = os.getcwd() + except: + oldcwd = None + + flags = d.getVarFlags(func) + cleandirs = flags.get('cleandirs') if flags else None + if cleandirs: + for cdir in d.expand(cleandirs).split(): + bb.utils.remove(cdir, True) + bb.utils.mkdirhier(cdir) + + if flags and dirs is None: + dirs = flags.get('dirs') + if dirs: + dirs = d.expand(dirs).split() + + if dirs: + for adir in dirs: + bb.utils.mkdirhier(adir) + adir = dirs[-1] + else: + adir = None + + body = d.getVar(func, False) + if not body: + if body is None: + logger.warning("Function %s doesn't exist", func) + return + + ispython = flags.get('python') + + lockflag = flags.get('lockfiles') + if lockflag: + lockfiles = [f for f in d.expand(lockflag).split()] + else: + lockfiles = None + + tempdir = d.getVar('T') + + # or func allows items to be executed outside of the normal + # task set, such as buildhistory + task = d.getVar('BB_RUNTASK') or func + if task == func: + taskfunc = task + else: + taskfunc = "%s.%s" % (task, func) + + runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}" + runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid()) + runfile = os.path.join(tempdir, runfn) + bb.utils.mkdirhier(os.path.dirname(runfile)) + + # Setup the courtesy link to the runfn, only for tasks + # we create the link 'just' before the run script is created + # if we create it after, and if the run script fails, then the + # link won't be created as an exception would be fired. + if task == func: + runlink = os.path.join(tempdir, 'run.{0}'.format(task)) + if runlink: + bb.utils.remove(runlink) + + try: + os.symlink(runfn, runlink) + except OSError: + pass + + with bb.utils.fileslocked(lockfiles): + if ispython: + exec_func_python(func, d, runfile, cwd=adir, pythonexception=pythonexception) + else: + exec_func_shell(func, d, runfile, cwd=adir) + + try: + curcwd = os.getcwd() + except: + curcwd = None + + if oldcwd and curcwd != oldcwd: + try: + bb.warn("Task %s changed cwd to %s" % (func, curcwd)) + os.chdir(oldcwd) + except: + pass + +_functionfmt = """ +{function}(d) +""" +logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s") +def exec_func_python(func, d, runfile, cwd=None, pythonexception=False): + """Execute a python BB 'function'""" + + code = _functionfmt.format(function=func) + bb.utils.mkdirhier(os.path.dirname(runfile)) + with open(runfile, 'w') as script: + bb.data.emit_func_python(func, script, d) + + if cwd: + try: + olddir = os.getcwd() + except OSError as e: + bb.warn("%s: Cannot get cwd: %s" % (func, e)) + olddir = None + os.chdir(cwd) + + bb.debug(2, "Executing python function %s" % func) + + try: + text = "def %s(d):\n%s" % (func, d.getVar(func, False)) + fn = d.getVarFlag(func, "filename", False) + lineno = int(d.getVarFlag(func, "lineno", False)) + bb.methodpool.insert_method(func, text, fn, lineno - 1) + + comp = utils.better_compile(code, func, "exec_python_func() autogenerated") + utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated", pythonexception=pythonexception) + except (bb.parse.SkipRecipe, bb.build.FuncFailed): + raise + except: + if pythonexception: + raise + raise FuncFailed(func, None) + finally: + bb.debug(2, "Python function %s finished" % func) + + if cwd and olddir: + try: + os.chdir(olddir) + except OSError as e: + bb.warn("%s: Cannot restore cwd %s: %s" % (func, olddir, e)) + +def shell_trap_code(): + return '''#!/bin/sh\n +# Emit a useful 
diagnostic if something fails: +bb_exit_handler() { + ret=$? + case $ret in + 0) ;; + *) case $BASH_VERSION in + "") echo "WARNING: exit code $ret from a shell command.";; + *) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from '$BASH_COMMAND'";; + esac + exit $ret + esac +} +trap 'bb_exit_handler' 0 +set -e +''' + +def exec_func_shell(func, d, runfile, cwd=None): + """Execute a shell function from the metadata + + Note on directory behavior. The 'dirs' varflag should contain a list + of the directories you need created prior to execution. The last + item in the list is where we will chdir/cd to. + """ + + # Don't let the emitted shell script override PWD + d.delVarFlag('PWD', 'export') + + with open(runfile, 'w') as script: + script.write(shell_trap_code()) + + bb.data.emit_func(func, script, d) + + if bb.msg.loggerVerboseLogs: + script.write("set -x\n") + if cwd: + script.write("cd '%s'\n" % cwd) + script.write("%s\n" % func) + script.write(''' +# cleanup +ret=$? +trap '' 0 +exit $ret +''') + + os.chmod(runfile, 0o775) + + cmd = runfile + if d.getVarFlag(func, 'fakeroot', False): + fakerootcmd = d.getVar('FAKEROOT') + if fakerootcmd: + cmd = [fakerootcmd, runfile] + + if bb.msg.loggerDefaultVerbose: + logfile = LogTee(logger, sys.stdout) + else: + logfile = sys.stdout + + progress = d.getVarFlag(func, 'progress') + if progress: + if progress == 'percent': + # Use default regex + logfile = bb.progress.BasicProgressHandler(d, outfile=logfile) + elif progress.startswith('percent:'): + # Use specified regex + logfile = bb.progress.BasicProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile) + elif progress.startswith('outof:'): + # Use specified regex + logfile = bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile) + else: + bb.warn('%s: invalid task progress varflag value "%s", ignoring' % (func, progress)) + + fifobuffer = bytearray() + def readfifo(data): + nonlocal fifobuffer + fifobuffer.extend(data) + while fifobuffer: + message, token, nextmsg = fifobuffer.partition(b"\00") + if token: + splitval = message.split(b' ', 1) + cmd = splitval[0].decode("utf-8") + if len(splitval) > 1: + value = splitval[1].decode("utf-8") + else: + value = '' + if cmd == 'bbplain': + bb.plain(value) + elif cmd == 'bbnote': + bb.note(value) + elif cmd == 'bbwarn': + bb.warn(value) + elif cmd == 'bberror': + bb.error(value) + elif cmd == 'bbfatal': + # The caller will call exit themselves, so bb.error() is + # what we want here rather than bb.fatal() + bb.error(value) + elif cmd == 'bbfatal_log': + bb.error(value, forcelog=True) + elif cmd == 'bbdebug': + splitval = value.split(' ', 1) + level = int(splitval[0]) + value = splitval[1] + bb.debug(level, value) + else: + bb.warn("Unrecognised command '%s' on FIFO" % cmd) + fifobuffer = nextmsg + else: + break + + tempdir = d.getVar('T') + fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid()) + if os.path.exists(fifopath): + os.unlink(fifopath) + os.mkfifo(fifopath) + with open(fifopath, 'r+b', buffering=0) as fifo: + try: + bb.debug(2, "Executing shell function %s" % func) + + try: + with open(os.devnull, 'r+') as stdin: + bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)]) + except bb.process.CmdError: + logfn = d.getVar('BB_LOGFILE') + raise FuncFailed(func, logfn) + finally: + os.unlink(fifopath) + + bb.debug(2, "Shell function %s finished" % func) + +def _task_data(fn, task, d): + localdata = bb.data.createCopy(d) + localdata.setVar('BB_FILENAME', fn) + 
localdata.setVar('BB_CURRENTTASK', task[3:]) + localdata.setVar('OVERRIDES', 'task-%s:%s' % + (task[3:].replace('_', '-'), d.getVar('OVERRIDES', False))) + localdata.finalize() + bb.data.expandKeys(localdata) + return localdata + +def _exec_task(fn, task, d, quieterr): + """Execute a BB 'task' + + Execution of a task involves a bit more setup than executing a function, + running it with its own local metadata, and with some useful variables set. + """ + if not d.getVarFlag(task, 'task', False): + event.fire(TaskInvalid(task, d), d) + logger.error("No such task: %s" % task) + return 1 + + logger.debug(1, "Executing task %s", task) + + localdata = _task_data(fn, task, d) + tempdir = localdata.getVar('T') + if not tempdir: + bb.fatal("T variable not set, unable to build") + + # Change nice level if we're asked to + nice = localdata.getVar("BB_TASK_NICE_LEVEL") + if nice: + curnice = os.nice(0) + nice = int(nice) - curnice + newnice = os.nice(nice) + logger.debug(1, "Renice to %s " % newnice) + ionice = localdata.getVar("BB_TASK_IONICE_LEVEL") + if ionice: + try: + cls, prio = ionice.split(".", 1) + bb.utils.ioprio_set(os.getpid(), int(cls), int(prio)) + except: + bb.warn("Invalid ionice level %s" % ionice) + + bb.utils.mkdirhier(tempdir) + + # Determine the logfile to generate + logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}' + logbase = logfmt.format(task=task, pid=os.getpid()) + + # Document the order of the tasks... + logorder = os.path.join(tempdir, 'log.task_order') + try: + with open(logorder, 'a') as logorderfile: + logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase)) + except OSError: + logger.exception("Opening log file '%s'", logorder) + pass + + # Setup the courtesy link to the logfn + loglink = os.path.join(tempdir, 'log.{0}'.format(task)) + logfn = os.path.join(tempdir, logbase) + if loglink: + bb.utils.remove(loglink) + + try: + os.symlink(logbase, loglink) + except OSError: + pass + + prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True) + postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True) + + class ErrorCheckHandler(logging.Handler): + def __init__(self): + self.triggered = False + logging.Handler.__init__(self, logging.ERROR) + def emit(self, record): + if getattr(record, 'forcelog', False): + self.triggered = False + else: + self.triggered = True + + # Handle logfiles + si = open('/dev/null', 'r') + try: + bb.utils.mkdirhier(os.path.dirname(logfn)) + logfile = open(logfn, 'w') + except OSError: + logger.exception("Opening log file '%s'", logfn) + pass + + # Dup the existing fds so we dont lose them + osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()] + oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()] + ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()] + + # Replace those fds with our own + os.dup2(si.fileno(), osi[1]) + os.dup2(logfile.fileno(), oso[1]) + os.dup2(logfile.fileno(), ose[1]) + + # Ensure Python logging goes to the logfile + handler = logging.StreamHandler(logfile) + handler.setFormatter(logformatter) + # Always enable full debug output into task logfiles + handler.setLevel(logging.DEBUG - 2) + bblogger.addHandler(handler) + + errchk = ErrorCheckHandler() + bblogger.addHandler(errchk) + + localdata.setVar('BB_LOGFILE', logfn) + localdata.setVar('BB_RUNTASK', task) + localdata.setVar('BB_TASK_LOGGER', bblogger) + + flags = localdata.getVarFlags(task) + + try: + try: + event.fire(TaskStarted(task, logfn, flags, localdata), localdata) + except (bb.BBHandledException, SystemExit): + return 1 
+ except FuncFailed as exc: + logger.error(str(exc)) + return 1 + + try: + for func in (prefuncs or '').split(): + exec_func(func, localdata) + exec_func(task, localdata) + for func in (postfuncs or '').split(): + exec_func(func, localdata) + except FuncFailed as exc: + if quieterr: + event.fire(TaskFailedSilent(task, logfn, localdata), localdata) + else: + errprinted = errchk.triggered + logger.error(str(exc)) + event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata) + return 1 + except bb.BBHandledException: + event.fire(TaskFailed(task, logfn, localdata, True), localdata) + return 1 + finally: + sys.stdout.flush() + sys.stderr.flush() + + bblogger.removeHandler(handler) + + # Restore the backup fds + os.dup2(osi[0], osi[1]) + os.dup2(oso[0], oso[1]) + os.dup2(ose[0], ose[1]) + + # Close the backup fds + os.close(osi[0]) + os.close(oso[0]) + os.close(ose[0]) + si.close() + + logfile.close() + if os.path.exists(logfn) and os.path.getsize(logfn) == 0: + logger.debug(2, "Zero size logfn %s, removing", logfn) + bb.utils.remove(logfn) + bb.utils.remove(loglink) + event.fire(TaskSucceeded(task, logfn, localdata), localdata) + + if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False): + make_stamp(task, localdata) + + return 0 + +def exec_task(fn, task, d, profile = False): + try: + quieterr = False + if d.getVarFlag(task, "quieterrors", False) is not None: + quieterr = True + + if profile: + profname = "profile-%s.log" % (d.getVar("PN") + "-" + task) + try: + import cProfile as profile + except: + import profile + prof = profile.Profile() + ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr) + prof.dump_stats(profname) + bb.utils.process_profilelog(profname) + + return ret + else: + return _exec_task(fn, task, d, quieterr) + + except Exception: + from traceback import format_exc + if not quieterr: + logger.error("Build of %s failed" % (task)) + logger.error(format_exc()) + failedevent = TaskFailed(task, None, d, True) + event.fire(failedevent, d) + return 1 + +def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False): + """ + Internal stamp helper function + Makes sure the stamp directory exists + Returns the stamp path+filename + + In the bitbake core, d can be a CacheData and file_name will be set. + When called in task context, d will be a data store, file_name will not be set + """ + taskflagname = taskname + if taskname.endswith("_setscene") and taskname != "do_setscene": + taskflagname = taskname.replace("_setscene", "") + + if file_name: + stamp = d.stamp[file_name] + extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or "" + else: + stamp = d.getVar('STAMP') + file_name = d.getVar('BB_FILENAME') + extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or "" + + if baseonly: + return stamp + if noextra: + extrainfo = "" + + if not stamp: + return + + stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo) + + stampdir = os.path.dirname(stamp) + if cached_mtime_noerror(stampdir) == 0: + bb.utils.mkdirhier(stampdir) + + return stamp + +def stamp_cleanmask_internal(taskname, d, file_name): + """ + Internal stamp helper function to generate stamp cleaning mask + Returns the stamp path+filename + + In the bitbake core, d can be a CacheData and file_name will be set. 
+ When called in task context, d will be a data store, file_name will not be set + """ + taskflagname = taskname + if taskname.endswith("_setscene") and taskname != "do_setscene": + taskflagname = taskname.replace("_setscene", "") + + if file_name: + stamp = d.stampclean[file_name] + extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or "" + else: + stamp = d.getVar('STAMPCLEAN') + file_name = d.getVar('BB_FILENAME') + extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or "" + + if not stamp: + return [] + + cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo) + + return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")] + +def make_stamp(task, d, file_name = None): + """ + Creates/updates a stamp for a given task + (d can be a data dict or dataCache) + """ + cleanmask = stamp_cleanmask_internal(task, d, file_name) + for mask in cleanmask: + for name in glob.glob(mask): + # Preserve sigdata files in the stamps directory + if "sigdata" in name or "sigbasedata" in name: + continue + # Preserve taint files in the stamps directory + if name.endswith('.taint'): + continue + os.unlink(name) + + stamp = stamp_internal(task, d, file_name) + # Remove the file and recreate to force timestamp + # change on broken NFS filesystems + if stamp: + bb.utils.remove(stamp) + open(stamp, "w").close() + + # If we're in task context, write out a signature file for each task + # as it completes + if not task.endswith("_setscene") and task != "do_setscene" and not file_name: + stampbase = stamp_internal(task, d, None, True) + file_name = d.getVar('BB_FILENAME') + bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True) + +def del_stamp(task, d, file_name = None): + """ + Removes a stamp for a given task + (d can be a data dict or dataCache) + """ + stamp = stamp_internal(task, d, file_name) + bb.utils.remove(stamp) + +def write_taint(task, d, file_name = None): + """ + Creates a "taint" file which will force the specified task and its + dependents to be re-run the next time by influencing the value of its + taskhash. + (d can be a data dict or dataCache) + """ + import uuid + if file_name: + taintfn = d.stamp[file_name] + '.' + task + '.taint' + else: + taintfn = d.getVar('STAMP') + '.' 
+ task + '.taint' + bb.utils.mkdirhier(os.path.dirname(taintfn)) + # The specific content of the taint file is not really important, + # we just need it to be random, so a random UUID is used + with open(taintfn, 'w') as taintf: + taintf.write(str(uuid.uuid4())) + +def stampfile(taskname, d, file_name = None, noextra=False): + """ + Return the stamp for a given task + (d can be a data dict or dataCache) + """ + return stamp_internal(taskname, d, file_name, noextra=noextra) + +def add_tasks(tasklist, d): + task_deps = d.getVar('_task_deps', False) + if not task_deps: + task_deps = {} + if not 'tasks' in task_deps: + task_deps['tasks'] = [] + if not 'parents' in task_deps: + task_deps['parents'] = {} + + for task in tasklist: + task = d.expand(task) + + d.setVarFlag(task, 'task', 1) + + if not task in task_deps['tasks']: + task_deps['tasks'].append(task) + + flags = d.getVarFlags(task) + def getTask(name): + if not name in task_deps: + task_deps[name] = {} + if name in flags: + deptask = d.expand(flags[name]) + task_deps[name][task] = deptask + getTask('depends') + getTask('rdepends') + getTask('deptask') + getTask('rdeptask') + getTask('recrdeptask') + getTask('recideptask') + getTask('nostamp') + getTask('fakeroot') + getTask('noexec') + getTask('umask') + task_deps['parents'][task] = [] + if 'deps' in flags: + for dep in flags['deps']: + dep = d.expand(dep) + task_deps['parents'][task].append(dep) + + # don't assume holding a reference + d.setVar('_task_deps', task_deps) + +def addtask(task, before, after, d): + if task[:3] != "do_": + task = "do_" + task + + d.setVarFlag(task, "task", 1) + bbtasks = d.getVar('__BBTASKS', False) or [] + if task not in bbtasks: + bbtasks.append(task) + d.setVar('__BBTASKS', bbtasks) + + existing = d.getVarFlag(task, "deps", False) or [] + if after is not None: + # set up deps for function + for entry in after.split(): + if entry not in existing: + existing.append(entry) + d.setVarFlag(task, "deps", existing) + if before is not None: + # set up things that depend on this func + for entry in before.split(): + existing = d.getVarFlag(entry, "deps", False) or [] + if task not in existing: + d.setVarFlag(entry, "deps", [task] + existing) + +def deltask(task, d): + if task[:3] != "do_": + task = "do_" + task + + bbtasks = d.getVar('__BBTASKS', False) or [] + if task in bbtasks: + bbtasks.remove(task) + d.delVarFlag(task, 'task') + d.setVar('__BBTASKS', bbtasks) + + d.delVarFlag(task, 'deps') + for bbtask in d.getVar('__BBTASKS', False) or []: + deps = d.getVarFlag(bbtask, 'deps', False) or [] + if task in deps: + deps.remove(task) + d.setVarFlag(bbtask, 'deps', deps) + +def preceedtask(task, with_recrdeptasks, d): + """ + Returns a set of tasks in the current recipe which were specified as + precondition by the task itself ("after") or which listed themselves + as precondition ("before"). Preceeding tasks specified via the + "recrdeptask" are included in the result only if requested. Beware + that this may lead to the task itself being listed. 
+ """ + preceed = set() + + # Ignore tasks which don't exist + tasks = d.getVar('__BBTASKS', False) + if task not in tasks: + return preceed + + preceed.update(d.getVarFlag(task, 'deps') or []) + if with_recrdeptasks: + recrdeptask = d.getVarFlag(task, 'recrdeptask') + if recrdeptask: + preceed.update(recrdeptask.split()) + return preceed + +def tasksbetween(task_start, task_end, d): + """ + Return the list of tasks between two tasks in the current recipe, + where task_start is to start at and task_end is the task to end at + (and task_end has a dependency chain back to task_start). + """ + outtasks = [] + tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys())) + def follow_chain(task, endtask, chain=None): + if not chain: + chain = [] + chain.append(task) + for othertask in tasks: + if othertask == task: + continue + if task == endtask: + for ctask in chain: + if ctask not in outtasks: + outtasks.append(ctask) + else: + deps = d.getVarFlag(othertask, 'deps', False) + if task in deps: + follow_chain(othertask, endtask, chain) + chain.pop() + follow_chain(task_start, task_end) + return outtasks diff --git a/poky/bitbake/lib/bb/cache.py b/poky/bitbake/lib/bb/cache.py new file mode 100644 index 0000000000..168a77ac0c --- /dev/null +++ b/poky/bitbake/lib/bb/cache.py @@ -0,0 +1,891 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake Cache implementation +# +# Caching of bitbake variables before task execution + +# Copyright (C) 2006 Richard Purdie +# Copyright (C) 2012 Intel Corporation + +# but small sections based on code from bin/bitbake: +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import sys +import logging +import pickle +from collections import defaultdict +import bb.utils + +logger = logging.getLogger("BitBake.Cache") + +__cache_version__ = "151" + +def getCacheFile(path, filename, data_hash): + return os.path.join(path, filename + "." + data_hash) + +# RecipeInfoCommon defines common data retrieving methods +# from meta data for caches. 
CoreRecipeInfo as well as other +# Extra RecipeInfo needs to inherit this class +class RecipeInfoCommon(object): + + @classmethod + def listvar(cls, var, metadata): + return cls.getvar(var, metadata).split() + + @classmethod + def intvar(cls, var, metadata): + return int(cls.getvar(var, metadata) or 0) + + @classmethod + def depvar(cls, var, metadata): + return bb.utils.explode_deps(cls.getvar(var, metadata)) + + @classmethod + def pkgvar(cls, var, packages, metadata): + return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata)) + for pkg in packages) + + @classmethod + def taskvar(cls, var, tasks, metadata): + return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata)) + for task in tasks) + + @classmethod + def flaglist(cls, flag, varlist, metadata, squash=False): + out_dict = dict((var, metadata.getVarFlag(var, flag)) + for var in varlist) + if squash: + return dict((k,v) for (k,v) in out_dict.items() if v) + else: + return out_dict + + @classmethod + def getvar(cls, var, metadata, expand = True): + return metadata.getVar(var, expand) or '' + + +class CoreRecipeInfo(RecipeInfoCommon): + __slots__ = () + + cachefile = "bb_cache.dat" + + def __init__(self, filename, metadata): + self.file_depends = metadata.getVar('__depends', False) + self.timestamp = bb.parse.cached_mtime(filename) + self.variants = self.listvar('__VARIANTS', metadata) + [''] + self.appends = self.listvar('__BBAPPEND', metadata) + self.nocache = self.getvar('BB_DONT_CACHE', metadata) + + self.skipreason = self.getvar('__SKIPPED', metadata) + if self.skipreason: + self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0] + self.skipped = True + self.provides = self.depvar('PROVIDES', metadata) + self.rprovides = self.depvar('RPROVIDES', metadata) + return + + self.tasks = metadata.getVar('__BBTASKS', False) + + self.pn = self.getvar('PN', metadata) + self.packages = self.listvar('PACKAGES', metadata) + if not self.packages: + self.packages.append(self.pn) + + self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata) + self.hashfilename = self.getvar('BB_HASHFILENAME', metadata) + + self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}} + + self.skipped = False + self.pe = self.getvar('PE', metadata) + self.pv = self.getvar('PV', metadata) + self.pr = self.getvar('PR', metadata) + self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata) + self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata) + self.stamp = self.getvar('STAMP', metadata) + self.stampclean = self.getvar('STAMPCLEAN', metadata) + self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata) + self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True) + self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata) + self.depends = self.depvar('DEPENDS', metadata) + self.provides = self.depvar('PROVIDES', metadata) + self.rdepends = self.depvar('RDEPENDS', metadata) + self.rprovides = self.depvar('RPROVIDES', metadata) + self.rrecommends = self.depvar('RRECOMMENDS', metadata) + self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata) + self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata) + self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata) + self.inherits = self.getvar('__inherit_cache', metadata, expand=False) + self.fakerootenv = self.getvar('FAKEROOTENV', metadata) + self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata) + self.fakerootnoenv = 
self.getvar('FAKEROOTNOENV', metadata) + self.extradepsfunc = self.getvar('calculate_extra_depends', metadata) + + @classmethod + def init_cacheData(cls, cachedata): + # CacheData in Core RecipeInfo Class + cachedata.task_deps = {} + cachedata.pkg_fn = {} + cachedata.pkg_pn = defaultdict(list) + cachedata.pkg_pepvpr = {} + cachedata.pkg_dp = {} + + cachedata.stamp = {} + cachedata.stampclean = {} + cachedata.stamp_extrainfo = {} + cachedata.file_checksums = {} + cachedata.fn_provides = {} + cachedata.pn_provides = defaultdict(list) + cachedata.all_depends = [] + + cachedata.deps = defaultdict(list) + cachedata.packages = defaultdict(list) + cachedata.providers = defaultdict(list) + cachedata.rproviders = defaultdict(list) + cachedata.packages_dynamic = defaultdict(list) + + cachedata.rundeps = defaultdict(lambda: defaultdict(list)) + cachedata.runrecs = defaultdict(lambda: defaultdict(list)) + cachedata.possible_world = [] + cachedata.universe_target = [] + cachedata.hashfn = {} + + cachedata.basetaskhash = {} + cachedata.inherits = {} + cachedata.fakerootenv = {} + cachedata.fakerootnoenv = {} + cachedata.fakerootdirs = {} + cachedata.extradepsfunc = {} + + def add_cacheData(self, cachedata, fn): + cachedata.task_deps[fn] = self.task_deps + cachedata.pkg_fn[fn] = self.pn + cachedata.pkg_pn[self.pn].append(fn) + cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr) + cachedata.pkg_dp[fn] = self.defaultpref + cachedata.stamp[fn] = self.stamp + cachedata.stampclean[fn] = self.stampclean + cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo + cachedata.file_checksums[fn] = self.file_checksums + + provides = [self.pn] + for provide in self.provides: + if provide not in provides: + provides.append(provide) + cachedata.fn_provides[fn] = provides + + for provide in provides: + cachedata.providers[provide].append(fn) + if provide not in cachedata.pn_provides[self.pn]: + cachedata.pn_provides[self.pn].append(provide) + + for dep in self.depends: + if dep not in cachedata.deps[fn]: + cachedata.deps[fn].append(dep) + if dep not in cachedata.all_depends: + cachedata.all_depends.append(dep) + + rprovides = self.rprovides + for package in self.packages: + cachedata.packages[package].append(fn) + rprovides += self.rprovides_pkg[package] + + for rprovide in rprovides: + if fn not in cachedata.rproviders[rprovide]: + cachedata.rproviders[rprovide].append(fn) + + for package in self.packages_dynamic: + cachedata.packages_dynamic[package].append(fn) + + # Build hash of runtime depends and recommends + for package in self.packages: + cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package] + cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package] + + # Collect files we may need for possible world-dep + # calculations + if self.not_world: + logger.debug(1, "EXCLUDE FROM WORLD: %s", fn) + else: + cachedata.possible_world.append(fn) + + # create a collection of all targets for sanity checking + # tasks, such as upstream versions, license, and tools for + # task and image creation. 
+ cachedata.universe_target.append(self.pn) + + cachedata.hashfn[fn] = self.hashfilename + for task, taskhash in self.basetaskhashes.items(): + identifier = '%s.%s' % (fn, task) + cachedata.basetaskhash[identifier] = taskhash + + cachedata.inherits[fn] = self.inherits + cachedata.fakerootenv[fn] = self.fakerootenv + cachedata.fakerootnoenv[fn] = self.fakerootnoenv + cachedata.fakerootdirs[fn] = self.fakerootdirs + cachedata.extradepsfunc[fn] = self.extradepsfunc + +def virtualfn2realfn(virtualfn): + """ + Convert a virtual file name to a real one + the associated subclass keyword + """ + mc = "" + if virtualfn.startswith('multiconfig:'): + elems = virtualfn.split(':') + mc = elems[1] + virtualfn = ":".join(elems[2:]) + + fn = virtualfn + cls = "" + if virtualfn.startswith('virtual:'): + elems = virtualfn.split(':') + cls = ":".join(elems[1:-1]) + fn = elems[-1] + + return (fn, cls, mc) + +def realfn2virtual(realfn, cls, mc): + """ + Convert a real filename + the associated subclass keyword to a virtual filename + """ + if cls: + realfn = "virtual:" + cls + ":" + realfn + if mc: + realfn = "multiconfig:" + mc + ":" + realfn + return realfn + +def variant2virtual(realfn, variant): + """ + Convert a real filename + the associated subclass keyword to a virtual filename + """ + if variant == "": + return realfn + if variant.startswith("multiconfig:"): + elems = variant.split(":") + if elems[2]: + return "multiconfig:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn + return "multiconfig:" + elems[1] + ":" + realfn + return "virtual:" + variant + ":" + realfn + +def parse_recipe(bb_data, bbfile, appends, mc=''): + """ + Parse a recipe + """ + + chdir_back = False + + bb_data.setVar("__BBMULTICONFIG", mc) + + # expand tmpdir to include this topdir + bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "") + bbfile_loc = os.path.abspath(os.path.dirname(bbfile)) + oldpath = os.path.abspath(os.getcwd()) + bb.parse.cached_mtime_noerror(bbfile_loc) + + # The ConfHandler first looks if there is a TOPDIR and if not + # then it would call getcwd(). + # Previously, we chdir()ed to bbfile_loc, called the handler + # and finally chdir()ed back, a couple of thousand times. We now + # just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet. + if not bb_data.getVar('TOPDIR', False): + chdir_back = True + bb_data.setVar('TOPDIR', bbfile_loc) + try: + if appends: + bb_data.setVar('__BBAPPEND', " ".join(appends)) + bb_data = bb.parse.handle(bbfile, bb_data) + if chdir_back: + os.chdir(oldpath) + return bb_data + except: + if chdir_back: + os.chdir(oldpath) + raise + + + +class NoCache(object): + + def __init__(self, databuilder): + self.databuilder = databuilder + self.data = databuilder.data + + def loadDataFull(self, virtualfn, appends): + """ + Return a complete set of data for fn. + To do this, we need to parse the file. 
+ """ + logger.debug(1, "Parsing %s (full)" % virtualfn) + (fn, virtual, mc) = virtualfn2realfn(virtualfn) + bb_data = self.load_bbfile(virtualfn, appends, virtonly=True) + return bb_data[virtual] + + def load_bbfile(self, bbfile, appends, virtonly = False): + """ + Load and parse one .bb build file + Return the data and whether parsing resulted in the file being skipped + """ + + if virtonly: + (bbfile, virtual, mc) = virtualfn2realfn(bbfile) + bb_data = self.databuilder.mcdata[mc].createCopy() + bb_data.setVar("__ONLYFINALISE", virtual or "default") + datastores = parse_recipe(bb_data, bbfile, appends, mc) + return datastores + + bb_data = self.data.createCopy() + datastores = parse_recipe(bb_data, bbfile, appends) + + for mc in self.databuilder.mcdata: + if not mc: + continue + bb_data = self.databuilder.mcdata[mc].createCopy() + newstores = parse_recipe(bb_data, bbfile, appends, mc) + for ns in newstores: + datastores["multiconfig:%s:%s" % (mc, ns)] = newstores[ns] + + return datastores + +class Cache(NoCache): + """ + BitBake Cache implementation + """ + + def __init__(self, databuilder, data_hash, caches_array): + super().__init__(databuilder) + data = databuilder.data + + # Pass caches_array information into Cache Constructor + # It will be used later for deciding whether we + # need extra cache file dump/load support + self.caches_array = caches_array + self.cachedir = data.getVar("CACHE") + self.clean = set() + self.checked = set() + self.depends_cache = {} + self.data_fn = None + self.cacheclean = True + self.data_hash = data_hash + + if self.cachedir in [None, '']: + self.has_cache = False + logger.info("Not using a cache. " + "Set CACHE = to enable.") + return + + self.has_cache = True + self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash) + + logger.debug(1, "Cache dir: %s", self.cachedir) + bb.utils.mkdirhier(self.cachedir) + + cache_ok = True + if self.caches_array: + for cache_class in self.caches_array: + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + cache_ok = cache_ok and os.path.exists(cachefile) + cache_class.init_cacheData(self) + if cache_ok: + self.load_cachefile() + elif os.path.isfile(self.cachefile): + logger.info("Out of date cache found, rebuilding...") + else: + logger.debug(1, "Cache file %s not found, building..." 
% self.cachefile) + + def load_cachefile(self): + cachesize = 0 + previous_progress = 0 + previous_percent = 0 + + # Calculate the correct cachesize of all those cache files + for cache_class in self.caches_array: + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + with open(cachefile, "rb") as cachefile: + cachesize += os.fstat(cachefile.fileno()).st_size + + bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data) + + for cache_class in self.caches_array: + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + logger.debug(1, 'Loading cache file: %s' % cachefile) + with open(cachefile, "rb") as cachefile: + pickled = pickle.Unpickler(cachefile) + # Check cache version information + try: + cache_ver = pickled.load() + bitbake_ver = pickled.load() + except Exception: + logger.info('Invalid cache, rebuilding...') + return + + if cache_ver != __cache_version__: + logger.info('Cache version mismatch, rebuilding...') + return + elif bitbake_ver != bb.__version__: + logger.info('Bitbake version mismatch, rebuilding...') + return + + # Load the rest of the cache file + current_progress = 0 + while cachefile: + try: + key = pickled.load() + value = pickled.load() + except Exception: + break + if not isinstance(key, str): + bb.warn("%s from extras cache is not a string?" % key) + break + if not isinstance(value, RecipeInfoCommon): + bb.warn("%s from extras cache is not a RecipeInfoCommon class?" % value) + break + + if key in self.depends_cache: + self.depends_cache[key].append(value) + else: + self.depends_cache[key] = [value] + # only fire events on even percentage boundaries + current_progress = cachefile.tell() + previous_progress + if current_progress > cachesize: + # we might have calculated incorrect total size because a file + # might've been written out just after we checked its size + cachesize = current_progress + current_percent = 100 * current_progress / cachesize + if current_percent > previous_percent: + previous_percent = current_percent + bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize), + self.data) + + previous_progress += current_progress + + # Note: depends cache number is corresponding to the parsing file numbers. + # The same file has several caches, still regarded as one item in the cache + bb.event.fire(bb.event.CacheLoadCompleted(cachesize, + len(self.depends_cache)), + self.data) + + def parse(self, filename, appends): + """Parse the specified filename, returning the recipe information""" + logger.debug(1, "Parsing %s", filename) + infos = [] + datastores = self.load_bbfile(filename, appends) + depends = [] + variants = [] + # Process the "real" fn last so we can store variants list + for variant, data in sorted(datastores.items(), + key=lambda i: i[0], + reverse=True): + virtualfn = variant2virtual(filename, variant) + variants.append(variant) + depends = depends + (data.getVar("__depends", False) or []) + if depends and not variant: + data.setVar("__depends", depends) + if virtualfn == filename: + data.setVar("__VARIANTS", " ".join(variants)) + info_array = [] + for cache_class in self.caches_array: + info = cache_class(filename, data) + info_array.append(info) + infos.append((virtualfn, info_array)) + + return infos + + def load(self, filename, appends): + """Obtain the recipe information for the specified filename, + using cached values if available, otherwise parsing. 
+ + Note that if it does parse to obtain the info, it will not + automatically add the information to the cache or to your + CacheData. Use the add or add_info method to do so after + running this, or use loadData instead.""" + cached = self.cacheValid(filename, appends) + if cached: + infos = [] + # info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo] + info_array = self.depends_cache[filename] + for variant in info_array[0].variants: + virtualfn = variant2virtual(filename, variant) + infos.append((virtualfn, self.depends_cache[virtualfn])) + else: + return self.parse(filename, appends, configdata, self.caches_array) + + return cached, infos + + def loadData(self, fn, appends, cacheData): + """Load the recipe info for the specified filename, + parsing and adding to the cache if necessary, and adding + the recipe information to the supplied CacheData instance.""" + skipped, virtuals = 0, 0 + + cached, infos = self.load(fn, appends) + for virtualfn, info_array in infos: + if info_array[0].skipped: + logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason) + skipped += 1 + else: + self.add_info(virtualfn, info_array, cacheData, not cached) + virtuals += 1 + + return cached, skipped, virtuals + + def cacheValid(self, fn, appends): + """ + Is the cache valid for fn? + Fast version, no timestamps checked. + """ + if fn not in self.checked: + self.cacheValidUpdate(fn, appends) + + # Is cache enabled? + if not self.has_cache: + return False + if fn in self.clean: + return True + return False + + def cacheValidUpdate(self, fn, appends): + """ + Is the cache valid for fn? + Make thorough (slower) checks including timestamps. + """ + # Is cache enabled? + if not self.has_cache: + return False + + self.checked.add(fn) + + # File isn't in depends_cache + if not fn in self.depends_cache: + logger.debug(2, "Cache: %s is not cached", fn) + return False + + mtime = bb.parse.cached_mtime_noerror(fn) + + # Check file still exists + if mtime == 0: + logger.debug(2, "Cache: %s no longer exists", fn) + self.remove(fn) + return False + + info_array = self.depends_cache[fn] + # Check the file's timestamp + if mtime != info_array[0].timestamp: + logger.debug(2, "Cache: %s changed", fn) + self.remove(fn) + return False + + # Check dependencies are still valid + depends = info_array[0].file_depends + if depends: + for f, old_mtime in depends: + fmtime = bb.parse.cached_mtime_noerror(f) + # Check if file still exists + if old_mtime != 0 and fmtime == 0: + logger.debug(2, "Cache: %s's dependency %s was removed", + fn, f) + self.remove(fn) + return False + + if (fmtime != old_mtime): + logger.debug(2, "Cache: %s's dependency %s changed", + fn, f) + self.remove(fn) + return False + + if hasattr(info_array[0], 'file_checksums'): + for _, fl in info_array[0].file_checksums.items(): + fl = fl.strip() + while fl: + # A .split() would be simpler but means spaces or colons in filenames would break + a = fl.find(":True") + b = fl.find(":False") + if ((a < 0) and b) or ((b > 0) and (b < a)): + f = fl[:b+6] + fl = fl[b+7:] + elif ((b < 0) and a) or ((a > 0) and (a < b)): + f = fl[:a+5] + fl = fl[a+6:] + else: + break + fl = fl.strip() + if "*" in f: + continue + f, exist = f.split(":") + if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)): + logger.debug(2, "Cache: %s's file checksum list file %s changed", + fn, f) + self.remove(fn) + return False + + if appends != info_array[0].appends: + logger.debug(2, "Cache: appends for %s changed", fn) + logger.debug(2, "%s to 
%s" % (str(appends), str(info_array[0].appends))) + self.remove(fn) + return False + + invalid = False + for cls in info_array[0].variants: + virtualfn = variant2virtual(fn, cls) + self.clean.add(virtualfn) + if virtualfn not in self.depends_cache: + logger.debug(2, "Cache: %s is not cached", virtualfn) + invalid = True + elif len(self.depends_cache[virtualfn]) != len(self.caches_array): + logger.debug(2, "Cache: Extra caches missing for %s?" % virtualfn) + invalid = True + + # If any one of the variants is not present, mark as invalid for all + if invalid: + for cls in info_array[0].variants: + virtualfn = variant2virtual(fn, cls) + if virtualfn in self.clean: + logger.debug(2, "Cache: Removing %s from cache", virtualfn) + self.clean.remove(virtualfn) + if fn in self.clean: + logger.debug(2, "Cache: Marking %s as not clean", fn) + self.clean.remove(fn) + return False + + self.clean.add(fn) + return True + + def remove(self, fn): + """ + Remove a fn from the cache + Called from the parser in error cases + """ + if fn in self.depends_cache: + logger.debug(1, "Removing %s from cache", fn) + del self.depends_cache[fn] + if fn in self.clean: + logger.debug(1, "Marking %s as unclean", fn) + self.clean.remove(fn) + + def sync(self): + """ + Save the cache + Called from the parser when complete (or exiting) + """ + + if not self.has_cache: + return + + if self.cacheclean: + logger.debug(2, "Cache is clean, not saving.") + return + + for cache_class in self.caches_array: + cache_class_name = cache_class.__name__ + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + with open(cachefile, "wb") as f: + p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL) + p.dump(__cache_version__) + p.dump(bb.__version__) + + for key, info_array in self.depends_cache.items(): + for info in info_array: + if isinstance(info, RecipeInfoCommon) and info.__class__.__name__ == cache_class_name: + p.dump(key) + p.dump(info) + + del self.depends_cache + + @staticmethod + def mtime(cachefile): + return bb.parse.cached_mtime_noerror(cachefile) + + def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None): + if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped): + cacheData.add_from_recipeinfo(filename, info_array) + + if watcher: + watcher(info_array[0].file_depends) + + if not self.has_cache: + return + + if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache: + if parsed: + self.cacheclean = False + self.depends_cache[filename] = info_array + + def add(self, file_name, data, cacheData, parsed=None): + """ + Save data we need into the cache + """ + + realfn = virtualfn2realfn(file_name)[0] + + info_array = [] + for cache_class in self.caches_array: + info_array.append(cache_class(realfn, data)) + self.add_info(file_name, info_array, cacheData, parsed) + + +def init(cooker): + """ + The Objective: Cache the minimum amount of data possible yet get to the + stage of building packages (i.e. tryBuild) without reparsing any .bb files. + + To do this, we intercept getVar calls and only cache the variables we see + being accessed. We rely on the cache getVar calls being made for all + variables bitbake might need to use to reach this stage. For each cached + file we need to track: + + * Its mtime + * The mtimes of all its dependencies + * Whether it caused a parse.SkipRecipe exception + + Files causing parsing errors are evicted from the cache. 
+ + """ + return Cache(cooker.configuration.data, cooker.configuration.data_hash) + + +class CacheData(object): + """ + The data structures we compile from the cached data + """ + + def __init__(self, caches_array): + self.caches_array = caches_array + for cache_class in self.caches_array: + if not issubclass(cache_class, RecipeInfoCommon): + bb.error("Extra cache data class %s should subclass RecipeInfoCommon class" % cache_class) + cache_class.init_cacheData(self) + + # Direct cache variables + self.task_queues = {} + self.preferred = {} + self.tasks = {} + # Indirect Cache variables (set elsewhere) + self.ignored_dependencies = [] + self.world_target = set() + self.bbfile_priority = {} + + def add_from_recipeinfo(self, fn, info_array): + for info in info_array: + info.add_cacheData(self, fn) + +class MultiProcessCache(object): + """ + BitBake multi-process cache implementation + + Used by the codeparser & file checksum caches + """ + + def __init__(self): + self.cachefile = None + self.cachedata = self.create_cachedata() + self.cachedata_extras = self.create_cachedata() + + def init_cache(self, d, cache_file_name=None): + cachedir = (d.getVar("PERSISTENT_DIR") or + d.getVar("CACHE")) + if cachedir in [None, '']: + return + bb.utils.mkdirhier(cachedir) + self.cachefile = os.path.join(cachedir, + cache_file_name or self.__class__.cache_file_name) + logger.debug(1, "Using cache in '%s'", self.cachefile) + + glf = bb.utils.lockfile(self.cachefile + ".lock") + + try: + with open(self.cachefile, "rb") as f: + p = pickle.Unpickler(f) + data, version = p.load() + except: + bb.utils.unlockfile(glf) + return + + bb.utils.unlockfile(glf) + + if version != self.__class__.CACHE_VERSION: + return + + self.cachedata = data + + def create_cachedata(self): + data = [{}] + return data + + def save_extras(self): + if not self.cachefile: + return + + glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True) + + i = os.getpid() + lf = None + while not lf: + lf = bb.utils.lockfile(self.cachefile + ".lock." 
+ str(i), retry=False) + if not lf or os.path.exists(self.cachefile + "-" + str(i)): + if lf: + bb.utils.unlockfile(lf) + lf = None + i = i + 1 + continue + + with open(self.cachefile + "-" + str(i), "wb") as f: + p = pickle.Pickler(f, -1) + p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION]) + + bb.utils.unlockfile(lf) + bb.utils.unlockfile(glf) + + def merge_data(self, source, dest): + for j in range(0,len(dest)): + for h in source[j]: + if h not in dest[j]: + dest[j][h] = source[j][h] + + def save_merge(self): + if not self.cachefile: + return + + glf = bb.utils.lockfile(self.cachefile + ".lock") + + data = self.cachedata + + for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]: + f = os.path.join(os.path.dirname(self.cachefile), f) + try: + with open(f, "rb") as fd: + p = pickle.Unpickler(fd) + extradata, version = p.load() + except (IOError, EOFError): + os.unlink(f) + continue + + if version != self.__class__.CACHE_VERSION: + os.unlink(f) + continue + + self.merge_data(extradata, data) + os.unlink(f) + + with open(self.cachefile, "wb") as f: + p = pickle.Pickler(f, -1) + p.dump([data, self.__class__.CACHE_VERSION]) + + bb.utils.unlockfile(glf) diff --git a/poky/bitbake/lib/bb/cache_extra.py b/poky/bitbake/lib/bb/cache_extra.py new file mode 100644 index 0000000000..83f4959d6c --- /dev/null +++ b/poky/bitbake/lib/bb/cache_extra.py @@ -0,0 +1,75 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Extra RecipeInfo will be all defined in this file. Currently, +# Only Hob (Image Creator) Requests some extra fields. So +# HobRecipeInfo is defined. It's named HobRecipeInfo because it +# is introduced by 'hob'. Users could also introduce other +# RecipeInfo or simply use those already defined RecipeInfo. +# In the following patch, this newly defined new extra RecipeInfo +# will be dynamically loaded and used for loading/saving the extra +# cache fields + +# Copyright (C) 2011, Intel Corporation. All rights reserved. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +from bb.cache import RecipeInfoCommon + +class HobRecipeInfo(RecipeInfoCommon): + __slots__ = () + + classname = "HobRecipeInfo" + # please override this member with the correct data cache file + # such as (bb_cache.dat, bb_extracache_hob.dat) + cachefile = "bb_extracache_" + classname +".dat" + + # override this member with the list of extra cache fields + # that this class will provide + cachefields = ['summary', 'license', 'section', + 'description', 'homepage', 'bugtracker', + 'prevision', 'files_info'] + + def __init__(self, filename, metadata): + + self.summary = self.getvar('SUMMARY', metadata) + self.license = self.getvar('LICENSE', metadata) + self.section = self.getvar('SECTION', metadata) + self.description = self.getvar('DESCRIPTION', metadata) + self.homepage = self.getvar('HOMEPAGE', metadata) + self.bugtracker = self.getvar('BUGTRACKER', metadata) + self.prevision = self.getvar('PR', metadata) + self.files_info = self.getvar('FILES_INFO', metadata) + + @classmethod + def init_cacheData(cls, cachedata): + # CacheData in Hob RecipeInfo Class + cachedata.summary = {} + cachedata.license = {} + cachedata.section = {} + cachedata.description = {} + cachedata.homepage = {} + cachedata.bugtracker = {} + cachedata.prevision = {} + cachedata.files_info = {} + + def add_cacheData(self, cachedata, fn): + cachedata.summary[fn] = self.summary + cachedata.license[fn] = self.license + cachedata.section[fn] = self.section + cachedata.description[fn] = self.description + cachedata.homepage[fn] = self.homepage + cachedata.bugtracker[fn] = self.bugtracker + cachedata.prevision[fn] = self.prevision + cachedata.files_info[fn] = self.files_info diff --git a/poky/bitbake/lib/bb/checksum.py b/poky/bitbake/lib/bb/checksum.py new file mode 100644 index 0000000000..84289208f4 --- /dev/null +++ b/poky/bitbake/lib/bb/checksum.py @@ -0,0 +1,134 @@ +# Local file checksum cache implementation +# +# Copyright (C) 2012 Intel Corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
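(Editorial aside, not part of the patch.) checksum.py below builds its persistent FileChecksumCache on top of the MultiProcessCache class added to cache.py earlier in this patch. The standalone sketch that follows strips that class down to its core spill-and-merge pattern: each worker process dumps the entries it learned into a private "<cachefile>-<pid>" file, and one process later folds those spill files back into the main pickle, with entries already present in the main cache taking precedence, as in merge_data(). File names and sample data here are invented, and the real class additionally takes bb.utils lock files and stores a cache version alongside the data, which this sketch omits.

import glob
import os
import pickle
import tempfile

cachedir = tempfile.mkdtemp()
cachefile = os.path.join(cachedir, "demo_cache.dat")

def save_extras(extras):
    # Each process writes only what it learned, to its own spill file.
    with open("%s-%d" % (cachefile, os.getpid()), "wb") as f:
        pickle.dump(extras, f)

def save_merge():
    # Fold every spill file back into the main cache, then delete it.
    data = {}
    if os.path.exists(cachefile):
        with open(cachefile, "rb") as f:
            data = pickle.load(f)
    for spill in glob.glob(cachefile + "-*"):
        with open(spill, "rb") as f:
            for key, value in pickle.load(f).items():
                data.setdefault(key, value)   # existing entries win
        os.unlink(spill)
    with open(cachefile, "wb") as f:
        pickle.dump(data, f)

save_extras({"/path/to/a.bb": "fake-checksum-1"})
save_merge()
with open(cachefile, "rb") as f:
    print(pickle.load(f))   # {'/path/to/a.bb': 'fake-checksum-1'}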
+ +import glob +import operator +import os +import stat +import pickle +import bb.utils +import logging +from bb.cache import MultiProcessCache + +logger = logging.getLogger("BitBake.Cache") + +# mtime cache (non-persistent) +# based upon the assumption that files do not change during bitbake run +class FileMtimeCache(object): + cache = {} + + def cached_mtime(self, f): + if f not in self.cache: + self.cache[f] = os.stat(f)[stat.ST_MTIME] + return self.cache[f] + + def cached_mtime_noerror(self, f): + if f not in self.cache: + try: + self.cache[f] = os.stat(f)[stat.ST_MTIME] + except OSError: + return 0 + return self.cache[f] + + def update_mtime(self, f): + self.cache[f] = os.stat(f)[stat.ST_MTIME] + return self.cache[f] + + def clear(self): + self.cache.clear() + +# Checksum + mtime cache (persistent) +class FileChecksumCache(MultiProcessCache): + cache_file_name = "local_file_checksum_cache.dat" + CACHE_VERSION = 1 + + def __init__(self): + self.mtime_cache = FileMtimeCache() + MultiProcessCache.__init__(self) + + def get_checksum(self, f): + entry = self.cachedata[0].get(f) + cmtime = self.mtime_cache.cached_mtime(f) + if entry: + (mtime, hashval) = entry + if cmtime == mtime: + return hashval + else: + bb.debug(2, "file %s changed mtime, recompute checksum" % f) + + hashval = bb.utils.md5_file(f) + self.cachedata_extras[0][f] = (cmtime, hashval) + return hashval + + def merge_data(self, source, dest): + for h in source[0]: + if h in dest: + (smtime, _) = source[0][h] + (dmtime, _) = dest[0][h] + if smtime > dmtime: + dest[0][h] = source[0][h] + else: + dest[0][h] = source[0][h] + + def get_checksums(self, filelist, pn): + """Get checksums for a list of files""" + + def checksum_file(f): + try: + checksum = self.get_checksum(f) + except OSError as e: + bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e)) + return None + return checksum + + def checksum_dir(pth): + # Handle directories recursively + dirchecksums = [] + for root, dirs, files in os.walk(pth): + for name in files: + fullpth = os.path.join(root, name) + checksum = checksum_file(fullpth) + if checksum: + dirchecksums.append((fullpth, checksum)) + return dirchecksums + + checksums = [] + for pth in filelist.split(): + exist = pth.split(":")[1] + if exist == "False": + continue + pth = pth.split(":")[0] + if '*' in pth: + # Handle globs + for f in glob.glob(pth): + if os.path.isdir(f): + if not os.path.islink(f): + checksums.extend(checksum_dir(f)) + else: + checksum = checksum_file(f) + if checksum: + checksums.append((f, checksum)) + elif os.path.isdir(pth): + if not os.path.islink(pth): + checksums.extend(checksum_dir(pth)) + else: + checksum = checksum_file(pth) + if checksum: + checksums.append((pth, checksum)) + + checksums.sort(key=operator.itemgetter(1)) + return checksums diff --git a/poky/bitbake/lib/bb/codeparser.py b/poky/bitbake/lib/bb/codeparser.py new file mode 100644 index 0000000000..530f44e578 --- /dev/null +++ b/poky/bitbake/lib/bb/codeparser.py @@ -0,0 +1,476 @@ +""" +BitBake code parser + +Parses actual code (i.e. python and shell) for functions and in-line +expressions. Used mainly to determine dependencies on other functions +and variables within the BitBake metadata. Also provides a cache for +this information in order to speed up processing. + +(Not to be confused with the code that parses the metadata itself, +see lib/bb/parse/ for that). 
+ +NOTE: if you change how the parsers gather information you will almost +certainly need to increment CodeParserCache.CACHE_VERSION below so that +any existing codeparser cache gets invalidated. Additionally you'll need +to increment __cache_version__ in cache.py in order to ensure that old +recipe caches don't trigger "Taskhash mismatch" errors. + +""" + +import ast +import sys +import codegen +import logging +import pickle +import bb.pysh as pysh +import os.path +import bb.utils, bb.data +import hashlib +from itertools import chain +from bb.pysh import pyshyacc, pyshlex, sherrors +from bb.cache import MultiProcessCache + +logger = logging.getLogger('BitBake.CodeParser') + +def bbhash(s): + return hashlib.md5(s.encode("utf-8")).hexdigest() + +def check_indent(codestr): + """If the code is indented, add a top level piece of code to 'remove' the indentation""" + + i = 0 + while codestr[i] in ["\n", "\t", " "]: + i = i + 1 + + if i == 0: + return codestr + + if codestr[i-1] == "\t" or codestr[i-1] == " ": + if codestr[0] == "\n": + # Since we're adding a line, we need to remove one line of any empty padding + # to ensure line numbers are correct + codestr = codestr[1:] + return "if 1:\n" + codestr + + return codestr + + +# Basically pickle, in python 2.7.3 at least, does badly with data duplication +# upon pickling and unpickling. Combine this with duplicate objects and things +# are a mess. +# +# When the sets are originally created, python calls intern() on the set keys +# which significantly improves memory usage. Sadly the pickle/unpickle process +# doesn't call intern() on the keys and results in the same strings being duplicated +# in memory. This also means pickle will save the same string multiple times in +# the cache file. +# +# By having shell and python cacheline objects with setstate/getstate, we force +# the object creation through our own routine where we can call intern (via internSet). +# +# We also use hashable frozensets and ensure we use references to these so that +# duplicates can be removed, both in memory and in the resulting pickled data. +# +# By playing these games, the size of the cache file shrinks dramatically +# meaning faster load times and the reloaded cache files also consume much less +# memory. Smaller cache files, faster load times and lower memory usage is good. +# +# A custom getstate/setstate using tuples is actually worth 15% cachesize by +# avoiding duplication of the attribute names! 
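(Editorial aside, not part of the patch.) The interning scheme described in the comment block above can be seen in isolation with a short standalone sketch. The command names below are made up; the snippet reimplements only the internSet() idea, keyed by hash as the class that follows is, plus a quick pickle-size comparison showing why shared frozenset references keep the cache file small.

import pickle
import sys

class SetCache:
    """Hand out one shared frozenset per distinct collection of strings."""
    def __init__(self):
        self.setcache = {}

    def internSet(self, items):
        s = frozenset(sys.intern(i) for i in items)
        # Key by hash so later callers with the same contents get back a
        # reference to the first frozenset instead of a new object.
        return self.setcache.setdefault(hash(s), s)

cache = SetCache()
a = cache.internSet(["bash", "sed", "awk"])
b = cache.internSet(["awk", "sed", "bash"])   # same contents, different order
assert a is b                                  # both callers share one object

# A shared reference pickles as one payload plus a memo back-reference, while
# two equal-but-separately-built frozensets are serialised in full twice.
shared = pickle.dumps([a, b])
separate = pickle.dumps([frozenset(["bash", "sed", "awk"]),
                         frozenset(["awk", "sed", "bash"])])
print(len(shared), "<", len(separate))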
+ +class SetCache(object): + def __init__(self): + self.setcache = {} + + def internSet(self, items): + + new = [] + for i in items: + new.append(sys.intern(i)) + s = frozenset(new) + h = hash(s) + if h in self.setcache: + return self.setcache[h] + self.setcache[h] = s + return s + +codecache = SetCache() + +class pythonCacheLine(object): + def __init__(self, refs, execs, contains): + self.refs = codecache.internSet(refs) + self.execs = codecache.internSet(execs) + self.contains = {} + for c in contains: + self.contains[c] = codecache.internSet(contains[c]) + + def __getstate__(self): + return (self.refs, self.execs, self.contains) + + def __setstate__(self, state): + (refs, execs, contains) = state + self.__init__(refs, execs, contains) + def __hash__(self): + l = (hash(self.refs), hash(self.execs)) + for c in sorted(self.contains.keys()): + l = l + (c, hash(self.contains[c])) + return hash(l) + def __repr__(self): + return " ".join([str(self.refs), str(self.execs), str(self.contains)]) + + +class shellCacheLine(object): + def __init__(self, execs): + self.execs = codecache.internSet(execs) + + def __getstate__(self): + return (self.execs) + + def __setstate__(self, state): + (execs) = state + self.__init__(execs) + def __hash__(self): + return hash(self.execs) + def __repr__(self): + return str(self.execs) + +class CodeParserCache(MultiProcessCache): + cache_file_name = "bb_codeparser.dat" + # NOTE: you must increment this if you change how the parsers gather information, + # so that an existing cache gets invalidated. Additionally you'll need + # to increment __cache_version__ in cache.py in order to ensure that old + # recipe caches don't trigger "Taskhash mismatch" errors. + CACHE_VERSION = 9 + + def __init__(self): + MultiProcessCache.__init__(self) + self.pythoncache = self.cachedata[0] + self.shellcache = self.cachedata[1] + self.pythoncacheextras = self.cachedata_extras[0] + self.shellcacheextras = self.cachedata_extras[1] + + # To avoid duplication in the codeparser cache, keep + # a lookup of hashes of objects we already have + self.pythoncachelines = {} + self.shellcachelines = {} + + def newPythonCacheLine(self, refs, execs, contains): + cacheline = pythonCacheLine(refs, execs, contains) + h = hash(cacheline) + if h in self.pythoncachelines: + return self.pythoncachelines[h] + self.pythoncachelines[h] = cacheline + return cacheline + + def newShellCacheLine(self, execs): + cacheline = shellCacheLine(execs) + h = hash(cacheline) + if h in self.shellcachelines: + return self.shellcachelines[h] + self.shellcachelines[h] = cacheline + return cacheline + + def init_cache(self, d): + # Check if we already have the caches + if self.pythoncache: + return + + MultiProcessCache.init_cache(self, d) + + # cachedata gets re-assigned in the parent + self.pythoncache = self.cachedata[0] + self.shellcache = self.cachedata[1] + + def create_cachedata(self): + data = [{}, {}] + return data + +codeparsercache = CodeParserCache() + +def parser_cache_init(d): + codeparsercache.init_cache(d) + +def parser_cache_save(): + codeparsercache.save_extras() + +def parser_cache_savemerge(): + codeparsercache.save_merge() + +Logger = logging.getLoggerClass() +class BufferedLogger(Logger): + def __init__(self, name, level=0, target=None): + Logger.__init__(self, name) + self.setLevel(level) + self.buffer = [] + self.target = target + + def handle(self, record): + self.buffer.append(record) + + def flush(self): + for record in self.buffer: + if self.target.isEnabledFor(record.levelno): + 
self.target.handle(record) + self.buffer = [] + +class PythonParser(): + getvars = (".getVar", ".appendVar", ".prependVar") + getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag") + containsfuncs = ("bb.utils.contains", "base_contains") + containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter") + execfuncs = ("bb.build.exec_func", "bb.build.exec_task") + + def warn(self, func, arg): + """Warn about calls of bitbake APIs which pass a non-literal + argument for the variable name, as we're not able to track such + a reference. + """ + + try: + funcstr = codegen.to_source(func) + argstr = codegen.to_source(arg) + except TypeError: + self.log.debug(2, 'Failed to convert function and argument to source form') + else: + self.log.debug(1, self.unhandled_message % (funcstr, argstr)) + + def visit_Call(self, node): + name = self.called_node_name(node.func) + if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs): + if isinstance(node.args[0], ast.Str): + varname = node.args[0].s + if name in self.containsfuncs and isinstance(node.args[1], ast.Str): + if varname not in self.contains: + self.contains[varname] = set() + self.contains[varname].add(node.args[1].s) + elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str): + if varname not in self.contains: + self.contains[varname] = set() + self.contains[varname].update(node.args[1].s.split()) + elif name.endswith(self.getvarflags): + if isinstance(node.args[1], ast.Str): + self.references.add('%s[%s]' % (varname, node.args[1].s)) + else: + self.warn(node.func, node.args[1]) + else: + self.references.add(varname) + else: + self.warn(node.func, node.args[0]) + elif name and name.endswith(".expand"): + if isinstance(node.args[0], ast.Str): + value = node.args[0].s + d = bb.data.init() + parser = d.expandWithRefs(value, self.name) + self.references |= parser.references + self.execs |= parser.execs + for varname in parser.contains: + if varname not in self.contains: + self.contains[varname] = set() + self.contains[varname] |= parser.contains[varname] + elif name in self.execfuncs: + if isinstance(node.args[0], ast.Str): + self.var_execs.add(node.args[0].s) + else: + self.warn(node.func, node.args[0]) + elif name and isinstance(node.func, (ast.Name, ast.Attribute)): + self.execs.add(name) + + def called_node_name(self, node): + """Given a called node, return its original string form""" + components = [] + while node: + if isinstance(node, ast.Attribute): + components.append(node.attr) + node = node.value + elif isinstance(node, ast.Name): + components.append(node.id) + return '.'.join(reversed(components)) + else: + break + + def __init__(self, name, log): + self.name = name + self.var_execs = set() + self.contains = {} + self.execs = set() + self.references = set() + self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, log) + + self.unhandled_message = "in call of %s, argument '%s' is not a string literal" + self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message) + + def parse_python(self, node, lineno=0, filename=""): + if not node or not node.strip(): + return + + h = bbhash(str(node)) + + if h in codeparsercache.pythoncache: + self.references = set(codeparsercache.pythoncache[h].refs) + self.execs = set(codeparsercache.pythoncache[h].execs) + self.contains = {} + for i in codeparsercache.pythoncache[h].contains: + self.contains[i] = set(codeparsercache.pythoncache[h].contains[i]) + return + + 
if h in codeparsercache.pythoncacheextras: + self.references = set(codeparsercache.pythoncacheextras[h].refs) + self.execs = set(codeparsercache.pythoncacheextras[h].execs) + self.contains = {} + for i in codeparsercache.pythoncacheextras[h].contains: + self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i]) + return + + # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though + node = "\n" * int(lineno) + node + code = compile(check_indent(str(node)), filename, "exec", + ast.PyCF_ONLY_AST) + + for n in ast.walk(code): + if n.__class__.__name__ == "Call": + self.visit_Call(n) + + self.execs.update(self.var_execs) + + codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains) + +class ShellParser(): + def __init__(self, name, log): + self.funcdefs = set() + self.allexecs = set() + self.execs = set() + self.log = BufferedLogger('BitBake.Data.%s' % name, logging.DEBUG, log) + self.unhandled_template = "unable to handle non-literal command '%s'" + self.unhandled_template = "while parsing %s, %s" % (name, self.unhandled_template) + + def parse_shell(self, value): + """Parse the supplied shell code in a string, returning the external + commands it executes. + """ + + h = bbhash(str(value)) + + if h in codeparsercache.shellcache: + self.execs = set(codeparsercache.shellcache[h].execs) + return self.execs + + if h in codeparsercache.shellcacheextras: + self.execs = set(codeparsercache.shellcacheextras[h].execs) + return self.execs + + self._parse_shell(value) + self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs) + + codeparsercache.shellcacheextras[h] = codeparsercache.newShellCacheLine(self.execs) + + return self.execs + + def _parse_shell(self, value): + try: + tokens, _ = pyshyacc.parse(value, eof=True, debug=False) + except pyshlex.NeedMore: + raise sherrors.ShellSyntaxError("Unexpected EOF") + + self.process_tokens(tokens) + + def process_tokens(self, tokens): + """Process a supplied portion of the syntax tree as returned by + pyshyacc.parse. + """ + + def function_definition(value): + self.funcdefs.add(value.name) + return [value.body], None + + def case_clause(value): + # Element 0 of each item in the case is the list of patterns, and + # Element 1 of each item in the case is the list of commands to be + # executed when that pattern matches. 
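# (Illustrative aside, not part of the patch.) For a hypothetical fragment
#     case $PN in
#         foo|bar) do_one ;;
#         *) do_other ;;
#     esac
# value.items would look roughly like
#     [(['foo', 'bar'], [<do_one command>]), (['*'], [<do_other command>])]
# so the chain() calls below flatten the patterns into "words" and the
# command lists into "cmds" for further processing.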
+ words = chain(*[item[0] for item in value.items]) + cmds = chain(*[item[1] for item in value.items]) + return cmds, words + + def if_clause(value): + main = chain(value.cond, value.if_cmds) + rest = value.else_cmds + if isinstance(rest, tuple) and rest[0] == "elif": + return chain(main, if_clause(rest[1])) + else: + return chain(main, rest) + + def simple_command(value): + return None, chain(value.words, (assign[1] for assign in value.assigns)) + + token_handlers = { + "and_or": lambda x: ((x.left, x.right), None), + "async": lambda x: ([x], None), + "brace_group": lambda x: (x.cmds, None), + "for_clause": lambda x: (x.cmds, x.items), + "function_definition": function_definition, + "if_clause": lambda x: (if_clause(x), None), + "pipeline": lambda x: (x.commands, None), + "redirect_list": lambda x: ([x.cmd], None), + "subshell": lambda x: (x.cmds, None), + "while_clause": lambda x: (chain(x.condition, x.cmds), None), + "until_clause": lambda x: (chain(x.condition, x.cmds), None), + "simple_command": simple_command, + "case_clause": case_clause, + } + + def process_token_list(tokens): + for token in tokens: + if isinstance(token, list): + process_token_list(token) + continue + name, value = token + try: + more_tokens, words = token_handlers[name](value) + except KeyError: + raise NotImplementedError("Unsupported token type " + name) + + if more_tokens: + self.process_tokens(more_tokens) + + if words: + self.process_words(words) + + process_token_list(tokens) + + def process_words(self, words): + """Process a set of 'words' in pyshyacc parlance, which includes + extraction of executed commands from $() blocks, as well as grabbing + the command name argument. + """ + + words = list(words) + for word in list(words): + wtree = pyshlex.make_wordtree(word[1]) + for part in wtree: + if not isinstance(part, list): + continue + + if part[0] in ('`', '$('): + command = pyshlex.wordtree_as_string(part[1:-1]) + self._parse_shell(command) + + if word[0] in ("cmd_name", "cmd_word"): + if word in words: + words.remove(word) + + usetoken = False + for word in words: + if word[0] in ("cmd_name", "cmd_word") or \ + (usetoken and word[0] == "TOKEN"): + if "=" in word[1]: + usetoken = True + continue + + cmd = word[1] + if cmd.startswith("$"): + self.log.debug(1, self.unhandled_template % cmd) + elif cmd == "eval": + command = " ".join(word for _, word in words[1:]) + self._parse_shell(command) + else: + self.allexecs.add(cmd) + break diff --git a/poky/bitbake/lib/bb/command.py b/poky/bitbake/lib/bb/command.py new file mode 100644 index 0000000000..6c966e3dbc --- /dev/null +++ b/poky/bitbake/lib/bb/command.py @@ -0,0 +1,765 @@ +""" +BitBake 'Command' module + +Provide an interface to interact with the bitbake server through 'commands' +""" + +# Copyright (C) 2006-2007 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +The bitbake server takes 'commands' from its UI/commandline. 
+Commands are either synchronous or asynchronous. +Async commands return data to the client in the form of events. +Sync commands must only return data through the function return value +and must not trigger events, directly or indirectly. +Commands are queued in a CommandQueue +""" + +from collections import OrderedDict, defaultdict + +import bb.event +import bb.cooker +import bb.remotedata + +class DataStoreConnectionHandle(object): + def __init__(self, dsindex=0): + self.dsindex = dsindex + +class CommandCompleted(bb.event.Event): + pass + +class CommandExit(bb.event.Event): + def __init__(self, exitcode): + bb.event.Event.__init__(self) + self.exitcode = int(exitcode) + +class CommandFailed(CommandExit): + def __init__(self, message): + self.error = message + CommandExit.__init__(self, 1) + def __str__(self): + return "Command execution failed: %s" % self.error + +class CommandError(Exception): + pass + +class Command: + """ + A queue of asynchronous commands for bitbake + """ + def __init__(self, cooker): + self.cooker = cooker + self.cmds_sync = CommandsSync() + self.cmds_async = CommandsAsync() + self.remotedatastores = bb.remotedata.RemoteDatastores(cooker) + + # FIXME Add lock for this + self.currentAsyncCommand = None + + def runCommand(self, commandline, ro_only = False): + command = commandline.pop(0) + if hasattr(CommandsSync, command): + # Can run synchronous commands straight away + command_method = getattr(self.cmds_sync, command) + if ro_only: + if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'): + return None, "Not able to execute not readonly commands in readonly mode" + try: + self.cooker.process_inotify_updates() + if getattr(command_method, 'needconfig', True): + self.cooker.updateCacheSync() + result = command_method(self, commandline) + except CommandError as exc: + return None, exc.args[0] + except (Exception, SystemExit): + import traceback + return None, traceback.format_exc() + else: + return result, None + if self.currentAsyncCommand is not None: + return None, "Busy (%s in progress)" % self.currentAsyncCommand[0] + if command not in CommandsAsync.__dict__: + return None, "No such command" + self.currentAsyncCommand = (command, commandline) + self.cooker.configuration.server_register_idlecallback(self.cooker.runCommands, self.cooker) + return True, None + + def runAsyncCommand(self): + try: + self.cooker.process_inotify_updates() + if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown): + # updateCache will trigger a shutdown of the parser + # and then raise BBHandledException triggering an exit + self.cooker.updateCache() + return False + if self.currentAsyncCommand is not None: + (command, options) = self.currentAsyncCommand + commandmethod = getattr(CommandsAsync, command) + needcache = getattr( commandmethod, "needcache" ) + if needcache and self.cooker.state != bb.cooker.state.running: + self.cooker.updateCache() + return True + else: + commandmethod(self.cmds_async, self, options) + return False + else: + return False + except KeyboardInterrupt as exc: + self.finishAsyncCommand("Interrupted") + return False + except SystemExit as exc: + arg = exc.args[0] + if isinstance(arg, str): + self.finishAsyncCommand(arg) + else: + self.finishAsyncCommand("Exited with %s" % arg) + return False + except Exception as exc: + import traceback + if isinstance(exc, bb.BBHandledException): + self.finishAsyncCommand("") + else: + self.finishAsyncCommand(traceback.format_exc()) + return 
False + + def finishAsyncCommand(self, msg=None, code=None): + if msg or msg == "": + bb.event.fire(CommandFailed(msg), self.cooker.data) + elif code: + bb.event.fire(CommandExit(code), self.cooker.data) + else: + bb.event.fire(CommandCompleted(), self.cooker.data) + self.currentAsyncCommand = None + self.cooker.finishcommand() + + def reset(self): + self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) + +def split_mc_pn(pn): + if pn.startswith("multiconfig:"): + _, mc, pn = pn.split(":", 2) + return (mc, pn) + return ('', pn) + +class CommandsSync: + """ + A class of synchronous commands + These should run quickly so as not to hurt interactive performance. + These must not influence any running synchronous command. + """ + + def stateShutdown(self, command, params): + """ + Trigger cooker 'shutdown' mode + """ + command.cooker.shutdown(False) + + def stateForceShutdown(self, command, params): + """ + Stop the cooker + """ + command.cooker.shutdown(True) + + def getAllKeysWithFlags(self, command, params): + """ + Returns a dump of the global state. Call with + variable flags to be retrieved as params. + """ + flaglist = params[0] + return command.cooker.getAllKeysWithFlags(flaglist) + getAllKeysWithFlags.readonly = True + + def getVariable(self, command, params): + """ + Read the value of a variable from data + """ + varname = params[0] + expand = True + if len(params) > 1: + expand = (params[1] == "True") + + return command.cooker.data.getVar(varname, expand) + getVariable.readonly = True + + def setVariable(self, command, params): + """ + Set the value of variable in data + """ + varname = params[0] + value = str(params[1]) + command.cooker.extraconfigdata[varname] = value + command.cooker.data.setVar(varname, value) + + def getSetVariable(self, command, params): + """ + Read the value of a variable from data and set it into the datastore + which effectively expands and locks the value. 
+ """ + varname = params[0] + result = self.getVariable(command, params) + command.cooker.data.setVar(varname, result) + return result + + def setConfig(self, command, params): + """ + Set the value of variable in configuration + """ + varname = params[0] + value = str(params[1]) + setattr(command.cooker.configuration, varname, value) + + def enableDataTracking(self, command, params): + """ + Enable history tracking for variables + """ + command.cooker.enableDataTracking() + + def disableDataTracking(self, command, params): + """ + Disable history tracking for variables + """ + command.cooker.disableDataTracking() + + def setPrePostConfFiles(self, command, params): + prefiles = params[0].split() + postfiles = params[1].split() + command.cooker.configuration.prefile = prefiles + command.cooker.configuration.postfile = postfiles + setPrePostConfFiles.needconfig = False + + def matchFile(self, command, params): + fMatch = params[0] + return command.cooker.matchFile(fMatch) + matchFile.needconfig = False + + def getUIHandlerNum(self, command, params): + return bb.event.get_uihandler() + getUIHandlerNum.needconfig = False + getUIHandlerNum.readonly = True + + def setEventMask(self, command, params): + handlerNum = params[0] + llevel = params[1] + debug_domains = params[2] + mask = params[3] + return bb.event.set_UIHmask(handlerNum, llevel, debug_domains, mask) + setEventMask.needconfig = False + setEventMask.readonly = True + + def setFeatures(self, command, params): + """ + Set the cooker features to include the passed list of features + """ + features = params[0] + command.cooker.setFeatures(features) + setFeatures.needconfig = False + # although we change the internal state of the cooker, this is transparent since + # we always take and leave the cooker in state.initial + setFeatures.readonly = True + + def updateConfig(self, command, params): + options = params[0] + environment = params[1] + cmdline = params[2] + command.cooker.updateConfigOpts(options, environment, cmdline) + updateConfig.needconfig = False + + def parseConfiguration(self, command, params): + """Instruct bitbake to parse its configuration + NOTE: it is only necessary to call this if you aren't calling any normal action + (otherwise parsing is taken care of automatically) + """ + command.cooker.parseConfiguration() + parseConfiguration.needconfig = False + + def getLayerPriorities(self, command, params): + command.cooker.parseConfiguration() + ret = [] + # regex objects cannot be marshalled by xmlrpc + for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities: + ret.append((collection, pattern, regex.pattern, pri)) + return ret + getLayerPriorities.readonly = True + + def getRecipes(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return list(command.cooker.recipecaches[mc].pkg_pn.items()) + getRecipes.readonly = True + + def getRecipeDepends(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return list(command.cooker.recipecaches[mc].deps.items()) + getRecipeDepends.readonly = True + + def getRecipeVersions(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].pkg_pepvpr + getRecipeVersions.readonly = True + + def getRecipeProvides(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].fn_provides + getRecipeProvides.readonly = True + + def getRecipePackages(self, command, params): + try: + mc = params[0] + 
except IndexError: + mc = '' + return command.cooker.recipecaches[mc].packages + getRecipePackages.readonly = True + + def getRecipePackagesDynamic(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].packages_dynamic + getRecipePackagesDynamic.readonly = True + + def getRProviders(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].rproviders + getRProviders.readonly = True + + def getRuntimeDepends(self, command, params): + ret = [] + try: + mc = params[0] + except IndexError: + mc = '' + rundeps = command.cooker.recipecaches[mc].rundeps + for key, value in rundeps.items(): + if isinstance(value, defaultdict): + value = dict(value) + ret.append((key, value)) + return ret + getRuntimeDepends.readonly = True + + def getRuntimeRecommends(self, command, params): + ret = [] + try: + mc = params[0] + except IndexError: + mc = '' + runrecs = command.cooker.recipecaches[mc].runrecs + for key, value in runrecs.items(): + if isinstance(value, defaultdict): + value = dict(value) + ret.append((key, value)) + return ret + getRuntimeRecommends.readonly = True + + def getRecipeInherits(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].inherits + getRecipeInherits.readonly = True + + def getBbFilePriority(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].bbfile_priority + getBbFilePriority.readonly = True + + def getDefaultPreference(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return command.cooker.recipecaches[mc].pkg_dp + getDefaultPreference.readonly = True + + def getSkippedRecipes(self, command, params): + # Return list sorted by reverse priority order + import bb.cache + skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), + key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0]))) + return list(skipdict.items()) + getSkippedRecipes.readonly = True + + def getOverlayedRecipes(self, command, params): + return list(command.cooker.collection.overlayed.items()) + getOverlayedRecipes.readonly = True + + def getFileAppends(self, command, params): + fn = params[0] + return command.cooker.collection.get_file_appends(fn) + getFileAppends.readonly = True + + def getAllAppends(self, command, params): + return command.cooker.collection.bbappends + getAllAppends.readonly = True + + def findProviders(self, command, params): + return command.cooker.findProviders() + findProviders.readonly = True + + def findBestProvider(self, command, params): + (mc, pn) = split_mc_pn(params[0]) + return command.cooker.findBestProvider(pn, mc) + findBestProvider.readonly = True + + def allProviders(self, command, params): + try: + mc = params[0] + except IndexError: + mc = '' + return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items()) + allProviders.readonly = True + + def getRuntimeProviders(self, command, params): + rprovide = params[0] + try: + mc = params[1] + except IndexError: + mc = '' + all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide) + if all_p: + best = bb.providers.filterProvidersRunTime(all_p, rprovide, + command.cooker.data, + command.cooker.recipecaches[mc])[0][0] + else: + best = None + return all_p, best + getRuntimeProviders.readonly = True + + def dataStoreConnectorFindVar(self, command, 
params): + dsindex = params[0] + name = params[1] + datastore = command.remotedatastores[dsindex] + value, overridedata = datastore._findVar(name) + + if value: + content = value.get('_content', None) + if isinstance(content, bb.data_smart.DataSmart): + # Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully + idx = command.remotedatastores.check_store(content, True) + return {'_content': DataStoreConnectionHandle(idx), + '_connector_origtype': 'DataStoreConnectionHandle', + '_connector_overrides': overridedata} + elif isinstance(content, set): + return {'_content': list(content), + '_connector_origtype': 'set', + '_connector_overrides': overridedata} + else: + value['_connector_overrides'] = overridedata + else: + value = {} + value['_connector_overrides'] = overridedata + return value + dataStoreConnectorFindVar.readonly = True + + def dataStoreConnectorGetKeys(self, command, params): + dsindex = params[0] + datastore = command.remotedatastores[dsindex] + return list(datastore.keys()) + dataStoreConnectorGetKeys.readonly = True + + def dataStoreConnectorGetVarHistory(self, command, params): + dsindex = params[0] + name = params[1] + datastore = command.remotedatastores[dsindex] + return datastore.varhistory.variable(name) + dataStoreConnectorGetVarHistory.readonly = True + + def dataStoreConnectorExpandPythonRef(self, command, params): + config_data_dict = params[0] + varname = params[1] + expr = params[2] + + config_data = command.remotedatastores.receive_datastore(config_data_dict) + + varparse = bb.data_smart.VariableParse(varname, config_data) + return varparse.python_sub(expr) + + def dataStoreConnectorRelease(self, command, params): + dsindex = params[0] + if dsindex <= 0: + raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex) + command.remotedatastores.release(dsindex) + + def dataStoreConnectorSetVarFlag(self, command, params): + dsindex = params[0] + name = params[1] + flag = params[2] + value = params[3] + datastore = command.remotedatastores[dsindex] + datastore.setVarFlag(name, flag, value) + + def dataStoreConnectorDelVar(self, command, params): + dsindex = params[0] + name = params[1] + datastore = command.remotedatastores[dsindex] + if len(params) > 2: + flag = params[2] + datastore.delVarFlag(name, flag) + else: + datastore.delVar(name) + + def dataStoreConnectorRenameVar(self, command, params): + dsindex = params[0] + name = params[1] + newname = params[2] + datastore = command.remotedatastores[dsindex] + datastore.renameVar(name, newname) + + def parseRecipeFile(self, command, params): + """ + Parse the specified recipe file (with or without bbappends) + and return a datastore object representing the environment + for the recipe. 
+ """ + fn = params[0] + appends = params[1] + appendlist = params[2] + if len(params) > 3: + config_data_dict = params[3] + config_data = command.remotedatastores.receive_datastore(config_data_dict) + else: + config_data = None + + if appends: + if appendlist is not None: + appendfiles = appendlist + else: + appendfiles = command.cooker.collection.get_file_appends(fn) + else: + appendfiles = [] + # We are calling bb.cache locally here rather than on the server, + # but that's OK because it doesn't actually need anything from + # the server barring the global datastore (which we have a remote + # version of) + if config_data: + # We have to use a different function here if we're passing in a datastore + # NOTE: we took a copy above, so we don't do it here again + envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)[''] + else: + # Use the standard path + parser = bb.cache.NoCache(command.cooker.databuilder) + envdata = parser.loadDataFull(fn, appendfiles) + idx = command.remotedatastores.store(envdata) + return DataStoreConnectionHandle(idx) + parseRecipeFile.readonly = True + +class CommandsAsync: + """ + A class of asynchronous commands + These functions communicate via generated events. + Any function that requires metadata parsing should be here. + """ + + def buildFile(self, command, params): + """ + Build a single specified .bb file + """ + bfile = params[0] + task = params[1] + if len(params) > 2: + internal = params[2] + else: + internal = False + + if internal: + command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True) + else: + command.cooker.buildFile(bfile, task) + buildFile.needcache = False + + def buildTargets(self, command, params): + """ + Build a set of targets + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.buildTargets(pkgs_to_build, task) + buildTargets.needcache = True + + def generateDepTreeEvent(self, command, params): + """ + Generate an event containing the dependency information + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.generateDepTreeEvent(pkgs_to_build, task) + command.finishAsyncCommand() + generateDepTreeEvent.needcache = True + + def generateDotGraph(self, command, params): + """ + Dump dependency information to disk as .dot files + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.generateDotGraphFiles(pkgs_to_build, task) + command.finishAsyncCommand() + generateDotGraph.needcache = True + + def generateTargetsTree(self, command, params): + """ + Generate a tree of buildable targets. + If klass is provided ensure all recipes that inherit the class are + included in the package list. + If pkg_list provided use that list (plus any extras brought in by + klass) rather than generating a tree for all packages. + """ + klass = params[0] + pkg_list = params[1] + + command.cooker.generateTargetsTree(klass, pkg_list) + command.finishAsyncCommand() + generateTargetsTree.needcache = True + + def findConfigFiles(self, command, params): + """ + Find config files which provide appropriate values + for the passed configuration variable. i.e. 
MACHINE + """ + varname = params[0] + + command.cooker.findConfigFiles(varname) + command.finishAsyncCommand() + findConfigFiles.needcache = False + + def findFilesMatchingInDir(self, command, params): + """ + Find implementation files matching the specified pattern + in the requested subdirectory of a BBPATH + """ + pattern = params[0] + directory = params[1] + + command.cooker.findFilesMatchingInDir(pattern, directory) + command.finishAsyncCommand() + findFilesMatchingInDir.needcache = False + + def findConfigFilePath(self, command, params): + """ + Find the path of the requested configuration file + """ + configfile = params[0] + + command.cooker.findConfigFilePath(configfile) + command.finishAsyncCommand() + findConfigFilePath.needcache = False + + def showVersions(self, command, params): + """ + Show the currently selected versions + """ + command.cooker.showVersions() + command.finishAsyncCommand() + showVersions.needcache = True + + def showEnvironmentTarget(self, command, params): + """ + Print the environment of a target recipe + (needs the cache to work out which recipe to use) + """ + pkg = params[0] + + command.cooker.showEnvironment(None, pkg) + command.finishAsyncCommand() + showEnvironmentTarget.needcache = True + + def showEnvironment(self, command, params): + """ + Print the standard environment + or if specified the environment for a specified recipe + """ + bfile = params[0] + + command.cooker.showEnvironment(bfile) + command.finishAsyncCommand() + showEnvironment.needcache = False + + def parseFiles(self, command, params): + """ + Parse the .bb files + """ + command.cooker.updateCache() + command.finishAsyncCommand() + parseFiles.needcache = True + + def compareRevisions(self, command, params): + """ + Parse the .bb files + """ + if bb.fetch.fetcher_compare_revisions(command.cooker.data): + command.finishAsyncCommand(code=1) + else: + command.finishAsyncCommand() + compareRevisions.needcache = True + + def triggerEvent(self, command, params): + """ + Trigger a certain event + """ + event = params[0] + bb.event.fire(eval(event), command.cooker.data) + command.currentAsyncCommand = None + triggerEvent.needcache = False + + def resetCooker(self, command, params): + """ + Reset the cooker to its initial state, thus forcing a reparse for + any async command that has the needcache property set to True + """ + command.cooker.reset() + command.finishAsyncCommand() + resetCooker.needcache = False + + def clientComplete(self, command, params): + """ + Do the right thing when the controlling client exits + """ + command.cooker.clientComplete() + command.finishAsyncCommand() + clientComplete.needcache = False + + def findSigInfo(self, command, params): + """ + Find signature info files via the signature generator + """ + pn = params[0] + taskname = params[1] + sigs = params[2] + res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data) + bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data) + command.finishAsyncCommand() + findSigInfo.needcache = False diff --git a/poky/bitbake/lib/bb/compat.py b/poky/bitbake/lib/bb/compat.py new file mode 100644 index 0000000000..de1923d28a --- /dev/null +++ b/poky/bitbake/lib/bb/compat.py @@ -0,0 +1,6 @@ +"""Code pulled from future python versions, here for compatibility""" + +from collections import MutableMapping, KeysView, ValuesView, ItemsView, OrderedDict +from functools import total_ordering + + diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py new file mode 100644 index 
0000000000..1fda40dd41 --- /dev/null +++ b/poky/bitbake/lib/bb/cooker.py @@ -0,0 +1,2161 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 - 2007 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +import sys, os, glob, os.path, re, time +import atexit +import itertools +import logging +import multiprocessing +import sre_constants +import threading +from io import StringIO, UnsupportedOperation +from contextlib import closing +from functools import wraps +from collections import defaultdict, namedtuple +import bb, bb.exceptions, bb.command +from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build +import queue +import signal +import subprocess +import errno +import prserv.serv +import pyinotify +import json +import pickle +import codecs + +logger = logging.getLogger("BitBake") +collectlog = logging.getLogger("BitBake.Collection") +buildlog = logging.getLogger("BitBake.Build") +parselog = logging.getLogger("BitBake.Parsing") +providerlog = logging.getLogger("BitBake.Provider") + +class NoSpecificMatch(bb.BBHandledException): + """ + Exception raised when no or multiple file matches are found + """ + +class NothingToBuild(Exception): + """ + Exception raised when there is nothing to build + """ + +class CollectionError(bb.BBHandledException): + """ + Exception raised when layer configuration is incorrect + """ + +class state: + initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7)) + + @classmethod + def get_name(cls, code): + for name in dir(cls): + value = getattr(cls, name) + if type(value) == type(cls.initial) and value == code: + return name + raise ValueError("Invalid status code: %s" % code) + + +class SkippedPackage: + def __init__(self, info = None, reason = None): + self.pn = None + self.skipreason = None + self.provides = None + self.rprovides = None + + if info: + self.pn = info.pn + self.skipreason = info.skipreason + self.provides = info.provides + self.rprovides = info.rprovides + elif reason: + self.skipreason = reason + + +class CookerFeatures(object): + _feature_list = [HOB_EXTRA_CACHES, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = list(range(3)) + + def __init__(self): + self._features=set() + + def setFeature(self, f): + # validate we got a request for a feature we support + if f not in CookerFeatures._feature_list: + return + self._features.add(f) + + def __contains__(self, f): + return f in self._features + + def __iter__(self): + return self._features.__iter__() + + def __next__(self): + return next(self._features) + + +class EventWriter: + def __init__(self, cooker, eventfile): + self.file_inited = 
None + self.cooker = cooker + self.eventfile = eventfile + self.event_queue = [] + + def write_event(self, event): + with open(self.eventfile, "a") as f: + try: + str_event = codecs.encode(pickle.dumps(event), 'base64').decode('utf-8') + f.write("%s\n" % json.dumps({"class": event.__module__ + "." + event.__class__.__name__, + "vars": str_event})) + except Exception as err: + import traceback + print(err, traceback.format_exc()) + + def send(self, event): + if self.file_inited: + # we have the file, just write the event + self.write_event(event) + else: + # init on bb.event.BuildStarted + name = "%s.%s" % (event.__module__, event.__class__.__name__) + if name in ("bb.event.BuildStarted", "bb.cooker.CookerExit"): + with open(self.eventfile, "w") as f: + f.write("%s\n" % json.dumps({ "allvariables" : self.cooker.getAllKeysWithFlags(["doc", "func"])})) + + self.file_inited = True + + # write pending events + for evt in self.event_queue: + self.write_event(evt) + + # also write the current event + self.write_event(event) + else: + # queue all events until the file is inited + self.event_queue.append(event) + +#============================================================================# +# BBCooker +#============================================================================# +class BBCooker: + """ + Manages one bitbake build run + """ + + def __init__(self, configuration, featureSet=None): + self.recipecaches = None + self.skiplist = {} + self.featureset = CookerFeatures() + if featureSet: + for f in featureSet: + self.featureset.setFeature(f) + + self.configuration = configuration + + self.configwatcher = pyinotify.WatchManager() + self.configwatcher.bbseen = [] + self.configwatcher.bbwatchedfiles = [] + self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications) + self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \ + pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \ + pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO + self.watcher = pyinotify.WatchManager() + self.watcher.bbseen = [] + self.watcher.bbwatchedfiles = [] + self.notifier = pyinotify.Notifier(self.watcher, self.notifications) + + # If being called by something like tinfoil, we need to clean cached data + # which may now be invalid + bb.parse.clear_cache() + bb.parse.BBHandler.cached_statements = {} + + self.ui_cmdline = None + + self.initConfigurationData() + + # we log all events to a file if so directed + if self.configuration.writeeventlog: + # register the log file writer as UI Handler + writer = EventWriter(self, self.configuration.writeeventlog) + EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event']) + bb.event.register_UIHhandler(EventLogWriteHandler(writer)) + + self.inotify_modified_files = [] + + def _process_inotify_updates(server, cooker, abort): + cooker.process_inotify_updates() + return 1.0 + + self.configuration.server_register_idlecallback(_process_inotify_updates, self) + + # TOSTOP must not be set or our children will hang when they output + try: + fd = sys.stdout.fileno() + if os.isatty(fd): + import termios + tcattr = termios.tcgetattr(fd) + if tcattr[3] & termios.TOSTOP: + buildlog.info("The terminal had the TOSTOP bit set, clearing...") + tcattr[3] = tcattr[3] & ~termios.TOSTOP + termios.tcsetattr(fd, termios.TCSANOW, tcattr) + except UnsupportedOperation: + pass + + self.command = bb.command.Command(self) + self.state = state.initial + + self.parser = None + + signal.signal(signal.SIGTERM, 
self.sigterm_exception) + # Let SIGHUP exit as SIGTERM + signal.signal(signal.SIGHUP, self.sigterm_exception) + + def process_inotify_updates(self): + for n in [self.confignotifier, self.notifier]: + if n.check_events(timeout=0): + # read notified events and enqeue them + n.read_events() + n.process_events() + + def config_notifications(self, event): + if event.maskname == "IN_Q_OVERFLOW": + bb.warn("inotify event queue overflowed, invalidating caches.") + self.parsecache_valid = False + self.baseconfig_valid = False + bb.parse.clear_cache() + return + if not event.pathname in self.configwatcher.bbwatchedfiles: + return + if not event.pathname in self.inotify_modified_files: + self.inotify_modified_files.append(event.pathname) + self.baseconfig_valid = False + + def notifications(self, event): + if event.maskname == "IN_Q_OVERFLOW": + bb.warn("inotify event queue overflowed, invalidating caches.") + self.parsecache_valid = False + bb.parse.clear_cache() + return + if event.pathname.endswith("bitbake-cookerdaemon.log") \ + or event.pathname.endswith("bitbake.lock"): + return + if not event.pathname in self.inotify_modified_files: + self.inotify_modified_files.append(event.pathname) + self.parsecache_valid = False + + def add_filewatch(self, deps, watcher=None, dirs=False): + if not watcher: + watcher = self.watcher + for i in deps: + watcher.bbwatchedfiles.append(i[0]) + if dirs: + f = i[0] + else: + f = os.path.dirname(i[0]) + if f in watcher.bbseen: + continue + watcher.bbseen.append(f) + watchtarget = None + while True: + # We try and add watches for files that don't exist but if they did, would influence + # the parser. The parent directory of these files may not exist, in which case we need + # to watch any parent that does exist for changes. + try: + watcher.add_watch(f, self.watchmask, quiet=False) + if watchtarget: + watcher.bbwatchedfiles.append(watchtarget) + break + except pyinotify.WatchManagerError as e: + if 'ENOENT' in str(e): + watchtarget = f + f = os.path.dirname(f) + if f in watcher.bbseen: + break + watcher.bbseen.append(f) + continue + if 'ENOSPC' in str(e): + providerlog.error("No space left on device or exceeds fs.inotify.max_user_watches?") + providerlog.error("To check max_user_watches: sysctl -n fs.inotify.max_user_watches.") + providerlog.error("To modify max_user_watches: sysctl -n -w fs.inotify.max_user_watches=.") + providerlog.error("Root privilege is required to modify max_user_watches.") + raise + + def sigterm_exception(self, signum, stackframe): + if signum == signal.SIGTERM: + bb.warn("Cooker received SIGTERM, shutting down...") + elif signum == signal.SIGHUP: + bb.warn("Cooker received SIGHUP, shutting down...") + self.state = state.forceshutdown + + def setFeatures(self, features): + # we only accept a new feature set if we're in state initial, so we can reset without problems + if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]: + raise Exception("Illegal state for feature set change") + original_featureset = list(self.featureset) + for feature in features: + self.featureset.setFeature(feature) + bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset))) + if (original_featureset != list(self.featureset)) and self.state != state.error: + self.reset() + + def initConfigurationData(self): + + self.state = state.initial + self.caches_array = [] + + # Need to preserve BB_CONSOLELOG over resets + consolelog = None + if hasattr(self, "data"): + consolelog = 
self.data.getVar("BB_CONSOLELOG") + + if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: + self.enableDataTracking() + + all_extra_cache_names = [] + # We hardcode all known cache types in a single place, here. + if CookerFeatures.HOB_EXTRA_CACHES in self.featureset: + all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo") + + caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names + + # At least CoreRecipeInfo will be loaded, so caches_array will never be empty! + # This is the entry point, no further check needed! + for var in caches_name_array: + try: + module_name, cache_name = var.split(':') + module = __import__(module_name, fromlist=(cache_name,)) + self.caches_array.append(getattr(module, cache_name)) + except ImportError as exc: + logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc)) + sys.exit("FATAL: Failed to import extra cache class '%s'." % cache_name) + + self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False) + self.databuilder.parseBaseConfiguration() + self.data = self.databuilder.data + self.data_hash = self.databuilder.data_hash + self.extraconfigdata = {} + + if consolelog: + self.data.setVar("BB_CONSOLELOG", consolelog) + + self.data.setVar('BB_CMDLINE', self.ui_cmdline) + + # + # Copy of the data store which has been expanded. + # Used for firing events and accessing variables where expansion needs to be accounted for + # + bb.parse.init_parser(self.data) + + if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: + self.disableDataTracking() + + self.data.renameVar("__depends", "__base_depends") + self.add_filewatch(self.data.getVar("__base_depends", False), self.configwatcher) + + self.baseconfig_valid = True + self.parsecache_valid = False + + def handlePRServ(self): + # Setup a PR Server based on the new configuration + try: + self.prhost = prserv.serv.auto_start(self.data) + except prserv.serv.PRServiceConfigError as e: + bb.fatal("Unable to start PR Server, exitting") + + def enableDataTracking(self): + self.configuration.tracking = True + if hasattr(self, "data"): + self.data.enableTracking() + + def disableDataTracking(self): + self.configuration.tracking = False + if hasattr(self, "data"): + self.data.disableTracking() + + def parseConfiguration(self): + # Set log file verbosity + verboselogs = bb.utils.to_boolean(self.data.getVar("BB_VERBOSE_LOGS", False)) + if verboselogs: + bb.msg.loggerVerboseLogs = True + + # Change nice level if we're asked to + nice = self.data.getVar("BB_NICE_LEVEL") + if nice: + curnice = os.nice(0) + nice = int(nice) - curnice + buildlog.verbose("Renice to %s " % os.nice(nice)) + + if self.recipecaches: + del self.recipecaches + self.multiconfigs = self.databuilder.mcdata.keys() + self.recipecaches = {} + for mc in self.multiconfigs: + self.recipecaches[mc] = bb.cache.CacheData(self.caches_array) + + self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS")) + + self.parsecache_valid = False + + def updateConfigOpts(self, options, environment, cmdline): + self.ui_cmdline = cmdline + clean = True + for o in options: + if o in ['prefile', 'postfile']: + # Only these options may require a reparse + try: + if getattr(self.configuration, o) == options[o]: + # Value is the same, no need to mark dirty + continue + except AttributeError: + pass + logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) + print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) + clean = False 
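For reference, the event log produced by the EventWriter registered above is line-oriented JSON: a one-off header object containing "allvariables" is written when the build starts, and every subsequent line carries the event class name plus the pickled event encoded as base64 under "vars". Assuming only that format (iter_logged_events and the file name are hypothetical, not part of BitBake), such a log could be read back roughly as follows:

import codecs
import json
import pickle

def iter_logged_events(path):
    # Hypothetical sketch: replays an event log written in the format used
    # by EventWriter above. Unpickling the events requires the bb modules
    # to be importable in the reading process.
    with open(path) as f:
        for line in f:
            entry = json.loads(line)
            if "allvariables" in entry:
                continue  # header line written on bb.event.BuildStarted
            raw = codecs.decode(entry["vars"].encode("utf-8"), "base64")
            yield entry["class"], pickle.loads(raw)

for name, event in iter_logged_events("eventlog.json"):
    print(name, getattr(event, "msg", ""))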
+ setattr(self.configuration, o, options[o]) + for k in bb.utils.approved_variables(): + if k in environment and k not in self.configuration.env: + logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k])) + self.configuration.env[k] = environment[k] + clean = False + if k in self.configuration.env and k not in environment: + logger.debug(1, "Updating environment variable %s (deleted)" % (k)) + del self.configuration.env[k] + clean = False + if k not in self.configuration.env and k not in environment: + continue + if environment[k] != self.configuration.env[k]: + logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k])) + self.configuration.env[k] = environment[k] + clean = False + if not clean: + logger.debug(1, "Base environment change, triggering reparse") + self.reset() + + def runCommands(self, server, data, abort): + """ + Run any queued asynchronous command + This is done by the idle handler so it runs in true context rather than + tied to any UI. + """ + + return self.command.runAsyncCommand() + + def showVersions(self): + + (latest_versions, preferred_versions) = self.findProviders() + + logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version") + logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================") + + for p in sorted(self.recipecaches[''].pkg_pn): + pref = preferred_versions[p] + latest = latest_versions[p] + + prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2] + lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2] + + if pref == latest: + prefstr = "" + + logger.plain("%-35s %25s %25s", p, lateststr, prefstr) + + def showEnvironment(self, buildfile=None, pkgs_to_build=None): + """ + Show the outer or per-recipe environment + """ + fn = None + envdata = None + if not pkgs_to_build: + pkgs_to_build = [] + + orig_tracking = self.configuration.tracking + if not orig_tracking: + self.enableDataTracking() + self.reset() + + + if buildfile: + # Parse the configuration here. 
We need to do it explicitly here since + # this showEnvironment() code path doesn't use the cache + self.parseConfiguration() + + fn, cls, mc = bb.cache.virtualfn2realfn(buildfile) + fn = self.matchFile(fn) + fn = bb.cache.realfn2virtual(fn, cls, mc) + elif len(pkgs_to_build) == 1: + ignore = self.data.getVar("ASSUME_PROVIDED") or "" + if pkgs_to_build[0] in set(ignore.split()): + bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0]) + + taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True) + + mc = runlist[0][0] + fn = runlist[0][3] + else: + envdata = self.data + data.expandKeys(envdata) + parse.ast.runAnonFuncs(envdata) + + if fn: + try: + bb_cache = bb.cache.Cache(self.databuilder, self.data_hash, self.caches_array) + envdata = bb_cache.loadDataFull(fn, self.collection.get_file_appends(fn)) + except Exception as e: + parselog.exception("Unable to read %s", fn) + raise + + # Display history + with closing(StringIO()) as env: + self.data.inchistory.emit(env) + logger.plain(env.getvalue()) + + # emit variables and shell functions + with closing(StringIO()) as env: + data.emit_env(env, envdata, True) + logger.plain(env.getvalue()) + + # emit the metadata which isnt valid shell + for e in sorted(envdata.keys()): + if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False): + logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False)) + + if not orig_tracking: + self.disableDataTracking() + self.reset() + + def buildTaskData(self, pkgs_to_build, task, abort, allowincomplete=False): + """ + Prepare a runqueue and taskdata object for iteration over pkgs_to_build + """ + bb.event.fire(bb.event.TreeDataPreparationStarted(), self.data) + + # A task of None means use the default task + if task is None: + task = self.configuration.cmd + if not task.startswith("do_"): + task = "do_%s" % task + + targetlist = self.checkPackages(pkgs_to_build, task) + fulltargetlist = [] + defaulttask_implicit = '' + defaulttask_explicit = False + wildcard = False + + # Wild card expansion: + # Replace string such as "multiconfig:*:bash" + # into "multiconfig:A:bash multiconfig:B:bash bash" + for k in targetlist: + if k.startswith("multiconfig:"): + if wildcard: + bb.fatal('multiconfig conflict') + if k.split(":")[1] == "*": + wildcard = True + for mc in self.multiconfigs: + if mc: + fulltargetlist.append(k.replace('*', mc)) + # implicit default task + else: + defaulttask_implicit = k.split(":")[2] + else: + fulltargetlist.append(k) + else: + defaulttask_explicit = True + fulltargetlist.append(k) + + if not defaulttask_explicit and defaulttask_implicit != '': + fulltargetlist.append(defaulttask_implicit) + + bb.debug(1,"Target list: %s" % (str(fulltargetlist))) + taskdata = {} + localdata = {} + + for mc in self.multiconfigs: + taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete) + localdata[mc] = data.createCopy(self.databuilder.mcdata[mc]) + bb.data.expandKeys(localdata[mc]) + + current = 0 + runlist = [] + for k in fulltargetlist: + mc = "" + if k.startswith("multiconfig:"): + mc = k.split(":")[1] + k = ":".join(k.split(":")[2:]) + ktask = task + if ":do_" in k: + k2 = k.split(":do_") + k = k2[0] + ktask = k2[1] + taskdata[mc].add_provider(localdata[mc], self.recipecaches[mc], k) + current += 1 + if not ktask.startswith("do_"): + ktask = "do_%s" % ktask + if k not in taskdata[mc].build_targets or not taskdata[mc].build_targets[k]: + # e.g. 
in ASSUME_PROVIDED + continue + fn = taskdata[mc].build_targets[k][0] + runlist.append([mc, k, ktask, fn]) + bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data) + + for mc in self.multiconfigs: + taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc]) + + bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data) + return taskdata, runlist + + def prepareTreeData(self, pkgs_to_build, task): + """ + Prepare a runqueue and taskdata object for iteration over pkgs_to_build + """ + + # We set abort to False here to prevent unbuildable targets raising + # an exception when we're just generating data + taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True) + + return runlist, taskdata + + ######## WARNING : this function requires cache_extra to be enabled ######## + + def generateTaskDepTreeData(self, pkgs_to_build, task): + """ + Create a dependency graph of pkgs_to_build including reverse dependency + information. + """ + if not task.startswith("do_"): + task = "do_%s" % task + + runlist, taskdata = self.prepareTreeData(pkgs_to_build, task) + rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) + rq.rqdata.prepare() + return self.buildDependTree(rq, taskdata) + + @staticmethod + def add_mc_prefix(mc, pn): + if mc: + return "multiconfig:%s:%s" % (mc, pn) + return pn + + def buildDependTree(self, rq, taskdata): + seen_fns = [] + depend_tree = {} + depend_tree["depends"] = {} + depend_tree["tdepends"] = {} + depend_tree["pn"] = {} + depend_tree["rdepends-pn"] = {} + depend_tree["packages"] = {} + depend_tree["rdepends-pkg"] = {} + depend_tree["rrecs-pkg"] = {} + depend_tree['providermap'] = {} + depend_tree["layer-priorities"] = self.bbfile_config_priorities + + for mc in taskdata: + for name, fn in list(taskdata[mc].get_providermap().items()): + pn = self.recipecaches[mc].pkg_fn[fn] + pn = self.add_mc_prefix(mc, pn) + if name != pn: + version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[fn] + depend_tree['providermap'][name] = (pn, version) + + for tid in rq.rqdata.runtaskentries: + (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) + pn = self.recipecaches[mc].pkg_fn[taskfn] + pn = self.add_mc_prefix(mc, pn) + version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn] + if pn not in depend_tree["pn"]: + depend_tree["pn"][pn] = {} + depend_tree["pn"][pn]["filename"] = taskfn + depend_tree["pn"][pn]["version"] = version + depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None) + + # if we have extra caches, list all attributes they bring in + extra_info = [] + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): + cachefields = getattr(cache_class, 'cachefields', []) + extra_info = extra_info + cachefields + + # for all attributes stored, add them to the dependency tree + for ei in extra_info: + depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn] + + + dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid)) + if not dotname in depend_tree["tdepends"]: + depend_tree["tdepends"][dotname] = [] + for dep in rq.rqdata.runtaskentries[tid].depends: + (depmc, depfn, deptaskname, deptaskfn) = bb.runqueue.split_tid_mcfn(dep) + deppn = self.recipecaches[mc].pkg_fn[deptaskfn] + depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep))) + if taskfn not in 
seen_fns: + seen_fns.append(taskfn) + packages = [] + + depend_tree["depends"][pn] = [] + for dep in taskdata[mc].depids[taskfn]: + depend_tree["depends"][pn].append(dep) + + depend_tree["rdepends-pn"][pn] = [] + for rdep in taskdata[mc].rdepids[taskfn]: + depend_tree["rdepends-pn"][pn].append(rdep) + + rdepends = self.recipecaches[mc].rundeps[taskfn] + for package in rdepends: + depend_tree["rdepends-pkg"][package] = [] + for rdepend in rdepends[package]: + depend_tree["rdepends-pkg"][package].append(rdepend) + packages.append(package) + + rrecs = self.recipecaches[mc].runrecs[taskfn] + for package in rrecs: + depend_tree["rrecs-pkg"][package] = [] + for rdepend in rrecs[package]: + depend_tree["rrecs-pkg"][package].append(rdepend) + if not package in packages: + packages.append(package) + + for package in packages: + if package not in depend_tree["packages"]: + depend_tree["packages"][package] = {} + depend_tree["packages"][package]["pn"] = pn + depend_tree["packages"][package]["filename"] = taskfn + depend_tree["packages"][package]["version"] = version + + return depend_tree + + ######## WARNING : this function requires cache_extra to be enabled ######## + def generatePkgDepTreeData(self, pkgs_to_build, task): + """ + Create a dependency tree of pkgs_to_build, returning the data. + """ + if not task.startswith("do_"): + task = "do_%s" % task + + _, taskdata = self.prepareTreeData(pkgs_to_build, task) + + seen_fns = [] + depend_tree = {} + depend_tree["depends"] = {} + depend_tree["pn"] = {} + depend_tree["rdepends-pn"] = {} + depend_tree["rdepends-pkg"] = {} + depend_tree["rrecs-pkg"] = {} + + # if we have extra caches, list all attributes they bring in + extra_info = [] + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): + cachefields = getattr(cache_class, 'cachefields', []) + extra_info = extra_info + cachefields + + tids = [] + for mc in taskdata: + for tid in taskdata[mc].taskentries: + tids.append(tid) + + for tid in tids: + (mc, fn, taskname, taskfn) = bb.runqueue.split_tid_mcfn(tid) + + pn = self.recipecaches[mc].pkg_fn[taskfn] + pn = self.add_mc_prefix(mc, pn) + + if pn not in depend_tree["pn"]: + depend_tree["pn"][pn] = {} + depend_tree["pn"][pn]["filename"] = taskfn + version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn] + depend_tree["pn"][pn]["version"] = version + rdepends = self.recipecaches[mc].rundeps[taskfn] + rrecs = self.recipecaches[mc].runrecs[taskfn] + depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None) + + # for all extra attributes stored, add them to the dependency tree + for ei in extra_info: + depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn] + + if taskfn not in seen_fns: + seen_fns.append(taskfn) + + depend_tree["depends"][pn] = [] + for dep in taskdata[mc].depids[taskfn]: + pn_provider = "" + if dep in taskdata[mc].build_targets and taskdata[mc].build_targets[dep]: + fn_provider = taskdata[mc].build_targets[dep][0] + pn_provider = self.recipecaches[mc].pkg_fn[fn_provider] + else: + pn_provider = dep + pn_provider = self.add_mc_prefix(mc, pn_provider) + depend_tree["depends"][pn].append(pn_provider) + + depend_tree["rdepends-pn"][pn] = [] + for rdep in taskdata[mc].rdepids[taskfn]: + pn_rprovider = "" + if rdep in taskdata[mc].run_targets and taskdata[mc].run_targets[rdep]: + fn_rprovider = taskdata[mc].run_targets[rdep][0] + pn_rprovider = self.recipecaches[mc].pkg_fn[fn_rprovider] + 
else: + pn_rprovider = rdep + pn_rprovider = self.add_mc_prefix(mc, pn_rprovider) + depend_tree["rdepends-pn"][pn].append(pn_rprovider) + + depend_tree["rdepends-pkg"].update(rdepends) + depend_tree["rrecs-pkg"].update(rrecs) + + return depend_tree + + def generateDepTreeEvent(self, pkgs_to_build, task): + """ + Create a task dependency graph of pkgs_to_build. + Generate an event with the result + """ + depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) + bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.data) + + def generateDotGraphFiles(self, pkgs_to_build, task): + """ + Create a task dependency graph of pkgs_to_build. + Save the result to a set of .dot files. + """ + + depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) + + with open('pn-buildlist', 'w') as f: + for pn in depgraph["pn"]: + f.write(pn + "\n") + logger.info("PN build list saved to 'pn-buildlist'") + + # Remove old format output files to ensure no confusion with stale data + try: + os.unlink('pn-depends.dot') + except FileNotFoundError: + pass + try: + os.unlink('package-depends.dot') + except FileNotFoundError: + pass + + with open('task-depends.dot', 'w') as f: + f.write("digraph depends {\n") + for task in sorted(depgraph["tdepends"]): + (pn, taskname) = task.rsplit(".", 1) + fn = depgraph["pn"][pn]["filename"] + version = depgraph["pn"][pn]["version"] + f.write('"%s.%s" [label="%s %s\\n%s\\n%s"]\n' % (pn, taskname, pn, taskname, version, fn)) + for dep in sorted(depgraph["tdepends"][task]): + f.write('"%s" -> "%s"\n' % (task, dep)) + f.write("}\n") + logger.info("Task dependencies saved to 'task-depends.dot'") + + with open('recipe-depends.dot', 'w') as f: + f.write("digraph depends {\n") + pndeps = {} + for task in sorted(depgraph["tdepends"]): + (pn, taskname) = task.rsplit(".", 1) + if pn not in pndeps: + pndeps[pn] = set() + for dep in sorted(depgraph["tdepends"][task]): + (deppn, deptaskname) = dep.rsplit(".", 1) + pndeps[pn].add(deppn) + for pn in sorted(pndeps): + fn = depgraph["pn"][pn]["filename"] + version = depgraph["pn"][pn]["version"] + f.write('"%s" [label="%s\\n%s\\n%s"]\n' % (pn, pn, version, fn)) + for dep in sorted(pndeps[pn]): + if dep == pn: + continue + f.write('"%s" -> "%s"\n' % (pn, dep)) + f.write("}\n") + logger.info("Flattened recipe dependencies saved to 'recipe-depends.dot'") + + def show_appends_with_no_recipes(self): + # Determine which bbappends haven't been applied + + # First get list of recipes, including skipped + recipefns = list(self.recipecaches[''].pkg_fn.keys()) + recipefns.extend(self.skiplist.keys()) + + # Work out list of bbappends that have been applied + applied_appends = [] + for fn in recipefns: + applied_appends.extend(self.collection.get_file_appends(fn)) + + appends_without_recipes = [] + for _, appendfn in self.collection.bbappends: + if not appendfn in applied_appends: + appends_without_recipes.append(appendfn) + + if appends_without_recipes: + msg = 'No recipes available for:\n %s' % '\n '.join(appends_without_recipes) + warn_only = self.data.getVar("BB_DANGLINGAPPENDS_WARNONLY", \ + False) or "no" + if warn_only.lower() in ("1", "yes", "true"): + bb.warn(msg) + else: + bb.fatal(msg) + + def handlePrefProviders(self): + + for mc in self.multiconfigs: + localdata = data.createCopy(self.databuilder.mcdata[mc]) + bb.data.expandKeys(localdata) + + # Handle PREFERRED_PROVIDERS + for p in (localdata.getVar('PREFERRED_PROVIDERS') or "").split(): + try: + (providee, provider) = p.split(':') + except: + providerlog.critical("Malformed option in 
PREFERRED_PROVIDERS variable: %s" % p) + continue + if providee in self.recipecaches[mc].preferred and self.recipecaches[mc].preferred[providee] != provider: + providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee]) + self.recipecaches[mc].preferred[providee] = provider + + def findConfigFilePath(self, configfile): + """ + Find the location on disk of configfile and if it exists and was parsed by BitBake + emit the ConfigFilePathFound event with the path to the file. + """ + path = bb.cookerdata.findConfigFile(configfile, self.data) + if not path: + return + + # Generate a list of parsed configuration files by searching the files + # listed in the __depends and __base_depends variables with a .conf suffix. + conffiles = [] + dep_files = self.data.getVar('__base_depends', False) or [] + dep_files = dep_files + (self.data.getVar('__depends', False) or []) + + for f in dep_files: + if f[0].endswith(".conf"): + conffiles.append(f[0]) + + _, conf, conffile = path.rpartition("conf/") + match = os.path.join(conf, conffile) + # Try and find matches for conf/conffilename.conf as we don't always + # have the full path to the file. + for cfg in conffiles: + if cfg.endswith(match): + bb.event.fire(bb.event.ConfigFilePathFound(path), + self.data) + break + + def findFilesMatchingInDir(self, filepattern, directory): + """ + Searches for files containing the substring 'filepattern' which are children of + 'directory' in each BBPATH. i.e. to find all rootfs package classes available + to BitBake one could call findFilesMatchingInDir(self, 'rootfs_', 'classes') + or to find all machine configuration files one could call: + findFilesMatchingInDir(self, '.conf', 'conf/machine') + """ + + matches = [] + bbpaths = self.data.getVar('BBPATH').split(':') + for path in bbpaths: + dirpath = os.path.join(path, directory) + if os.path.exists(dirpath): + for root, dirs, files in os.walk(dirpath): + for f in files: + if filepattern in f: + matches.append(f) + + if matches: + bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data) + + def findProviders(self, mc=''): + return bb.providers.findProviders(self.data, self.recipecaches[mc], self.recipecaches[mc].pkg_pn) + + def findBestProvider(self, pn, mc=''): + if pn in self.recipecaches[mc].providers: + filenames = self.recipecaches[mc].providers[pn] + eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.data, self.recipecaches[mc]) + filename = eligible[0] + return None, None, None, filename + elif pn in self.recipecaches[mc].pkg_pn: + return bb.providers.findBestProvider(pn, self.data, self.recipecaches[mc], self.recipecaches[mc].pkg_pn) + else: + return None, None, None, None + + def findConfigFiles(self, varname): + """ + Find config files which are appropriate values for varname. + i.e. 
MACHINE, DISTRO + """ + possible = [] + var = varname.lower() + + data = self.data + # iterate configs + bbpaths = data.getVar('BBPATH').split(':') + for path in bbpaths: + confpath = os.path.join(path, "conf", var) + if os.path.exists(confpath): + for root, dirs, files in os.walk(confpath): + # get all child files, these are appropriate values + for f in files: + val, sep, end = f.rpartition('.') + if end == 'conf': + possible.append(val) + + if possible: + bb.event.fire(bb.event.ConfigFilesFound(var, possible), self.data) + + def findInheritsClass(self, klass): + """ + Find all recipes which inherit the specified class + """ + pkg_list = [] + + for pfn in self.recipecaches[''].pkg_fn: + inherits = self.recipecaches[''].inherits.get(pfn, None) + if inherits and klass in inherits: + pkg_list.append(self.recipecaches[''].pkg_fn[pfn]) + + return pkg_list + + def generateTargetsTree(self, klass=None, pkgs=None): + """ + Generate a dependency tree of buildable targets + Generate an event with the result + """ + # if the caller hasn't specified a pkgs list default to universe + if not pkgs: + pkgs = ['universe'] + # if inherited_class passed ensure all recipes which inherit the + # specified class are included in pkgs + if klass: + extra_pkgs = self.findInheritsClass(klass) + pkgs = pkgs + extra_pkgs + + # generate a dependency tree for all our packages + tree = self.generatePkgDepTreeData(pkgs, 'build') + bb.event.fire(bb.event.TargetsTreeGenerated(tree), self.data) + + def interactiveMode( self ): + """Drop off into a shell""" + try: + from bb import shell + except ImportError: + parselog.exception("Interactive mode not available") + sys.exit(1) + else: + shell.start( self ) + + + def handleCollections(self, collections): + """Handle collections""" + errors = False + self.bbfile_config_priorities = [] + if collections: + collection_priorities = {} + collection_depends = {} + collection_list = collections.split() + min_prio = 0 + for c in collection_list: + bb.debug(1,'Processing %s in collection list' % (c)) + + # Get collection priority if defined explicitly + priority = self.data.getVar("BBFILE_PRIORITY_%s" % c) + if priority: + try: + prio = int(priority) + except ValueError: + parselog.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"", c, priority) + errors = True + if min_prio == 0 or prio < min_prio: + min_prio = prio + collection_priorities[c] = prio + else: + collection_priorities[c] = None + + # Check dependencies and store information for priority calculation + deps = self.data.getVar("LAYERDEPENDS_%s" % c) + if deps: + try: + depDict = bb.utils.explode_dep_versions2(deps) + except bb.utils.VersionStringException as vse: + bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse))) + for dep, oplist in list(depDict.items()): + if dep in collection_list: + for opstr in oplist: + layerver = self.data.getVar("LAYERVERSION_%s" % dep) + (op, depver) = opstr.split() + if layerver: + try: + res = bb.utils.vercmp_string_op(layerver, depver, op) + except bb.utils.VersionStringException as vse: + bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse))) + if not res: + parselog.error("Layer '%s' depends on version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep, layerver) + errors = True + else: + parselog.error("Layer '%s' depends on version %s of layer '%s', which exists in your configuration but does not specify a version. 
Check that you are using the correct matching versions/branches of these two layers.", c, opstr, dep) + errors = True + else: + parselog.error("Layer '%s' depends on layer '%s', but this layer is not enabled in your configuration", c, dep) + errors = True + collection_depends[c] = list(depDict.keys()) + else: + collection_depends[c] = [] + + # Check recommends and store information for priority calculation + recs = self.data.getVar("LAYERRECOMMENDS_%s" % c) + if recs: + try: + recDict = bb.utils.explode_dep_versions2(recs) + except bb.utils.VersionStringException as vse: + bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse))) + for rec, oplist in list(recDict.items()): + if rec in collection_list: + if oplist: + opstr = oplist[0] + layerver = self.data.getVar("LAYERVERSION_%s" % rec) + if layerver: + (op, recver) = opstr.split() + try: + res = bb.utils.vercmp_string_op(layerver, recver, op) + except bb.utils.VersionStringException as vse: + bb.fatal('Error parsing LAYERRECOMMENDS_%s: %s' % (c, str(vse))) + if not res: + parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', but version %s is currently enabled in your configuration. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec, layerver) + continue + else: + parselog.debug(3,"Layer '%s' recommends version %s of layer '%s', which exists in your configuration but does not specify a version. Check that you are using the correct matching versions/branches of these two layers.", c, opstr, rec) + continue + parselog.debug(3,"Layer '%s' recommends layer '%s', so we are adding it", c, rec) + collection_depends[c].append(rec) + else: + parselog.debug(3,"Layer '%s' recommends layer '%s', but this layer is not enabled in your configuration", c, rec) + + # Recursively work out collection priorities based on dependencies + def calc_layer_priority(collection): + if not collection_priorities[collection]: + max_depprio = min_prio + for dep in collection_depends[collection]: + calc_layer_priority(dep) + depprio = collection_priorities[dep] + if depprio > max_depprio: + max_depprio = depprio + max_depprio += 1 + parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio) + collection_priorities[collection] = max_depprio + + # Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities + for c in collection_list: + calc_layer_priority(c) + regex = self.data.getVar("BBFILE_PATTERN_%s" % c) + if regex == None: + parselog.error("BBFILE_PATTERN_%s not defined" % c) + errors = True + continue + elif regex == "": + parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c) + errors = False + continue + else: + try: + cre = re.compile(regex) + except re.error: + parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex) + errors = True + continue + self.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c])) + if errors: + # We've already printed the actual error(s) + raise CollectionError("Errors during parsing layer configuration") + + def buildSetVars(self): + """ + Setup any variables needed before starting a build + """ + t = time.gmtime() + for mc in self.databuilder.mcdata: + ds = self.databuilder.mcdata[mc] + if not ds.getVar("BUILDNAME", False): + ds.setVar("BUILDNAME", "${DATE}${TIME}") + ds.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', t)) + ds.setVar("DATE", time.strftime('%Y%m%d', t)) + ds.setVar("TIME", time.strftime('%H%M%S', t)) + + def reset_mtime_caches(self): + 
""" + Reset mtime caches - this is particularly important when memory resident as something + which is cached is not unlikely to have changed since the last invocation (e.g. a + file associated with a recipe might have been modified by the user). + """ + build.reset_cache() + bb.fetch._checksum_cache.mtime_cache.clear() + siggen_cache = getattr(bb.parse.siggen, 'checksum_cache', None) + if siggen_cache: + bb.parse.siggen.checksum_cache.mtime_cache.clear() + + def matchFiles(self, bf): + """ + Find the .bb files which match the expression in 'buildfile'. + """ + if bf.startswith("/") or bf.startswith("../"): + bf = os.path.abspath(bf) + + self.collection = CookerCollectFiles(self.bbfile_config_priorities) + filelist, masked, searchdirs = self.collection.collect_bbfiles(self.data, self.data) + try: + os.stat(bf) + bf = os.path.abspath(bf) + return [bf] + except OSError: + regexp = re.compile(bf) + matches = [] + for f in filelist: + if regexp.search(f) and os.path.isfile(f): + matches.append(f) + return matches + + def matchFile(self, buildfile): + """ + Find the .bb file which matches the expression in 'buildfile'. + Raise an error if multiple files + """ + matches = self.matchFiles(buildfile) + if len(matches) != 1: + if matches: + msg = "Unable to match '%s' to a specific recipe file - %s matches found:" % (buildfile, len(matches)) + if matches: + for f in matches: + msg += "\n %s" % f + parselog.error(msg) + else: + parselog.error("Unable to find any recipe file matching '%s'" % buildfile) + raise NoSpecificMatch + return matches[0] + + def buildFile(self, buildfile, task): + """ + Build the file matching regexp buildfile + """ + bb.event.fire(bb.event.BuildInit(), self.data) + + # Too many people use -b because they think it's how you normally + # specify a target to be built, so show a warning + bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.") + + self.buildFileInternal(buildfile, task) + + def buildFileInternal(self, buildfile, task, fireevents=True, quietlog=False): + """ + Build the file matching regexp buildfile + """ + + # Parse the configuration here. 
We need to do it explicitly here since + # buildFile() doesn't use the cache + self.parseConfiguration() + + # If we are told to do the None task then query the default task + if (task == None): + task = self.configuration.cmd + if not task.startswith("do_"): + task = "do_%s" % task + + fn, cls, mc = bb.cache.virtualfn2realfn(buildfile) + fn = self.matchFile(fn) + + self.buildSetVars() + self.reset_mtime_caches() + + bb_cache = bb.cache.Cache(self.databuilder, self.data_hash, self.caches_array) + + infos = bb_cache.parse(fn, self.collection.get_file_appends(fn)) + infos = dict(infos) + + fn = bb.cache.realfn2virtual(fn, cls, mc) + try: + info_array = infos[fn] + except KeyError: + bb.fatal("%s does not exist" % fn) + + if info_array[0].skipped: + bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason)) + + self.recipecaches[mc].add_from_recipeinfo(fn, info_array) + + # Tweak some variables + item = info_array[0].pn + self.recipecaches[mc].ignored_dependencies = set() + self.recipecaches[mc].bbfile_priority[fn] = 1 + self.configuration.limited_deps = True + + # Remove external dependencies + self.recipecaches[mc].task_deps[fn]['depends'] = {} + self.recipecaches[mc].deps[fn] = [] + self.recipecaches[mc].rundeps[fn] = defaultdict(list) + self.recipecaches[mc].runrecs[fn] = defaultdict(list) + + # Invalidate task for target if force mode active + if self.configuration.force: + logger.verbose("Invalidate task %s, %s", task, fn) + bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn) + + # Setup taskdata structure + taskdata = {} + taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort) + taskdata[mc].add_provider(self.databuilder.mcdata[mc], self.recipecaches[mc], item) + + if quietlog: + rqloglevel = bb.runqueue.logger.getEffectiveLevel() + bb.runqueue.logger.setLevel(logging.WARNING) + + buildname = self.databuilder.mcdata[mc].getVar("BUILDNAME") + if fireevents: + bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.databuilder.mcdata[mc]) + + # Execute the runqueue + runlist = [[mc, item, task, fn]] + + rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) + + def buildFileIdle(server, rq, abort): + + msg = None + interrupted = 0 + if abort or self.state == state.forceshutdown: + rq.finish_runqueue(True) + msg = "Forced shutdown" + interrupted = 2 + elif self.state == state.shutdown: + rq.finish_runqueue(False) + msg = "Stopped build" + interrupted = 1 + failures = 0 + try: + retval = rq.execute_runqueue() + except runqueue.TaskFailure as exc: + failures += len(exc.args) + retval = False + except SystemExit as exc: + self.command.finishAsyncCommand(str(exc)) + if quietlog: + bb.runqueue.logger.setLevel(rqloglevel) + return False + + if not retval: + if fireevents: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, item, failures, interrupted), self.databuilder.mcdata[mc]) + self.command.finishAsyncCommand(msg) + # We trashed self.recipecaches above + self.parsecache_valid = False + self.configuration.limited_deps = False + bb.parse.siggen.reset(self.data) + if quietlog: + bb.runqueue.logger.setLevel(rqloglevel) + return False + if retval is True: + return True + return retval + + self.configuration.server_register_idlecallback(buildFileIdle, rq) + + def buildTargets(self, targets, task): + """ + Attempt to build the targets specified + """ + + def buildTargetsIdle(server, rq, abort): + msg = None + interrupted = 0 + if abort or self.state == state.forceshutdown: + rq.finish_runqueue(True) + msg = 
"Forced shutdown" + interrupted = 2 + elif self.state == state.shutdown: + rq.finish_runqueue(False) + msg = "Stopped build" + interrupted = 1 + failures = 0 + try: + retval = rq.execute_runqueue() + except runqueue.TaskFailure as exc: + failures += len(exc.args) + retval = False + except SystemExit as exc: + self.command.finishAsyncCommand(str(exc)) + return False + + if not retval: + try: + for mc in self.multiconfigs: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runtaskentries), buildname, targets, failures, interrupted), self.databuilder.mcdata[mc]) + finally: + self.command.finishAsyncCommand(msg) + return False + if retval is True: + return True + return retval + + self.reset_mtime_caches() + self.buildSetVars() + + # If we are told to do the None task then query the default task + if (task == None): + task = self.configuration.cmd + + if not task.startswith("do_"): + task = "do_%s" % task + + packages = [target if ':' in target else '%s:%s' % (target, task) for target in targets] + + bb.event.fire(bb.event.BuildInit(packages), self.data) + + taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort) + + buildname = self.data.getVar("BUILDNAME", False) + + # make targets to always look as :do_ + ntargets = [] + for target in runlist: + if target[0]: + ntargets.append("multiconfig:%s:%s:%s" % (target[0], target[1], target[2])) + ntargets.append("%s:%s" % (target[1], target[2])) + + for mc in self.multiconfigs: + bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.databuilder.mcdata[mc]) + + rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) + if 'universe' in targets: + rq.rqdata.warn_multi_bb = True + + self.configuration.server_register_idlecallback(buildTargetsIdle, rq) + + + def getAllKeysWithFlags(self, flaglist): + dump = {} + for k in self.data.keys(): + try: + expand = True + flags = self.data.getVarFlags(k) + if flags and "func" in flags and "python" in flags: + expand = False + v = self.data.getVar(k, expand) + if not k.startswith("__") and not isinstance(v, bb.data_smart.DataSmart): + dump[k] = { + 'v' : str(v) , + 'history' : self.data.varhistory.variable(k), + } + for d in flaglist: + if flags and d in flags: + dump[k][d] = flags[d] + else: + dump[k][d] = None + except Exception as e: + print(e) + return dump + + + def updateCacheSync(self): + if self.state == state.running: + return + + # reload files for which we got notifications + for p in self.inotify_modified_files: + bb.parse.update_cache(p) + if p in bb.parse.BBHandler.cached_statements: + del bb.parse.BBHandler.cached_statements[p] + self.inotify_modified_files = [] + + if not self.baseconfig_valid: + logger.debug(1, "Reloading base configuration data") + self.initConfigurationData() + self.handlePRServ() + + # This is called for all async commands when self.state != running + def updateCache(self): + if self.state == state.running: + return + + if self.state in (state.shutdown, state.forceshutdown, state.error): + if hasattr(self.parser, 'shutdown'): + self.parser.shutdown(clean=False, force = True) + raise bb.BBHandledException() + + if self.state != state.parsing: + self.updateCacheSync() + + if self.state != state.parsing and not self.parsecache_valid: + bb.parse.siggen.reset(self.data) + self.parseConfiguration () + if CookerFeatures.SEND_SANITYEVENTS in self.featureset: + for mc in self.multiconfigs: + bb.event.fire(bb.event.SanityCheck(False), self.databuilder.mcdata[mc]) + + for mc in self.multiconfigs: + ignore = 
self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED") or "" + self.recipecaches[mc].ignored_dependencies = set(ignore.split()) + + for dep in self.configuration.extra_assume_provided: + self.recipecaches[mc].ignored_dependencies.add(dep) + + self.collection = CookerCollectFiles(self.bbfile_config_priorities) + (filelist, masked, searchdirs) = self.collection.collect_bbfiles(self.data, self.data) + + # Add inotify watches for directories searched for bb/bbappend files + for dirent in searchdirs: + self.add_filewatch([[dirent]], dirs=True) + + self.parser = CookerParser(self, filelist, masked) + self.parsecache_valid = True + + self.state = state.parsing + + if not self.parser.parse_next(): + collectlog.debug(1, "parsing complete") + if self.parser.error: + raise bb.BBHandledException() + self.show_appends_with_no_recipes() + self.handlePrefProviders() + for mc in self.multiconfigs: + self.recipecaches[mc].bbfile_priority = self.collection.collection_priorities(self.recipecaches[mc].pkg_fn, self.data) + self.state = state.running + + # Send an event listing all stamps reachable after parsing + # which the metadata may use to clean up stale data + for mc in self.multiconfigs: + event = bb.event.ReachableStamps(self.recipecaches[mc].stamp) + bb.event.fire(event, self.databuilder.mcdata[mc]) + return None + + return True + + def checkPackages(self, pkgs_to_build, task=None): + + # Return a copy, don't modify the original + pkgs_to_build = pkgs_to_build[:] + + if len(pkgs_to_build) == 0: + raise NothingToBuild + + ignore = (self.data.getVar("ASSUME_PROVIDED") or "").split() + for pkg in pkgs_to_build: + if pkg in ignore: + parselog.warning("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg) + + if 'world' in pkgs_to_build: + pkgs_to_build.remove('world') + for mc in self.multiconfigs: + bb.providers.buildWorldTargetList(self.recipecaches[mc], task) + for t in self.recipecaches[mc].world_target: + if mc: + t = "multiconfig:" + mc + ":" + t + pkgs_to_build.append(t) + + if 'universe' in pkgs_to_build: + parselog.warning("The \"universe\" target is only intended for testing and may produce errors.") + parselog.debug(1, "collating packages for \"universe\"") + pkgs_to_build.remove('universe') + for mc in self.multiconfigs: + for t in self.recipecaches[mc].universe_target: + if task: + foundtask = False + for provider_fn in self.recipecaches[mc].providers[t]: + if task in self.recipecaches[mc].task_deps[provider_fn]['tasks']: + foundtask = True + break + if not foundtask: + bb.debug(1, "Skipping %s for universe tasks as task %s doesn't exist" % (t, task)) + continue + if mc: + t = "multiconfig:" + mc + ":" + t + pkgs_to_build.append(t) + + return pkgs_to_build + + def pre_serve(self): + # We now are in our own process so we can call this here. 
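The updateCache() method above runs the parse phase incrementally: each call sets up the CookerParser if needed, consumes the next chunk of recipe parsing, and keeps returning True until parsing has finished, at which point the cooker moves to the running state. Asynchronous commands whose needcache attribute is True rely on this being driven repeatedly from the server's idle handling. As a rough sketch of that return-value contract only (drive_parse is an invented name and error handling is omitted; BitBake itself drives this from the command layer's idle callback, not a loop like this):

from bb.cooker import state   # the state class defined earlier in this file

def drive_parse(cooker):
    # Keep pumping the parser until updateCache() stops returning True,
    # i.e. until all recipes have been parsed or an error is raised.
    while cooker.updateCache():
        pass
    assert cooker.state == state.running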
+ # PRServ exits if its parent process exits + self.handlePRServ() + return + + def post_serve(self): + prserv.serv.auto_shutdown() + bb.event.fire(CookerExit(), self.data) + + + def shutdown(self, force = False): + if force: + self.state = state.forceshutdown + else: + self.state = state.shutdown + + if self.parser: + self.parser.shutdown(clean=not force, force=force) + + def finishcommand(self): + self.state = state.initial + + def reset(self): + self.initConfigurationData() + + def clientComplete(self): + """Called when the client is done using the server""" + self.finishcommand() + self.extraconfigdata = {} + self.command.reset() + self.databuilder.reset() + self.data = self.databuilder.data + + +class CookerExit(bb.event.Event): + """ + Notify clients of the Cooker shutdown + """ + + def __init__(self): + bb.event.Event.__init__(self) + + +class CookerCollectFiles(object): + def __init__(self, priorities): + self.bbappends = [] + self.bbfile_config_priorities = priorities + + def calc_bbfile_priority( self, filename, matched = None ): + for _, _, regex, pri in self.bbfile_config_priorities: + if regex.match(filename): + if matched != None: + if not regex in matched: + matched.add(regex) + return pri + return 0 + + def get_bbfiles(self): + """Get list of default .bb files by reading out the current directory""" + path = os.getcwd() + contents = os.listdir(path) + bbfiles = [] + for f in contents: + if f.endswith(".bb"): + bbfiles.append(os.path.abspath(os.path.join(path, f))) + return bbfiles + + def find_bbfiles(self, path): + """Find all the .bb and .bbappend files in a directory""" + found = [] + for dir, dirs, files in os.walk(path): + for ignored in ('SCCS', 'CVS', '.svn'): + if ignored in dirs: + dirs.remove(ignored) + found += [os.path.join(dir, f) for f in files if (f.endswith(['.bb', '.bbappend']))] + + return found + + def collect_bbfiles(self, config, eventdata): + """Collect all available .bb build files""" + masked = 0 + + collectlog.debug(1, "collecting .bb files") + + files = (config.getVar( "BBFILES") or "").split() + config.setVar("BBFILES", " ".join(files)) + + # Sort files by priority + files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem) ) + + if not len(files): + files = self.get_bbfiles() + + if not len(files): + collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") + bb.event.fire(CookerExit(), eventdata) + + # We need to track where we look so that we can add inotify watches. There + # is no nice way to do this, this is horrid. We intercept the os.listdir() + # (or os.scandir() for python 3.6+) calls while we run glob(). + origlistdir = os.listdir + if hasattr(os, 'scandir'): + origscandir = os.scandir + searchdirs = [] + + def ourlistdir(d): + searchdirs.append(d) + return origlistdir(d) + + def ourscandir(d): + searchdirs.append(d) + return origscandir(d) + + os.listdir = ourlistdir + if hasattr(os, 'scandir'): + os.scandir = ourscandir + try: + # Can't use set here as order is important + newfiles = [] + for f in files: + if os.path.isdir(f): + dirfiles = self.find_bbfiles(f) + for g in dirfiles: + if g not in newfiles: + newfiles.append(g) + else: + globbed = glob.glob(f) + if not globbed and os.path.exists(f): + globbed = [f] + # glob gives files in order on disk. Sort to be deterministic. 
+ for g in sorted(globbed): + if g not in newfiles: + newfiles.append(g) + finally: + os.listdir = origlistdir + if hasattr(os, 'scandir'): + os.scandir = origscandir + + bbmask = config.getVar('BBMASK') + + if bbmask: + # First validate the individual regular expressions and ignore any + # that do not compile + bbmasks = [] + for mask in bbmask.split(): + # When constructing an older style single regex, it's possible for BBMASK + # to end up beginning with '|', which matches and masks _everything_. + if mask.startswith("|"): + collectlog.warn("BBMASK contains regular expression beginning with '|', fixing: %s" % mask) + mask = mask[1:] + try: + re.compile(mask) + bbmasks.append(mask) + except sre_constants.error: + collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask) + + # Then validate the combined regular expressions. This should never + # fail, but better safe than sorry... + bbmask = "|".join(bbmasks) + try: + bbmask_compiled = re.compile(bbmask) + except sre_constants.error: + collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask) + bbmask = None + + bbfiles = [] + bbappend = [] + for f in newfiles: + if bbmask and bbmask_compiled.search(f): + collectlog.debug(1, "skipping masked file %s", f) + masked += 1 + continue + if f.endswith('.bb'): + bbfiles.append(f) + elif f.endswith('.bbappend'): + bbappend.append(f) + else: + collectlog.debug(1, "skipping %s: unknown file extension", f) + + # Build a list of .bbappend files for each .bb file + for f in bbappend: + base = os.path.basename(f).replace('.bbappend', '.bb') + self.bbappends.append((base, f)) + + # Find overlayed recipes + # bbfiles will be in priority order which makes this easy + bbfile_seen = dict() + self.overlayed = defaultdict(list) + for f in reversed(bbfiles): + base = os.path.basename(f) + if base not in bbfile_seen: + bbfile_seen[base] = f + else: + topfile = bbfile_seen[base] + self.overlayed[topfile].append(f) + + return (bbfiles, masked, searchdirs) + + def get_file_appends(self, fn): + """ + Returns a list of .bbappend files to apply to fn + """ + filelist = [] + f = os.path.basename(fn) + for b in self.bbappends: + (bbappend, filename) = b + if (bbappend == f) or ('%' in bbappend and bbappend.startswith(f[:bbappend.index('%')])): + filelist.append(filename) + return filelist + + def collection_priorities(self, pkgfns, d): + + priorities = {} + + # Calculate priorities for each file + matched = set() + for p in pkgfns: + realfn, cls, mc = bb.cache.virtualfn2realfn(p) + priorities[p] = self.calc_bbfile_priority(realfn, matched) + + unmatched = set() + for _, _, regex, pri in self.bbfile_config_priorities: + if not regex in matched: + unmatched.add(regex) + + # Don't show the warning if the BBFILE_PATTERN did match .bbappend files + def find_bbappend_match(regex): + for b in self.bbappends: + (bbfile, append) = b + if regex.match(append): + # If the bbappend is matched by already "matched set", return False + for matched_regex in matched: + if matched_regex.match(append): + return False + return True + return False + + for unmatch in unmatched.copy(): + if find_bbappend_match(unmatch): + unmatched.remove(unmatch) + + for collection, pattern, regex, _ in self.bbfile_config_priorities: + if regex in unmatched: + if d.getVar('BBFILE_PATTERN_IGNORE_EMPTY_%s' % collection) != '1': + collectlog.warning("No bb files matched BBFILE_PATTERN_%s '%s'" % (collection, pattern)) + + return priorities + +class ParsingFailure(Exception): + def __init__(self, 
realexception, recipe): + self.realexception = realexception + self.recipe = recipe + Exception.__init__(self, realexception, recipe) + +class Feeder(multiprocessing.Process): + def __init__(self, jobs, to_parsers, quit): + self.quit = quit + self.jobs = jobs + self.to_parsers = to_parsers + multiprocessing.Process.__init__(self) + + def run(self): + while True: + try: + quit = self.quit.get_nowait() + except queue.Empty: + pass + else: + if quit == 'cancel': + self.to_parsers.cancel_join_thread() + break + + try: + job = self.jobs.pop() + except IndexError: + break + + try: + self.to_parsers.put(job, timeout=0.5) + except queue.Full: + self.jobs.insert(0, job) + continue + +class Parser(multiprocessing.Process): + def __init__(self, jobs, results, quit, init, profile): + self.jobs = jobs + self.results = results + self.quit = quit + self.init = init + multiprocessing.Process.__init__(self) + self.context = bb.utils.get_context().copy() + self.handlers = bb.event.get_class_handlers().copy() + self.profile = profile + + def run(self): + + if not self.profile: + self.realrun() + return + + try: + import cProfile as profile + except: + import profile + prof = profile.Profile() + try: + profile.Profile.runcall(prof, self.realrun) + finally: + logfile = "profile-parse-%s.log" % multiprocessing.current_process().name + prof.dump_stats(logfile) + + def realrun(self): + if self.init: + self.init() + + pending = [] + while True: + try: + self.quit.get_nowait() + except queue.Empty: + pass + else: + self.results.cancel_join_thread() + break + + if pending: + result = pending.pop() + else: + try: + job = self.jobs.get(timeout=0.25) + except queue.Empty: + continue + + if job is None: + break + result = self.parse(*job) + + try: + self.results.put(result, timeout=0.25) + except queue.Full: + pending.append(result) + + def parse(self, filename, appends): + try: + # Record the filename we're parsing into any events generated + def parse_filter(self, record): + record.taskpid = bb.event.worker_pid + record.fn = filename + return True + + # Reset our environment and handlers to the original settings + bb.utils.set_context(self.context.copy()) + bb.event.set_class_handlers(self.handlers.copy()) + bb.event.LogHandler.filter = parse_filter + + return True, self.bb_cache.parse(filename, appends) + except Exception as exc: + tb = sys.exc_info()[2] + exc.recipe = filename + exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3)) + return True, exc + # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown + # and for example a worker thread doesn't just exit on its own in response to + # a SystemExit event for example. 
+ except BaseException as exc: + return True, ParsingFailure(exc, filename) + +class CookerParser(object): + def __init__(self, cooker, filelist, masked): + self.filelist = filelist + self.cooker = cooker + self.cfgdata = cooker.data + self.cfghash = cooker.data_hash + self.cfgbuilder = cooker.databuilder + + # Accounting statistics + self.parsed = 0 + self.cached = 0 + self.error = 0 + self.masked = masked + + self.skipped = 0 + self.virtuals = 0 + self.total = len(filelist) + + self.current = 0 + self.process_names = [] + + self.bb_cache = bb.cache.Cache(self.cfgbuilder, self.cfghash, cooker.caches_array) + self.fromcache = [] + self.willparse = [] + for filename in self.filelist: + appends = self.cooker.collection.get_file_appends(filename) + if not self.bb_cache.cacheValid(filename, appends): + self.willparse.append((filename, appends)) + else: + self.fromcache.append((filename, appends)) + self.toparse = self.total - len(self.fromcache) + self.progress_chunk = int(max(self.toparse / 100, 1)) + + self.num_processes = min(int(self.cfgdata.getVar("BB_NUMBER_PARSE_THREADS") or + multiprocessing.cpu_count()), len(self.willparse)) + + self.start() + self.haveshutdown = False + + def start(self): + self.results = self.load_cached() + self.processes = [] + if self.toparse: + bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) + def init(): + Parser.bb_cache = self.bb_cache + bb.utils.set_process_name(multiprocessing.current_process().name) + multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1) + multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1) + + self.feeder_quit = multiprocessing.Queue(maxsize=1) + self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes) + self.jobs = multiprocessing.Queue(maxsize=self.num_processes) + self.result_queue = multiprocessing.Queue() + self.feeder = Feeder(self.willparse, self.jobs, self.feeder_quit) + self.feeder.start() + for i in range(0, self.num_processes): + parser = Parser(self.jobs, self.result_queue, self.parser_quit, init, self.cooker.configuration.profile) + parser.start() + self.process_names.append(parser.name) + self.processes.append(parser) + + self.results = itertools.chain(self.results, self.parse_generator()) + + def shutdown(self, clean=True, force=False): + if not self.toparse: + return + if self.haveshutdown: + return + self.haveshutdown = True + + if clean: + event = bb.event.ParseCompleted(self.cached, self.parsed, + self.skipped, self.masked, + self.virtuals, self.error, + self.total) + + bb.event.fire(event, self.cfgdata) + self.feeder_quit.put(None) + for process in self.processes: + self.parser_quit.put(None) + else: + self.feeder_quit.put('cancel') + + self.parser_quit.cancel_join_thread() + for process in self.processes: + self.parser_quit.put(None) + + self.jobs.cancel_join_thread() + + for process in self.processes: + if force: + process.join(.1) + process.terminate() + else: + process.join() + self.feeder.join() + + sync = threading.Thread(target=self.bb_cache.sync) + sync.start() + multiprocessing.util.Finalize(None, sync.join, exitpriority=-100) + bb.codeparser.parser_cache_savemerge() + bb.fetch.fetcher_parse_done() + if self.cooker.configuration.profile: + profiles = [] + for i in self.process_names: + logfile = "profile-parse-%s.log" % i + if os.path.exists(logfile): + profiles.append(logfile) + + pout = "profile-parse.log.processed" + bb.utils.process_profilelog(profiles, pout = pout) + print("Processed parsing statistics saved to %s" % 
(pout)) + + def load_cached(self): + for filename, appends in self.fromcache: + cached, infos = self.bb_cache.load(filename, appends) + yield not cached, infos + + def parse_generator(self): + while True: + if self.parsed >= self.toparse: + break + + try: + result = self.result_queue.get(timeout=0.25) + except queue.Empty: + pass + else: + value = result[1] + if isinstance(value, BaseException): + raise value + else: + yield result + + def parse_next(self): + result = [] + parsed = None + try: + parsed, result = next(self.results) + except StopIteration: + self.shutdown() + return False + except bb.BBHandledException as exc: + self.error += 1 + logger.error('Failed to parse recipe: %s' % exc.recipe) + self.shutdown(clean=False) + return False + except ParsingFailure as exc: + self.error += 1 + logger.error('Unable to parse %s: %s' % + (exc.recipe, bb.exceptions.to_string(exc.realexception))) + self.shutdown(clean=False) + return False + except bb.parse.ParseError as exc: + self.error += 1 + logger.error(str(exc)) + self.shutdown(clean=False) + return False + except bb.data_smart.ExpansionError as exc: + self.error += 1 + bbdir = os.path.dirname(__file__) + os.sep + etype, value, _ = sys.exc_info() + tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback)) + logger.error('ExpansionError during parsing %s', value.recipe, + exc_info=(etype, value, tb)) + self.shutdown(clean=False) + return False + except Exception as exc: + self.error += 1 + etype, value, tb = sys.exc_info() + if hasattr(value, "recipe"): + logger.error('Unable to parse %s' % value.recipe, + exc_info=(etype, value, exc.traceback)) + else: + # Most likely, an exception occurred during raising an exception + import traceback + logger.error('Exception during parse: %s' % traceback.format_exc()) + self.shutdown(clean=False) + return False + + self.current += 1 + self.virtuals += len(result) + if parsed: + self.parsed += 1 + if self.parsed % self.progress_chunk == 0: + bb.event.fire(bb.event.ParseProgress(self.parsed, self.toparse), + self.cfgdata) + else: + self.cached += 1 + + for virtualfn, info_array in result: + if info_array[0].skipped: + self.skipped += 1 + self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0]) + (fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn) + self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecaches[mc], + parsed=parsed, watcher = self.cooker.add_filewatch) + return True + + def reparse(self, filename): + infos = self.bb_cache.parse(filename, self.cooker.collection.get_file_appends(filename)) + for vfn, info_array in infos: + (fn, cls, mc) = bb.cache.virtualfn2realfn(vfn) + self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array) diff --git a/poky/bitbake/lib/bb/cookerdata.py b/poky/bitbake/lib/bb/cookerdata.py new file mode 100644 index 0000000000..5df66e6173 --- /dev/null +++ b/poky/bitbake/lib/bb/cookerdata.py @@ -0,0 +1,434 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import logging +import os +import re +import sys +from functools import wraps +import bb +from bb import data +import bb.parse + +logger = logging.getLogger("BitBake") +parselog = logging.getLogger("BitBake.Parsing") + +class ConfigParameters(object): + def __init__(self, argv=sys.argv): + self.options, targets = self.parseCommandLine(argv) + self.environment = self.parseEnvironment() + + self.options.pkgs_to_build = targets or [] + + for key, val in self.options.__dict__.items(): + setattr(self, key, val) + + def parseCommandLine(self, argv=sys.argv): + raise Exception("Caller must implement commandline option parsing") + + def parseEnvironment(self): + return os.environ.copy() + + def updateFromServer(self, server): + if not self.options.cmd: + defaulttask, error = server.runCommand(["getVariable", "BB_DEFAULT_TASK"]) + if error: + raise Exception("Unable to get the value of BB_DEFAULT_TASK from the server: %s" % error) + self.options.cmd = defaulttask or "build" + _, error = server.runCommand(["setConfig", "cmd", self.options.cmd]) + if error: + raise Exception("Unable to set configuration option 'cmd' on the server: %s" % error) + + if not self.options.pkgs_to_build: + bbpkgs, error = server.runCommand(["getVariable", "BBTARGETS"]) + if error: + raise Exception("Unable to get the value of BBTARGETS from the server: %s" % error) + if bbpkgs: + self.options.pkgs_to_build.extend(bbpkgs.split()) + + def updateToServer(self, server, environment): + options = {} + for o in ["abort", "force", "invalidate_stamp", + "verbose", "debug", "dry_run", "dump_signatures", + "debug_domains", "extra_assume_provided", "profile", + "prefile", "postfile", "server_timeout"]: + options[o] = getattr(self.options, o) + + ret, error = server.runCommand(["updateConfig", options, environment, sys.argv]) + if error: + raise Exception("Unable to update the server configuration with local parameters: %s" % error) + + def parseActions(self): + # Parse any commandline into actions + action = {'action':None, 'msg':None} + if self.options.show_environment: + if 'world' in self.options.pkgs_to_build: + action['msg'] = "'world' is not a valid target for --environment." + elif 'universe' in self.options.pkgs_to_build: + action['msg'] = "'universe' is not a valid target for --environment." + elif len(self.options.pkgs_to_build) > 1: + action['msg'] = "Only one target can be used with the --environment option." + elif self.options.buildfile and len(self.options.pkgs_to_build) > 0: + action['msg'] = "No target should be used with the --environment and --buildfile options." 
+ elif len(self.options.pkgs_to_build) > 0: + action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build] + else: + action['action'] = ["showEnvironment", self.options.buildfile] + elif self.options.buildfile is not None: + action['action'] = ["buildFile", self.options.buildfile, self.options.cmd] + elif self.options.revisions_changed: + action['action'] = ["compareRevisions"] + elif self.options.show_versions: + action['action'] = ["showVersions"] + elif self.options.parse_only: + action['action'] = ["parseFiles"] + elif self.options.dot_graph: + if self.options.pkgs_to_build: + action['action'] = ["generateDotGraph", self.options.pkgs_to_build, self.options.cmd] + else: + action['msg'] = "Please specify a package name for dependency graph generation." + else: + if self.options.pkgs_to_build: + action['action'] = ["buildTargets", self.options.pkgs_to_build, self.options.cmd] + else: + #action['msg'] = "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information." + action = None + self.options.initialaction = action + return action + +class CookerConfiguration(object): + """ + Manages build options and configurations for one run + """ + + def __init__(self): + self.debug_domains = [] + self.extra_assume_provided = [] + self.prefile = [] + self.postfile = [] + self.debug = 0 + self.cmd = None + self.abort = True + self.force = False + self.profile = False + self.nosetscene = False + self.setsceneonly = False + self.invalidate_stamp = False + self.dump_signatures = [] + self.dry_run = False + self.tracking = False + self.xmlrpcinterface = [] + self.server_timeout = None + self.writeeventlog = False + self.server_only = False + self.limited_deps = False + self.runall = [] + self.runonly = [] + + self.env = {} + + def setConfigParameters(self, parameters): + for key in self.__dict__.keys(): + if key in parameters.options.__dict__: + setattr(self, key, parameters.options.__dict__[key]) + self.env = parameters.environment.copy() + + def setServerRegIdleCallback(self, srcb): + self.server_register_idlecallback = srcb + + def __getstate__(self): + state = {} + for key in self.__dict__.keys(): + if key == "server_register_idlecallback": + state[key] = None + else: + state[key] = getattr(self, key) + return state + + def __setstate__(self,state): + for k in state: + setattr(self, k, state[k]) + + +def catch_parse_error(func): + """Exception handling bits for our parsing""" + @wraps(func) + def wrapped(fn, *args): + try: + return func(fn, *args) + except IOError as exc: + import traceback + parselog.critical(traceback.format_exc()) + parselog.critical("Unable to parse %s: %s" % (fn, exc)) + sys.exit(1) + except bb.data_smart.ExpansionError as exc: + import traceback + + bbdir = os.path.dirname(__file__) + os.sep + exc_class, exc, tb = sys.exc_info() + for tb in iter(lambda: tb.tb_next, None): + # Skip frames in bitbake itself, we only want the metadata + fn, _, _, _ = traceback.extract_tb(tb, 1)[0] + if not fn.startswith(bbdir): + break + parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb)) + sys.exit(1) + except bb.parse.ParseError as exc: + parselog.critical(str(exc)) + sys.exit(1) + return wrapped + +@catch_parse_error +def parse_config_file(fn, data, include=True): + return bb.parse.handle(fn, data, include) + +@catch_parse_error +def _inherit(bbclass, data): + bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data) + return data + +def findConfigFile(configfile, data): + search = [] + bbpath = 
data.getVar("BBPATH") + if bbpath: + for i in bbpath.split(":"): + search.append(os.path.join(i, "conf", configfile)) + path = os.getcwd() + while path != "/": + search.append(os.path.join(path, "conf", configfile)) + path, _ = os.path.split(path) + + for i in search: + if os.path.exists(i): + return i + + return None + +# +# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working +# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH. +# + +def findTopdir(): + d = bb.data.init() + bbpath = None + if 'BBPATH' in os.environ: + bbpath = os.environ['BBPATH'] + d.setVar('BBPATH', bbpath) + + layerconf = findConfigFile("bblayers.conf", d) + if layerconf: + return os.path.dirname(os.path.dirname(layerconf)) + if bbpath: + bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf") + if bitbakeconf: + return os.path.dirname(os.path.dirname(bitbakeconf)) + return None + +class CookerDataBuilder(object): + + def __init__(self, cookercfg, worker = False): + + self.prefiles = cookercfg.prefile + self.postfiles = cookercfg.postfile + self.tracking = cookercfg.tracking + + bb.utils.set_context(bb.utils.clean_context()) + bb.event.set_class_handlers(bb.event.clean_class_handlers()) + self.basedata = bb.data.init() + if self.tracking: + self.basedata.enableTracking() + + # Keep a datastore of the initial environment variables and their + # values from when BitBake was launched to enable child processes + # to use environment variables which have been cleaned from the + # BitBake processes env + self.savedenv = bb.data.init() + for k in cookercfg.env: + self.savedenv.setVar(k, cookercfg.env[k]) + + filtered_keys = bb.utils.approved_variables() + bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys) + self.basedata.setVar("BB_ORIGENV", self.savedenv) + + if worker: + self.basedata.setVar("BB_WORKERCONTEXT", "1") + + self.data = self.basedata + self.mcdata = {} + + def parseBaseConfiguration(self): + try: + bb.parse.init_parser(self.basedata) + self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles) + + if self.data.getVar("BB_WORKERCONTEXT", False) is None: + bb.fetch.fetcher_init(self.data) + bb.codeparser.parser_cache_init(self.data) + + bb.event.fire(bb.event.ConfigParsed(), self.data) + + reparse_cnt = 0 + while self.data.getVar("BB_INVALIDCONF", False) is True: + if reparse_cnt > 20: + logger.error("Configuration has been re-parsed over 20 times, " + "breaking out of the loop...") + raise Exception("Too deep config re-parse loop. 
Check locations where " + "BB_INVALIDCONF is being set (ConfigParsed event handlers)") + self.data.setVar("BB_INVALIDCONF", False) + self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles) + reparse_cnt += 1 + bb.event.fire(bb.event.ConfigParsed(), self.data) + + bb.parse.init_parser(self.data) + self.data_hash = self.data.get_hash() + self.mcdata[''] = self.data + + multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split() + for config in multiconfig: + mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config) + bb.event.fire(bb.event.ConfigParsed(), mcdata) + self.mcdata[config] = mcdata + if multiconfig: + bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data) + + except (SyntaxError, bb.BBHandledException): + raise bb.BBHandledException + except bb.data_smart.ExpansionError as e: + logger.error(str(e)) + raise bb.BBHandledException + except Exception: + logger.exception("Error parsing configuration files") + raise bb.BBHandledException + + # Create a copy so we can reset at a later date when UIs disconnect + self.origdata = self.data + self.data = bb.data.createCopy(self.origdata) + self.mcdata[''] = self.data + + def reset(self): + # We may not have run parseBaseConfiguration() yet + if not hasattr(self, 'origdata'): + return + self.data = bb.data.createCopy(self.origdata) + self.mcdata[''] = self.data + + def _findLayerConf(self, data): + return findConfigFile("bblayers.conf", data) + + def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"): + data = bb.data.createCopy(self.basedata) + data.setVar("BB_CURRENT_MC", mc) + + # Parse files for loading *before* bitbake.conf and any includes + for f in prefiles: + data = parse_config_file(f, data) + + layerconf = self._findLayerConf(data) + if layerconf: + parselog.debug(2, "Found bblayers.conf (%s)", layerconf) + # By definition bblayers.conf is in conf/ of TOPDIR. + # We may have been called with cwd somewhere else so reset TOPDIR + data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf))) + data = parse_config_file(layerconf, data) + + layers = (data.getVar('BBLAYERS') or "").split() + + data = bb.data.createCopy(data) + approved = bb.utils.approved_variables() + for layer in layers: + if not os.path.isdir(layer): + parselog.critical("Layer directory '%s' does not exist! 
" + "Please check BBLAYERS in %s" % (layer, layerconf)) + sys.exit(1) + parselog.debug(2, "Adding layer %s", layer) + if 'HOME' in approved and '~' in layer: + layer = os.path.expanduser(layer) + if layer.endswith('/'): + layer = layer.rstrip('/') + data.setVar('LAYERDIR', layer) + data.setVar('LAYERDIR_RE', re.escape(layer)) + data = parse_config_file(os.path.join(layer, "conf", "layer.conf"), data) + data.expandVarref('LAYERDIR') + data.expandVarref('LAYERDIR_RE') + + data.delVar('LAYERDIR_RE') + data.delVar('LAYERDIR') + + bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split() + collections = (data.getVar('BBFILE_COLLECTIONS') or "").split() + invalid = [] + for entry in bbfiles_dynamic: + parts = entry.split(":", 1) + if len(parts) != 2: + invalid.append(entry) + continue + l, f = parts + if l in collections: + data.appendVar("BBFILES", " " + f) + if invalid: + bb.fatal("BBFILES_DYNAMIC entries must be of the form :, not:\n %s" % "\n ".join(invalid)) + + layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split()) + for c in collections: + compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split()) + if compat and not (compat & layerseries): + bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)" + % (c, " ".join(layerseries), " ".join(compat))) + elif not compat and not data.getVar("BB_WORKERCONTEXT"): + bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c)) + + if not data.getVar("BBPATH"): + msg = "The BBPATH variable is not set" + if not layerconf: + msg += (" and bitbake did not find a conf/bblayers.conf file in" + " the expected location.\nMaybe you accidentally" + " invoked bitbake from the wrong directory?") + raise SystemExit(msg) + + data = parse_config_file(os.path.join("conf", "bitbake.conf"), data) + + # Parse files for loading *after* bitbake.conf and any includes + for p in postfiles: + data = parse_config_file(p, data) + + # Handle any INHERITs and inherit the base class + bbclasses = ["base"] + (data.getVar('INHERIT') or "").split() + for bbclass in bbclasses: + data = _inherit(bbclass, data) + + # Nomally we only register event handlers at the end of parsing .bb files + # We register any handlers we've found so far here... + for var in data.getVar('__BBHANDLERS', False) or []: + handlerfn = data.getVarFlag(var, "filename", False) + if not handlerfn: + parselog.critical("Undefined event handler function '%s'" % var) + sys.exit(1) + handlerln = int(data.getVarFlag(var, "lineno", False)) + bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln) + + data.setVar('BBINCLUDED',bb.parse.get_file_depends(data)) + + return data + diff --git a/poky/bitbake/lib/bb/daemonize.py b/poky/bitbake/lib/bb/daemonize.py new file mode 100644 index 0000000000..8300d1d0f0 --- /dev/null +++ b/poky/bitbake/lib/bb/daemonize.py @@ -0,0 +1,82 @@ +""" +Python Daemonizing helper + +Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified +to allow a function to be daemonized and return for bitbake use by Richard Purdie +""" + +import os +import sys +import io +import traceback + +def createDaemon(function, logfile): + """ + Detach a process from the controlling terminal and run it in the + background as a daemon, returning control to the caller. + """ + + try: + # Fork a child process so the parent can exit. 
This returns control to + # the command-line or shell. It also guarantees that the child will not + # be a process group leader, since the child receives a new process ID + # and inherits the parent's process group ID. This step is required + # to insure that the next call to os.setsid is successful. + pid = os.fork() + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): # The first child. + # To become the session leader of this new session and the process group + # leader of the new process group, we call os.setsid(). The process is + # also guaranteed not to have a controlling terminal. + os.setsid() + try: + # Fork a second child and exit immediately to prevent zombies. This + # causes the second child process to be orphaned, making the init + # process responsible for its cleanup. And, since the first child is + # a session leader without a controlling terminal, it's possible for + # it to acquire one by opening a terminal in the future (System V- + # based systems). This second fork guarantees that the child is no + # longer a session leader, preventing the daemon from ever acquiring + # a controlling terminal. + pid = os.fork() # Fork a second child. + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid != 0): + # Parent (the first child) of the second child. + # exit() or _exit()? + # _exit is like exit(), but it doesn't call any functions registered + # with atexit (and on_exit) or any registered signal handlers. It also + # closes any open file descriptors. Using exit() may cause all stdio + # streams to be flushed twice and any temporary files may be unexpectedly + # removed. It's therefore recommended that child branches of a fork() + # and the parent branch(es) of a daemon use _exit(). + os._exit(0) + else: + os.waitpid(pid, 0) + return + + # The second child. + + # Replace standard fds with our own + si = open('/dev/null', 'r') + os.dup2(si.fileno(), sys.stdin.fileno()) + + try: + so = open(logfile, 'a+') + se = so + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + except io.UnsupportedOperation: + sys.stdout = open(logfile, 'a+') + sys.stderr = sys.stdout + + try: + function() + except Exception as e: + traceback.print_exc() + finally: + bb.event.print_ui_queue() + os._exit(0) diff --git a/poky/bitbake/lib/bb/data.py b/poky/bitbake/lib/bb/data.py new file mode 100644 index 0000000000..80a7879cb6 --- /dev/null +++ b/poky/bitbake/lib/bb/data.py @@ -0,0 +1,403 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Data' implementations + +Functions for interacting with the data structure used by the +BitBake build tools. + +The expandKeys and update_data are the most expensive +operations. At night the cookie monster came by and +suggested 'give me cookies on setting the variables and +things will work out'. Taking this suggestion into account +applying the skills from the not yet passed 'Entwurf und +Analyse von Algorithmen' lecture and the cookie +monster seems to be right. We will track setVar more carefully +to have faster update_data and expandKeys operations. + +This is a trade-off between speed and memory again but +the speed is more critical here. 
+""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2005 Holger Hans Peter Freyther +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import sys, os, re +if sys.argv[0][-5:] == "pydoc": + path = os.path.dirname(os.path.dirname(sys.argv[1])) +else: + path = os.path.dirname(os.path.dirname(sys.argv[0])) +sys.path.insert(0, path) +from itertools import groupby + +from bb import data_smart +from bb import codeparser +import bb + +logger = data_smart.logger +_dict_type = data_smart.DataSmart + +def init(): + """Return a new object representing the Bitbake data""" + return _dict_type() + +def init_db(parent = None): + """Return a new object representing the Bitbake data, + optionally based on an existing object""" + if parent is not None: + return parent.createCopy() + else: + return _dict_type() + +def createCopy(source): + """Link the source set to the destination + If one does not find the value in the destination set, + search will go on to the source set to get the value. + Value from source are copy-on-write. i.e. any try to + modify one of them will end up putting the modified value + in the destination set. + """ + return source.createCopy() + +def initVar(var, d): + """Non-destructive var init for data structure""" + d.initVar(var) + +def keys(d): + """Return a list of keys in d""" + return d.keys() + + +__expand_var_regexp__ = re.compile(r"\${[^{}]+}") +__expand_python_regexp__ = re.compile(r"\${@.+?}") + +def expand(s, d, varname = None): + """Variable expansion using the data store""" + return d.expand(s, varname) + +def expandKeys(alterdata, readdata = None): + if readdata == None: + readdata = alterdata + + todolist = {} + for key in alterdata: + if not '${' in key: + continue + + ekey = expand(key, readdata) + if key == ekey: + continue + todolist[key] = ekey + + # These two for loops are split for performance to maximise the + # usefulness of the expand cache + for key in sorted(todolist): + ekey = todolist[key] + newval = alterdata.getVar(ekey, False) + if newval is not None: + val = alterdata.getVar(key, False) + if val is not None: + bb.warn("Variable key %s (%s) replaces original key %s (%s)." 
% (key, val, ekey, newval)) + alterdata.renameVar(key, ekey) + +def inheritFromOS(d, savedenv, permitted): + """Inherit variables from the initial environment.""" + exportlist = bb.utils.preserved_envvars_exported() + for s in savedenv.keys(): + if s in permitted: + try: + d.setVar(s, savedenv.getVar(s), op = 'from env') + if s in exportlist: + d.setVarFlag(s, "export", True, op = 'auto env export') + except TypeError: + pass + +def emit_var(var, o=sys.__stdout__, d = init(), all=False): + """Emit a variable to be sourced by a shell.""" + func = d.getVarFlag(var, "func", False) + if d.getVarFlag(var, 'python', False) and func: + return False + + export = d.getVarFlag(var, "export", False) + unexport = d.getVarFlag(var, "unexport", False) + if not all and not export and not unexport and not func: + return False + + try: + if all: + oval = d.getVar(var, False) + val = d.getVar(var) + except (KeyboardInterrupt, bb.build.FuncFailed): + raise + except Exception as exc: + o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc))) + return False + + if all: + d.varhistory.emit(var, oval, val, o, d) + + if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: + return False + + varExpanded = d.expand(var) + + if unexport: + o.write('unset %s\n' % varExpanded) + return False + + if val is None: + return False + + val = str(val) + + if varExpanded.startswith("BASH_FUNC_"): + varExpanded = varExpanded[10:-2] + val = val[3:] # Strip off "() " + o.write("%s() %s\n" % (varExpanded, val)) + o.write("export -f %s\n" % (varExpanded)) + return True + + if func: + # NOTE: should probably check for unbalanced {} within the var + val = val.rstrip('\n') + o.write("%s() {\n%s\n}\n" % (varExpanded, val)) + return 1 + + if export: + o.write('export ') + + # if we're going to output this within doublequotes, + # to a shell, we need to escape the quotes in the var + alter = re.sub('"', '\\"', val) + alter = re.sub('\n', ' \\\n', alter) + alter = re.sub('\\$', '\\\\$', alter) + o.write('%s="%s"\n' % (varExpanded, alter)) + return False + +def emit_env(o=sys.__stdout__, d = init(), all=False): + """Emits all items in the data store in a format such that it can be sourced by a shell.""" + + isfunc = lambda key: bool(d.getVarFlag(key, "func", False)) + keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc) + grouped = groupby(keys, isfunc) + for isfunc, keys in grouped: + for key in sorted(keys): + emit_var(key, o, d, all and not isfunc) and o.write('\n') + +def exported_keys(d): + return (key for key in d.keys() if not key.startswith('__') and + d.getVarFlag(key, 'export', False) and + not d.getVarFlag(key, 'unexport', False)) + +def exported_vars(d): + k = list(exported_keys(d)) + for key in k: + try: + value = d.getVar(key) + except Exception as err: + bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err)) + continue + + if value is not None: + yield key, str(value) + +def emit_func(func, o=sys.__stdout__, d = init()): + """Emits all items in the data store in a format such that it can be sourced by a shell.""" + + keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False)) + for key in sorted(keys): + emit_var(key, o, d, False) + + o.write('\n') + emit_var(func, o, d, False) and o.write('\n') + newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func)) + newdeps |= set((d.getVarFlag(func, "vardeps") or "").split()) 
+ seen = set() + while newdeps: + deps = newdeps + seen |= deps + newdeps = set() + for dep in deps: + if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False): + emit_var(dep, o, d, False) and o.write('\n') + newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep)) + newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split()) + newdeps -= seen + +_functionfmt = """ +def {function}(d): +{body}""" + +def emit_func_python(func, o=sys.__stdout__, d = init()): + """Emits all items in the data store in a format such that it can be sourced by a shell.""" + + def write_func(func, o, call = False): + body = d.getVar(func, False) + if not body.startswith("def"): + body = _functionfmt.format(function=func, body=body) + + o.write(body.strip() + "\n\n") + if call: + o.write(func + "(d)" + "\n\n") + + write_func(func, o, True) + pp = bb.codeparser.PythonParser(func, logger) + pp.parse_python(d.getVar(func, False)) + newdeps = pp.execs + newdeps |= set((d.getVarFlag(func, "vardeps") or "").split()) + seen = set() + while newdeps: + deps = newdeps + seen |= deps + newdeps = set() + for dep in deps: + if d.getVarFlag(dep, "func", False) and d.getVarFlag(dep, "python", False): + write_func(dep, o) + pp = bb.codeparser.PythonParser(dep, logger) + pp.parse_python(d.getVar(dep, False)) + newdeps |= pp.execs + newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split()) + newdeps -= seen + +def update_data(d): + """Performs final steps upon the datastore, including application of overrides""" + d.finalize(parent = True) + +def build_dependencies(key, keys, shelldeps, varflagsexcl, d): + deps = set() + try: + if key[-1] == ']': + vf = key[:-1].split('[') + value = d.getVarFlag(vf[0], vf[1], False) + parser = d.expandWithRefs(value, key) + deps |= parser.references + deps = deps | (keys & parser.execs) + return deps, value + varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {} + vardeps = varflags.get("vardeps") + value = d.getVarFlag(key, "_content", False) + + def handle_contains(value, contains, d): + newvalue = "" + for k in sorted(contains): + l = (d.getVar(k) or "").split() + for item in sorted(contains[k]): + for word in item.split(): + if not word in l: + newvalue += "\n%s{%s} = Unset" % (k, item) + break + else: + newvalue += "\n%s{%s} = Set" % (k, item) + if not newvalue: + return value + if not value: + return newvalue + return value + newvalue + + if "vardepvalue" in varflags: + value = varflags.get("vardepvalue") + elif varflags.get("func"): + if varflags.get("python"): + parser = bb.codeparser.PythonParser(key, logger) + if value and "\t" in value: + logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE"))) + parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno")) + deps = deps | parser.references + deps = deps | (keys & parser.execs) + value = handle_contains(value, parser.contains, d) + else: + parsedvar = d.expandWithRefs(value, key) + parser = bb.codeparser.ShellParser(key, logger) + parser.parse_shell(parsedvar.value) + deps = deps | shelldeps + deps = deps | parsedvar.references + deps = deps | (keys & parser.execs) | (keys & parsedvar.execs) + value = handle_contains(value, parsedvar.contains, d) + if vardeps is None: + parser.log.flush() + if "prefuncs" in varflags: + deps = deps | set(varflags["prefuncs"].split()) + if "postfuncs" in varflags: + deps = deps | 
set(varflags["postfuncs"].split()) + if "exports" in varflags: + deps = deps | set(varflags["exports"].split()) + else: + parser = d.expandWithRefs(value, key) + deps |= parser.references + deps = deps | (keys & parser.execs) + value = handle_contains(value, parser.contains, d) + + if "vardepvalueexclude" in varflags: + exclude = varflags.get("vardepvalueexclude") + for excl in exclude.split('|'): + if excl: + value = value.replace(excl, '') + + # Add varflags, assuming an exclusion list is set + if varflagsexcl: + varfdeps = [] + for f in varflags: + if f not in varflagsexcl: + varfdeps.append('%s[%s]' % (key, f)) + if varfdeps: + deps |= set(varfdeps) + + deps |= set((vardeps or "").split()) + deps -= set(varflags.get("vardepsexclude", "").split()) + except bb.parse.SkipRecipe: + raise + except Exception as e: + bb.warn("Exception during build_dependencies for %s" % key) + raise + return deps, value + #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs))) + #d.setVarFlag(key, "vardeps", deps) + +def generate_dependencies(d): + + keys = set(key for key in d if not key.startswith("__")) + shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False)) + varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS') + + deps = {} + values = {} + + tasklist = d.getVar('__BBTASKS', False) or [] + for task in tasklist: + deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d) + newdeps = deps[task] + seen = set() + while newdeps: + nextdeps = newdeps + seen |= nextdeps + newdeps = set() + for dep in nextdeps: + if dep not in deps: + deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d) + newdeps |= deps[dep] + newdeps -= seen + #print "For %s: %s" % (task, str(deps[task])) + return tasklist, deps, values + +def inherits_class(klass, d): + val = d.getVar('__inherit_cache', False) or [] + needle = os.path.join('classes', '%s.bbclass' % klass) + for v in val: + if v.endswith(needle): + return True + return False diff --git a/poky/bitbake/lib/bb/data_smart.py b/poky/bitbake/lib/bb/data_smart.py new file mode 100644 index 0000000000..7b09af5cf1 --- /dev/null +++ b/poky/bitbake/lib/bb/data_smart.py @@ -0,0 +1,1037 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake Smart Dictionary Implementation + +Functions for interacting with the data structure used by the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2004, 2005 Seb Frankengul +# Copyright (C) 2005, 2006 Holger Hans Peter Freyther +# Copyright (C) 2005 Uli Luckas +# Copyright (C) 2005 ROAD GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import copy, re, sys, traceback +from collections import MutableMapping +import logging +import hashlib +import bb, bb.codeparser +from bb import utils +from bb.COW import COWDictBase + +logger = logging.getLogger("BitBake.Data") + +__setvar_keyword__ = ["_append", "_prepend", "_remove"] +__setvar_regexp__ = re.compile('(?P.*?)(?P_append|_prepend|_remove)(_(?P[^A-Z]*))?$') +__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}") +__expand_python_regexp__ = re.compile(r"\${@.+?}") + +def infer_caller_details(loginfo, parent = False, varval = True): + """Save the caller the trouble of specifying everything.""" + # Save effort. + if 'ignore' in loginfo and loginfo['ignore']: + return + # If nothing was provided, mark this as possibly unneeded. + if not loginfo: + loginfo['ignore'] = True + return + # Infer caller's likely values for variable (var) and value (value), + # to reduce clutter in the rest of the code. + above = None + def set_above(): + try: + raise Exception + except Exception: + tb = sys.exc_info()[2] + if parent: + return tb.tb_frame.f_back.f_back.f_back + else: + return tb.tb_frame.f_back.f_back + + if varval and ('variable' not in loginfo or 'detail' not in loginfo): + if not above: + above = set_above() + lcls = above.f_locals.items() + for k, v in lcls: + if k == 'value' and 'detail' not in loginfo: + loginfo['detail'] = v + if k == 'var' and 'variable' not in loginfo: + loginfo['variable'] = v + # Infer file/line/function from traceback + # Don't use traceback.extract_stack() since it fills the line contents which + # we don't need and that hits stat syscalls + if 'file' not in loginfo: + if not above: + above = set_above() + f = above.f_back + line = f.f_lineno + file = f.f_code.co_filename + func = f.f_code.co_name + loginfo['file'] = file + loginfo['line'] = line + if func not in loginfo: + loginfo['func'] = func + +class VariableParse: + def __init__(self, varname, d, val = None): + self.varname = varname + self.d = d + self.value = val + + self.references = set() + self.execs = set() + self.contains = {} + + def var_sub(self, match): + key = match.group()[2:-1] + if self.varname and key: + if self.varname == key: + raise Exception("variable %s references itself!" 
% self.varname) + if key in self.d.expand_cache: + varparse = self.d.expand_cache[key] + var = varparse.value + else: + var = self.d.getVarFlag(key, "_content") + self.references.add(key) + if var is not None: + return var + else: + return match.group() + + def python_sub(self, match): + if isinstance(match, str): + code = match + else: + code = match.group()[3:-1] + + if "_remote_data" in self.d: + connector = self.d["_remote_data"] + return connector.expandPythonRef(self.varname, code, self.d) + + codeobj = compile(code.strip(), self.varname or "", "eval") + + parser = bb.codeparser.PythonParser(self.varname, logger) + parser.parse_python(code) + if self.varname: + vardeps = self.d.getVarFlag(self.varname, "vardeps") + if vardeps is None: + parser.log.flush() + else: + parser.log.flush() + self.references |= parser.references + self.execs |= parser.execs + + for k in parser.contains: + if k not in self.contains: + self.contains[k] = parser.contains[k].copy() + else: + self.contains[k].update(parser.contains[k]) + value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d}) + return str(value) + + +class DataContext(dict): + def __init__(self, metadata, **kwargs): + self.metadata = metadata + dict.__init__(self, **kwargs) + self['d'] = metadata + + def __missing__(self, key): + value = self.metadata.getVar(key) + if value is None or self.metadata.getVarFlag(key, 'func', False): + raise KeyError(key) + else: + return value + +class ExpansionError(Exception): + def __init__(self, varname, expression, exception): + self.expression = expression + self.variablename = varname + self.exception = exception + if varname: + if expression: + self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception) + else: + self.msg = "Failure expanding variable %s: %s: %s" % (varname, type(exception).__name__, exception) + else: + self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception) + Exception.__init__(self, self.msg) + self.args = (varname, expression, exception) + def __str__(self): + return self.msg + +class IncludeHistory(object): + def __init__(self, parent = None, filename = '[TOP LEVEL]'): + self.parent = parent + self.filename = filename + self.children = [] + self.current = self + + def copy(self): + new = IncludeHistory(self.parent, self.filename) + for c in self.children: + new.children.append(c) + return new + + def include(self, filename): + newfile = IncludeHistory(self.current, filename) + self.current.children.append(newfile) + self.current = newfile + return self + + def __enter__(self): + pass + + def __exit__(self, a, b, c): + if self.current.parent: + self.current = self.current.parent + else: + bb.warn("Include log: Tried to finish '%s' at top level." 
% filename) + return False + + def emit(self, o, level = 0): + """Emit an include history file, and its children.""" + if level: + spaces = " " * (level - 1) + o.write("# %s%s" % (spaces, self.filename)) + if len(self.children) > 0: + o.write(" includes:") + else: + o.write("#\n# INCLUDE HISTORY:\n#") + level = level + 1 + for child in self.children: + o.write("\n") + child.emit(o, level) + +class VariableHistory(object): + def __init__(self, dataroot): + self.dataroot = dataroot + self.variables = COWDictBase.copy() + + def copy(self): + new = VariableHistory(self.dataroot) + new.variables = self.variables.copy() + return new + + def __getstate__(self): + vardict = {} + for k, v in self.variables.iteritems(): + vardict[k] = v + return {'dataroot': self.dataroot, + 'variables': vardict} + + def __setstate__(self, state): + self.dataroot = state['dataroot'] + self.variables = COWDictBase.copy() + for k, v in state['variables'].items(): + self.variables[k] = v + + def record(self, *kwonly, **loginfo): + if not self.dataroot._tracking: + return + if len(kwonly) > 0: + raise TypeError + infer_caller_details(loginfo, parent = True) + if 'ignore' in loginfo and loginfo['ignore']: + return + if 'op' not in loginfo or not loginfo['op']: + loginfo['op'] = 'set' + if 'detail' in loginfo: + loginfo['detail'] = str(loginfo['detail']) + if 'variable' not in loginfo or 'file' not in loginfo: + raise ValueError("record() missing variable or file.") + var = loginfo['variable'] + + if var not in self.variables: + self.variables[var] = [] + if not isinstance(self.variables[var], list): + return + if 'nodups' in loginfo and loginfo in self.variables[var]: + return + self.variables[var].append(loginfo.copy()) + + def variable(self, var): + remote_connector = self.dataroot.getVar('_remote_data', False) + if remote_connector: + varhistory = remote_connector.getVarHistory(var) + else: + varhistory = [] + + if var in self.variables: + varhistory.extend(self.variables[var]) + return varhistory + + def emit(self, var, oval, val, o, d): + history = self.variable(var) + + # Append override history + if var in d.overridedata: + for (r, override) in d.overridedata[var]: + for event in self.variable(r): + loginfo = event.copy() + if 'flag' in loginfo and not loginfo['flag'].startswith("_"): + continue + loginfo['variable'] = var + loginfo['op'] = 'override[%s]:%s' % (override, loginfo['op']) + history.append(loginfo) + + commentVal = re.sub('\n', '\n#', str(oval)) + if history: + if len(history) == 1: + o.write("#\n# $%s\n" % var) + else: + o.write("#\n# $%s [%d operations]\n" % (var, len(history))) + for event in history: + # o.write("# %s\n" % str(event)) + if 'func' in event: + # If we have a function listed, this is internal + # code, not an operation in a config file, and the + # full path is distracting. 
+ event['file'] = re.sub('.*/', '', event['file']) + display_func = ' [%s]' % event['func'] + else: + display_func = '' + if 'flag' in event: + flag = '[%s] ' % (event['flag']) + else: + flag = '' + o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail']))) + if len(history) > 1: + o.write("# pre-expansion value:\n") + o.write('# "%s"\n' % (commentVal)) + else: + o.write("#\n# $%s\n# [no history recorded]\n#\n" % var) + o.write('# "%s"\n' % (commentVal)) + + def get_variable_files(self, var): + """Get the files where operations are made on a variable""" + var_history = self.variable(var) + files = [] + for event in var_history: + files.append(event['file']) + return files + + def get_variable_lines(self, var, f): + """Get the line where a operation is made on a variable in file f""" + var_history = self.variable(var) + lines = [] + for event in var_history: + if f== event['file']: + line = event['line'] + lines.append(line) + return lines + + def get_variable_items_files(self, var, d): + """ + Use variable history to map items added to a list variable and + the files in which they were added. + """ + history = self.variable(var) + finalitems = (d.getVar(var) or '').split() + filemap = {} + isset = False + for event in history: + if 'flag' in event: + continue + if event['op'] == '_remove': + continue + if isset and event['op'] == 'set?': + continue + isset = True + items = d.expand(event['detail']).split() + for item in items: + # This is a little crude but is belt-and-braces to avoid us + # having to handle every possible operation type specifically + if item in finalitems and not item in filemap: + filemap[item] = event['file'] + return filemap + + def del_var_history(self, var, f=None, line=None): + """If file f and line are not given, the entire history of var is deleted""" + if var in self.variables: + if f and line: + self.variables[var] = [ x for x in self.variables[var] if x['file']!=f and x['line']!=line] + else: + self.variables[var] = [] + +class DataSmart(MutableMapping): + def __init__(self): + self.dict = {} + + self.inchistory = IncludeHistory() + self.varhistory = VariableHistory(self) + self._tracking = False + + self.expand_cache = {} + + # cookie monster tribute + # Need to be careful about writes to overridedata as + # its only a shallow copy, could influence other data store + # copies! 
+ self.overridedata = {} + self.overrides = None + self.overridevars = set(["OVERRIDES", "FILE"]) + self.inoverride = False + + def enableTracking(self): + self._tracking = True + + def disableTracking(self): + self._tracking = False + + def expandWithRefs(self, s, varname): + + if not isinstance(s, str): # sanity check + return VariableParse(varname, self, s) + + if varname and varname in self.expand_cache: + return self.expand_cache[varname] + + varparse = VariableParse(varname, self) + + while s.find('${') != -1: + olds = s + try: + s = __expand_var_regexp__.sub(varparse.var_sub, s) + try: + s = __expand_python_regexp__.sub(varparse.python_sub, s) + except SyntaxError as e: + # Likely unmatched brackets, just don't expand the expression + if e.msg != "EOL while scanning string literal": + raise + if s == olds: + break + except ExpansionError: + raise + except bb.parse.SkipRecipe: + raise + except Exception as exc: + raise ExpansionError(varname, s, exc) from exc + + varparse.value = s + + if varname: + self.expand_cache[varname] = varparse + + return varparse + + def expand(self, s, varname = None): + return self.expandWithRefs(s, varname).value + + def finalize(self, parent = False): + return + + def internal_finalize(self, parent = False): + """Performs final steps upon the datastore, including application of overrides""" + self.overrides = None + + def need_overrides(self): + if self.overrides is not None: + return + if self.inoverride: + return + for count in range(5): + self.inoverride = True + # Can end up here recursively so setup dummy values + self.overrides = [] + self.overridesset = set() + self.overrides = (self.getVar("OVERRIDES") or "").split(":") or [] + self.overridesset = set(self.overrides) + self.inoverride = False + self.expand_cache = {} + newoverrides = (self.getVar("OVERRIDES") or "").split(":") or [] + if newoverrides == self.overrides: + break + self.overrides = newoverrides + self.overridesset = set(self.overrides) + else: + bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. 
Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work.") + + def initVar(self, var): + self.expand_cache = {} + if not var in self.dict: + self.dict[var] = {} + + def _findVar(self, var): + dest = self.dict + while dest: + if var in dest: + return dest[var], self.overridedata.get(var, None) + + if "_remote_data" in dest: + connector = dest["_remote_data"]["_content"] + return connector.getVar(var) + + if "_data" not in dest: + break + dest = dest["_data"] + return None, self.overridedata.get(var, None) + + def _makeShadowCopy(self, var): + if var in self.dict: + return + + local_var, _ = self._findVar(var) + + if local_var: + self.dict[var] = copy.copy(local_var) + else: + self.initVar(var) + + + def setVar(self, var, value, **loginfo): + #print("var=" + str(var) + " val=" + str(value)) + parsing=False + if 'parsing' in loginfo: + parsing=True + + if '_remote_data' in self.dict: + connector = self.dict["_remote_data"]["_content"] + res = connector.setVar(var, value) + if not res: + return + + if 'op' not in loginfo: + loginfo['op'] = "set" + self.expand_cache = {} + match = __setvar_regexp__.match(var) + if match and match.group("keyword") in __setvar_keyword__: + base = match.group('base') + keyword = match.group("keyword") + override = match.group('add') + l = self.getVarFlag(base, keyword, False) or [] + l.append([value, override]) + self.setVarFlag(base, keyword, l, ignore=True) + # And cause that to be recorded: + loginfo['detail'] = value + loginfo['variable'] = base + if override: + loginfo['op'] = '%s[%s]' % (keyword, override) + else: + loginfo['op'] = keyword + self.varhistory.record(**loginfo) + # todo make sure keyword is not __doc__ or __module__ + # pay the cookie monster + + # more cookies for the cookie monster + if '_' in var: + self._setvar_update_overrides(base, **loginfo) + + if base in self.overridevars: + self._setvar_update_overridevars(var, value) + return + + if not var in self.dict: + self._makeShadowCopy(var) + + if not parsing: + if "_append" in self.dict[var]: + del self.dict[var]["_append"] + if "_prepend" in self.dict[var]: + del self.dict[var]["_prepend"] + if "_remove" in self.dict[var]: + del self.dict[var]["_remove"] + if var in self.overridedata: + active = [] + self.need_overrides() + for (r, o) in self.overridedata[var]: + if o in self.overridesset: + active.append(r) + elif "_" in o: + if set(o.split("_")).issubset(self.overridesset): + active.append(r) + for a in active: + self.delVar(a) + del self.overridedata[var] + + # more cookies for the cookie monster + if '_' in var: + self._setvar_update_overrides(var, **loginfo) + + # setting var + self.dict[var]["_content"] = value + self.varhistory.record(**loginfo) + + if var in self.overridevars: + self._setvar_update_overridevars(var, value) + + def _setvar_update_overridevars(self, var, value): + vardata = self.expandWithRefs(value, var) + new = vardata.references + new.update(vardata.contains.keys()) + while not new.issubset(self.overridevars): + nextnew = set() + self.overridevars.update(new) + for i in new: + vardata = self.expandWithRefs(self.getVar(i), i) + nextnew.update(vardata.references) + nextnew.update(vardata.contains.keys()) + new = nextnew + self.internal_finalize(True) + + def _setvar_update_overrides(self, var, **loginfo): + # aka pay the cookie monster + override = var[var.rfind('_')+1:] + shortvar = var[:var.rfind('_')] + while override and override.islower(): + if shortvar not in self.overridedata: + 
self.overridedata[shortvar] = [] + if [var, override] not in self.overridedata[shortvar]: + # Force CoW by recreating the list first + self.overridedata[shortvar] = list(self.overridedata[shortvar]) + self.overridedata[shortvar].append([var, override]) + override = None + if "_" in shortvar: + override = var[shortvar.rfind('_')+1:] + shortvar = var[:shortvar.rfind('_')] + if len(shortvar) == 0: + override = None + + def getVar(self, var, expand=True, noweakdefault=False, parsing=False): + return self.getVarFlag(var, "_content", expand, noweakdefault, parsing) + + def renameVar(self, key, newkey, **loginfo): + """ + Rename the variable key to newkey + """ + if '_remote_data' in self.dict: + connector = self.dict["_remote_data"]["_content"] + res = connector.renameVar(key, newkey) + if not res: + return + + val = self.getVar(key, 0, parsing=True) + if val is not None: + loginfo['variable'] = newkey + loginfo['op'] = 'rename from %s' % key + loginfo['detail'] = val + self.varhistory.record(**loginfo) + self.setVar(newkey, val, ignore=True, parsing=True) + + for i in (__setvar_keyword__): + src = self.getVarFlag(key, i, False) + if src is None: + continue + + dest = self.getVarFlag(newkey, i, False) or [] + dest.extend(src) + self.setVarFlag(newkey, i, dest, ignore=True) + + if key in self.overridedata: + self.overridedata[newkey] = [] + for (v, o) in self.overridedata[key]: + self.overridedata[newkey].append([v.replace(key, newkey), o]) + self.renameVar(v, v.replace(key, newkey)) + + if '_' in newkey and val is None: + self._setvar_update_overrides(newkey, **loginfo) + + loginfo['variable'] = key + loginfo['op'] = 'rename (to)' + loginfo['detail'] = newkey + self.varhistory.record(**loginfo) + self.delVar(key, ignore=True) + + def appendVar(self, var, value, **loginfo): + loginfo['op'] = 'append' + self.varhistory.record(**loginfo) + self.setVar(var + "_append", value, ignore=True, parsing=True) + + def prependVar(self, var, value, **loginfo): + loginfo['op'] = 'prepend' + self.varhistory.record(**loginfo) + self.setVar(var + "_prepend", value, ignore=True, parsing=True) + + def delVar(self, var, **loginfo): + if '_remote_data' in self.dict: + connector = self.dict["_remote_data"]["_content"] + res = connector.delVar(var) + if not res: + return + + loginfo['detail'] = "" + loginfo['op'] = 'del' + self.varhistory.record(**loginfo) + self.expand_cache = {} + self.dict[var] = {} + if var in self.overridedata: + del self.overridedata[var] + if '_' in var: + override = var[var.rfind('_')+1:] + shortvar = var[:var.rfind('_')] + while override and override.islower(): + try: + if shortvar in self.overridedata: + # Force CoW by recreating the list first + self.overridedata[shortvar] = list(self.overridedata[shortvar]) + self.overridedata[shortvar].remove([var, override]) + except ValueError as e: + pass + override = None + if "_" in shortvar: + override = var[shortvar.rfind('_')+1:] + shortvar = var[:shortvar.rfind('_')] + if len(shortvar) == 0: + override = None + + def setVarFlag(self, var, flag, value, **loginfo): + if '_remote_data' in self.dict: + connector = self.dict["_remote_data"]["_content"] + res = connector.setVarFlag(var, flag, value) + if not res: + return + + self.expand_cache = {} + if 'op' not in loginfo: + loginfo['op'] = "set" + loginfo['flag'] = flag + self.varhistory.record(**loginfo) + if not var in self.dict: + self._makeShadowCopy(var) + self.dict[var][flag] = value + + if flag == "_defaultval" and '_' in var: + self._setvar_update_overrides(var, **loginfo) + if flag == 
"_defaultval" and var in self.overridevars: + self._setvar_update_overridevars(var, value) + + if flag == "unexport" or flag == "export": + if not "__exportlist" in self.dict: + self._makeShadowCopy("__exportlist") + if not "_content" in self.dict["__exportlist"]: + self.dict["__exportlist"]["_content"] = set() + self.dict["__exportlist"]["_content"].add(var) + + def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False): + local_var, overridedata = self._findVar(var) + value = None + if flag == "_content" and overridedata is not None and not parsing: + match = False + active = {} + self.need_overrides() + for (r, o) in overridedata: + # What about double overrides both with "_" in the name? + if o in self.overridesset: + active[o] = r + elif "_" in o: + if set(o.split("_")).issubset(self.overridesset): + active[o] = r + + mod = True + while mod: + mod = False + for o in self.overrides: + for a in active.copy(): + if a.endswith("_" + o): + t = active[a] + del active[a] + active[a.replace("_" + o, "")] = t + mod = True + elif a == o: + match = active[a] + del active[a] + if match: + value = self.getVar(match, False) + + if local_var is not None and value is None: + if flag in local_var: + value = copy.copy(local_var[flag]) + elif flag == "_content" and "_defaultval" in local_var and not noweakdefault: + value = copy.copy(local_var["_defaultval"]) + + + if flag == "_content" and local_var is not None and "_append" in local_var and not parsing: + if not value: + value = "" + self.need_overrides() + for (r, o) in local_var["_append"]: + match = True + if o: + for o2 in o.split("_"): + if not o2 in self.overrides: + match = False + if match: + value = value + r + + if flag == "_content" and local_var is not None and "_prepend" in local_var and not parsing: + if not value: + value = "" + self.need_overrides() + for (r, o) in local_var["_prepend"]: + + match = True + if o: + for o2 in o.split("_"): + if not o2 in self.overrides: + match = False + if match: + value = r + value + + if expand and value: + # Only getvar (flag == _content) hits the expand cache + cachename = None + if flag == "_content": + cachename = var + else: + cachename = var + "[" + flag + "]" + value = self.expand(value, cachename) + + if value and flag == "_content" and local_var is not None and "_remove" in local_var: + removes = [] + self.need_overrides() + for (r, o) in local_var["_remove"]: + match = True + if o: + for o2 in o.split("_"): + if not o2 in self.overrides: + match = False + if match: + removes.extend(self.expand(r).split()) + + if removes: + filtered = filter(lambda v: v not in removes, + value.split()) + value = " ".join(filtered) + if expand and var in self.expand_cache: + # We need to ensure the expand cache has the correct value + # flag == "_content" here + self.expand_cache[var].value = value + return value + + def delVarFlag(self, var, flag, **loginfo): + if '_remote_data' in self.dict: + connector = self.dict["_remote_data"]["_content"] + res = connector.delVarFlag(var, flag) + if not res: + return + + self.expand_cache = {} + local_var, _ = self._findVar(var) + if not local_var: + return + if not var in self.dict: + self._makeShadowCopy(var) + + if var in self.dict and flag in self.dict[var]: + loginfo['detail'] = "" + loginfo['op'] = 'delFlag' + loginfo['flag'] = flag + self.varhistory.record(**loginfo) + + del self.dict[var][flag] + + def appendVarFlag(self, var, flag, value, **loginfo): + loginfo['op'] = 'append' + loginfo['flag'] = flag + self.varhistory.record(**loginfo) + 
newvalue = (self.getVarFlag(var, flag, False) or "") + value + self.setVarFlag(var, flag, newvalue, ignore=True) + + def prependVarFlag(self, var, flag, value, **loginfo): + loginfo['op'] = 'prepend' + loginfo['flag'] = flag + self.varhistory.record(**loginfo) + newvalue = value + (self.getVarFlag(var, flag, False) or "") + self.setVarFlag(var, flag, newvalue, ignore=True) + + def setVarFlags(self, var, flags, **loginfo): + self.expand_cache = {} + infer_caller_details(loginfo) + if not var in self.dict: + self._makeShadowCopy(var) + + for i in flags: + if i == "_content": + continue + loginfo['flag'] = i + loginfo['detail'] = flags[i] + self.varhistory.record(**loginfo) + self.dict[var][i] = flags[i] + + def getVarFlags(self, var, expand = False, internalflags=False): + local_var, _ = self._findVar(var) + flags = {} + + if local_var: + for i in local_var: + if i.startswith("_") and not internalflags: + continue + flags[i] = local_var[i] + if expand and i in expand: + flags[i] = self.expand(flags[i], var + "[" + i + "]") + if len(flags) == 0: + return None + return flags + + + def delVarFlags(self, var, **loginfo): + self.expand_cache = {} + if not var in self.dict: + self._makeShadowCopy(var) + + if var in self.dict: + content = None + + loginfo['op'] = 'delete flags' + self.varhistory.record(**loginfo) + + # try to save the content + if "_content" in self.dict[var]: + content = self.dict[var]["_content"] + self.dict[var] = {} + self.dict[var]["_content"] = content + else: + del self.dict[var] + + def createCopy(self): + """ + Create a copy of self by setting _data to self + """ + # we really want this to be a DataSmart... + data = DataSmart() + data.dict["_data"] = self.dict + data.varhistory = self.varhistory.copy() + data.varhistory.dataroot = data + data.inchistory = self.inchistory.copy() + + data._tracking = self._tracking + + data.overrides = None + data.overridevars = copy.copy(self.overridevars) + # Should really be a deepcopy but has heavy overhead. + # Instead, we're careful with writes. 
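A minimal, hypothetical example of the override, append and expansion handling implemented by setVar()/getVarFlag() above; the variable names are illustrative and bitbake's lib/ directory is assumed to be importable.

    from bb.data_smart import DataSmart

    d = DataSmart()
    d.setVar("OVERRIDES", "arm:mydistro")
    d.setVar("FOO", "default")
    d.setVar("FOO_arm", "arm-specific")   # wins because "arm" is in OVERRIDES
    d.setVar("FOO_append", " extra")      # stored as an _append flag on FOO
    d.setVar("BAR", "${FOO}/bar")

    print(d.getVar("FOO"))   # -> "arm-specific extra"
    print(d.getVar("BAR"))   # -> "arm-specific extra/bar" (${FOO} expanded)
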
+ data.overridedata = copy.copy(self.overridedata) + + return data + + def expandVarref(self, variable, parents=False): + """Find all references to variable in the data and expand it + in place, optionally descending to parent datastores.""" + + if parents: + keys = iter(self) + else: + keys = self.localkeys() + + ref = '${%s}' % variable + value = self.getVar(variable, False) + for key in keys: + referrervalue = self.getVar(key, False) + if referrervalue and ref in referrervalue: + self.setVar(key, referrervalue.replace(ref, value)) + + def localkeys(self): + for key in self.dict: + if key not in ['_data', '_remote_data']: + yield key + + def __iter__(self): + deleted = set() + overrides = set() + def keylist(d): + klist = set() + for key in d: + if key in ["_data", "_remote_data"]: + continue + if key in deleted: + continue + if key in overrides: + continue + if not d[key]: + deleted.add(key) + continue + klist.add(key) + + if "_data" in d: + klist |= keylist(d["_data"]) + + if "_remote_data" in d: + connector = d["_remote_data"]["_content"] + for key in connector.getKeys(): + if key in deleted: + continue + klist.add(key) + + return klist + + self.need_overrides() + for var in self.overridedata: + for (r, o) in self.overridedata[var]: + if o in self.overridesset: + overrides.add(var) + elif "_" in o: + if set(o.split("_")).issubset(self.overridesset): + overrides.add(var) + + for k in keylist(self.dict): + yield k + + for k in overrides: + yield k + + def __len__(self): + return len(frozenset(iter(self))) + + def __getitem__(self, item): + value = self.getVar(item, False) + if value is None: + raise KeyError(item) + else: + return value + + def __setitem__(self, var, value): + self.setVar(var, value) + + def __delitem__(self, var): + self.delVar(var) + + def get_hash(self): + data = {} + d = self.createCopy() + bb.data.expandKeys(d) + + config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split()) + keys = set(key for key in iter(d) if not key.startswith("__")) + for key in keys: + if key in config_whitelist: + continue + + value = d.getVar(key, False) or "" + data.update({key:value}) + + varflags = d.getVarFlags(key, internalflags = True) + if not varflags: + continue + for f in varflags: + if f == "_content": + continue + data.update({'%s[%s]' % (key, f):varflags[f]}) + + for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]: + bb_list = d.getVar(key, False) or [] + data.update({key:str(bb_list)}) + + if key == "__BBANONFUNCS": + for i in bb_list: + value = d.getVar(i, False) or "" + data.update({i:value}) + + data_str = str([(k, data[k]) for k in sorted(data.keys())]) + return hashlib.md5(data_str.encode("utf-8")).hexdigest() diff --git a/poky/bitbake/lib/bb/event.py b/poky/bitbake/lib/bb/event.py new file mode 100644 index 0000000000..5d0049626d --- /dev/null +++ b/poky/bitbake/lib/bb/event.py @@ -0,0 +1,831 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Event' implementation + +Classes and functions for manipulating 'events' in the +BitBake build tools. +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os, sys +import warnings +import pickle +import logging +import atexit +import traceback +import ast +import threading + +import bb.utils +import bb.compat +import bb.exceptions + +# This is the pid for which we should generate the event. This is set when +# the runqueue forks off. +worker_pid = 0 +worker_fire = None + +logger = logging.getLogger('BitBake.Event') + +class Event(object): + """Base class for events""" + + def __init__(self): + self.pid = worker_pid + + +class HeartbeatEvent(Event): + """Triggered at regular time intervals of 10 seconds. Other events can fire much more often + (runQueueTaskStarted when there are many short tasks) or not at all for long periods + of time (again runQueueTaskStarted, when there is just one long-running task), so this + event is more suitable for doing some task-independent work occassionally.""" + def __init__(self, time): + Event.__init__(self) + self.time = time + +Registered = 10 +AlreadyRegistered = 14 + +def get_class_handlers(): + return _handlers + +def set_class_handlers(h): + global _handlers + _handlers = h + +def clean_class_handlers(): + return bb.compat.OrderedDict() + +# Internal +_handlers = clean_class_handlers() +_ui_handlers = {} +_ui_logfilters = {} +_ui_handler_seq = 0 +_event_handler_map = {} +_catchall_handlers = {} +_eventfilter = None +_uiready = False +_thread_lock = threading.Lock() +_thread_lock_enabled = False + +if hasattr(__builtins__, '__setitem__'): + builtins = __builtins__ +else: + builtins = __builtins__.__dict__ + +def enable_threadlock(): + global _thread_lock_enabled + _thread_lock_enabled = True + +def disable_threadlock(): + global _thread_lock_enabled + _thread_lock_enabled = False + +def execute_handler(name, handler, event, d): + event.data = d + addedd = False + if 'd' not in builtins: + builtins['d'] = d + addedd = True + try: + ret = handler(event) + except (bb.parse.SkipRecipe, bb.BBHandledException): + raise + except Exception: + etype, value, tb = sys.exc_info() + logger.error("Execution of event handler '%s' failed" % name, + exc_info=(etype, value, tb.tb_next)) + raise + except SystemExit as exc: + if exc.code != 0: + logger.error("Execution of event handler '%s' failed" % name) + raise + finally: + del event.data + if addedd: + del builtins['d'] + +def fire_class_handlers(event, d): + if isinstance(event, logging.LogRecord): + return + + eid = str(event.__class__)[8:-2] + evt_hmap = _event_handler_map.get(eid, {}) + for name, handler in list(_handlers.items()): + if name in _catchall_handlers or name in evt_hmap: + if _eventfilter: + if not _eventfilter(name, handler, event, d): + continue + execute_handler(name, handler, event, d) + +ui_queue = [] +@atexit.register +def print_ui_queue(): + """If we're exiting before a UI has been spawned, display any queued + LogRecords to the console.""" + logger = logging.getLogger("BitBake") + if not _uiready: + from bb.msg import BBLogFormatter + stdout = logging.StreamHandler(sys.stdout) + stderr = logging.StreamHandler(sys.stderr) + formatter = BBLogFormatter("%(levelname)s: %(message)s") + stdout.setFormatter(formatter) + stderr.setFormatter(formatter) + + # First check to see if we have any proper messages + msgprint = False + msgerrs = False + + # Should we print to stderr? 
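As a hedged illustration of the handler machinery above, the sketch below registers a class handler for a single event type and fires an event at it; the handler name and the use of None as the datastore are illustrative only.

    import bb.event

    def my_handler(e):
        # e.data holds the datastore passed to fire() while the handler runs
        print("saw event:", bb.event.getName(e))

    bb.event.register("my_handler", my_handler, mask=["bb.event.ConfigParsed"])
    bb.event.fire(bb.event.ConfigParsed(), None)   # -> "saw event: ConfigParsed"
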
+ for event in ui_queue[:]: + if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING: + msgerrs = True + break + + if msgerrs: + logger.addHandler(stderr) + else: + logger.addHandler(stdout) + + for event in ui_queue[:]: + if isinstance(event, logging.LogRecord): + if event.levelno > logging.DEBUG: + logger.handle(event) + msgprint = True + + # Nope, so just print all of the messages we have (including debug messages) + if not msgprint: + for event in ui_queue[:]: + if isinstance(event, logging.LogRecord): + logger.handle(event) + if msgerrs: + logger.removeHandler(stderr) + else: + logger.removeHandler(stdout) + +def fire_ui_handlers(event, d): + global _thread_lock + global _thread_lock_enabled + + if not _uiready: + # No UI handlers registered yet, queue up the messages + ui_queue.append(event) + return + + if _thread_lock_enabled: + _thread_lock.acquire() + + errors = [] + for h in _ui_handlers: + #print "Sending event %s" % event + try: + if not _ui_logfilters[h].filter(event): + continue + # We use pickle here since it better handles object instances + # which xmlrpc's marshaller does not. Events *must* be serializable + # by pickle. + if hasattr(_ui_handlers[h].event, "sendpickle"): + _ui_handlers[h].event.sendpickle((pickle.dumps(event))) + else: + _ui_handlers[h].event.send(event) + except: + errors.append(h) + for h in errors: + del _ui_handlers[h] + + if _thread_lock_enabled: + _thread_lock.release() + +def fire(event, d): + """Fire off an Event""" + + # We can fire class handlers in the worker process context and this is + # desired so they get the task based datastore. + # UI handlers need to be fired in the server context so we defer this. They + # don't have a datastore so the datastore context isn't a problem. + + fire_class_handlers(event, d) + if worker_fire: + worker_fire(event, d) + else: + # If messages have been queued up, clear the queue + global _uiready, ui_queue + if _uiready and ui_queue: + for queue_event in ui_queue: + fire_ui_handlers(queue_event, d) + ui_queue = [] + fire_ui_handlers(event, d) + +def fire_from_worker(event, d): + fire_ui_handlers(event, d) + +noop = lambda _: None +def register(name, handler, mask=None, filename=None, lineno=None): + """Register an Event handler""" + + # already registered + if name in _handlers: + return AlreadyRegistered + + if handler is not None: + # handle string containing python code + if isinstance(handler, str): + tmp = "def %s(e):\n%s" % (name, handler) + try: + code = bb.methodpool.compile_cache(tmp) + if not code: + if filename is None: + filename = "%s(e)" % name + code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST) + if lineno is not None: + ast.increment_lineno(code, lineno-1) + code = compile(code, filename, "exec") + bb.methodpool.compile_cache_add(tmp, code) + except SyntaxError: + logger.error("Unable to register event handler '%s':\n%s", name, + ''.join(traceback.format_exc(limit=0))) + _handlers[name] = noop + return + env = {} + bb.utils.better_exec(code, env) + func = bb.utils.better_eval(name, env) + _handlers[name] = func + else: + _handlers[name] = handler + + if not mask or '*' in mask: + _catchall_handlers[name] = True + else: + for m in mask: + if _event_handler_map.get(m, None) is None: + _event_handler_map[m] = {} + _event_handler_map[m][name] = True + + return Registered + +def remove(name, handler): + """Remove an Event handler""" + _handlers.pop(name) + if name in _catchall_handlers: + _catchall_handlers.pop(name) + for event in _event_handler_map.keys(): + if name 
in _event_handler_map[event]: + _event_handler_map[event].pop(name) + +def get_handlers(): + return _handlers + +def set_handlers(handlers): + global _handlers + _handlers = handlers + +def set_eventfilter(func): + global _eventfilter + _eventfilter = func + +def register_UIHhandler(handler, mainui=False): + bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1 + _ui_handlers[_ui_handler_seq] = handler + level, debug_domains = bb.msg.constructLogOptions() + _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains) + if mainui: + global _uiready + _uiready = _ui_handler_seq + return _ui_handler_seq + +def unregister_UIHhandler(handlerNum, mainui=False): + if mainui: + global _uiready + _uiready = False + if handlerNum in _ui_handlers: + del _ui_handlers[handlerNum] + return + +def get_uihandler(): + if _uiready is False: + return None + return _uiready + +# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC +class UIEventFilter(object): + def __init__(self, level, debug_domains): + self.update(None, level, debug_domains) + + def update(self, eventmask, level, debug_domains): + self.eventmask = eventmask + self.stdlevel = level + self.debug_domains = debug_domains + + def filter(self, event): + if isinstance(event, logging.LogRecord): + if event.levelno >= self.stdlevel: + return True + if event.name in self.debug_domains and event.levelno >= self.debug_domains[event.name]: + return True + return False + eid = str(event.__class__)[8:-2] + if self.eventmask and eid not in self.eventmask: + return False + return True + +def set_UIHmask(handlerNum, level, debug_domains, mask): + if not handlerNum in _ui_handlers: + return False + if '*' in mask: + _ui_logfilters[handlerNum].update(None, level, debug_domains) + else: + _ui_logfilters[handlerNum].update(mask, level, debug_domains) + return True + +def getName(e): + """Returns the name of a class or class instance""" + if getattr(e, "__name__", None) == None: + return e.__class__.__name__ + else: + return e.__name__ + +class OperationStarted(Event): + """An operation has begun""" + def __init__(self, msg = "Operation Started"): + Event.__init__(self) + self.msg = msg + +class OperationCompleted(Event): + """An operation has completed""" + def __init__(self, total, msg = "Operation Completed"): + Event.__init__(self) + self.total = total + self.msg = msg + +class OperationProgress(Event): + """An operation is in progress""" + def __init__(self, current, total, msg = "Operation in Progress"): + Event.__init__(self) + self.current = current + self.total = total + self.msg = msg + ": %s/%s" % (current, total); + +class ConfigParsed(Event): + """Configuration Parsing Complete""" + +class MultiConfigParsed(Event): + """Multi-Config Parsing Complete""" + def __init__(self, mcdata): + self.mcdata = mcdata + Event.__init__(self) + +class RecipeEvent(Event): + def __init__(self, fn): + self.fn = fn + Event.__init__(self) + +class RecipePreFinalise(RecipeEvent): + """ Recipe Parsing Complete but not yet finialised""" + +class RecipeTaskPreProcess(RecipeEvent): + """ + Recipe Tasks about to be finalised + The list of tasks should be final at this point and handlers + are only able to change interdependencies + """ + def __init__(self, fn, tasklist): + self.fn = fn + self.tasklist = tasklist + Event.__init__(self) + +class RecipeParsed(RecipeEvent): + """ Recipe Parsing Complete """ + +class StampUpdate(Event): + """Trigger for any adjustment of the stamp files to happen""" + + def 
__init__(self, targets, stampfns): + self._targets = targets + self._stampfns = stampfns + Event.__init__(self) + + def getStampPrefix(self): + return self._stampfns + + def getTargets(self): + return self._targets + + stampPrefix = property(getStampPrefix) + targets = property(getTargets) + +class BuildBase(Event): + """Base class for bitbake build events""" + + def __init__(self, n, p, failures = 0): + self._name = n + self._pkgs = p + Event.__init__(self) + self._failures = failures + + def getPkgs(self): + return self._pkgs + + def setPkgs(self, pkgs): + self._pkgs = pkgs + + def getName(self): + return self._name + + def setName(self, name): + self._name = name + + def getFailures(self): + """ + Return the number of failed packages + """ + return self._failures + + pkgs = property(getPkgs, setPkgs, None, "pkgs property") + name = property(getName, setName, None, "name property") + +class BuildInit(BuildBase): + """buildFile or buildTargets was invoked""" + def __init__(self, p=[]): + name = None + BuildBase.__init__(self, name, p) + +class BuildStarted(BuildBase, OperationStarted): + """Event when builds start""" + def __init__(self, n, p, failures = 0): + OperationStarted.__init__(self, "Building Started") + BuildBase.__init__(self, n, p, failures) + +class BuildCompleted(BuildBase, OperationCompleted): + """Event when builds have completed""" + def __init__(self, total, n, p, failures=0, interrupted=0): + if not failures: + OperationCompleted.__init__(self, total, "Building Succeeded") + else: + OperationCompleted.__init__(self, total, "Building Failed") + self._interrupted = interrupted + BuildBase.__init__(self, n, p, failures) + +class DiskFull(Event): + """Disk full case build aborted""" + def __init__(self, dev, type, freespace, mountpoint): + Event.__init__(self) + self._dev = dev + self._type = type + self._free = freespace + self._mountpoint = mountpoint + +class DiskUsageSample: + def __init__(self, available_bytes, free_bytes, total_bytes): + # Number of bytes available to non-root processes. + self.available_bytes = available_bytes + # Number of bytes available to root processes. + self.free_bytes = free_bytes + # Total capacity of the volume. + self.total_bytes = total_bytes + +class MonitorDiskEvent(Event): + """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked. + Provides information about devices that are getting monitored.""" + def __init__(self, disk_usage): + Event.__init__(self) + # hash of device root path -> DiskUsageSample + self.disk_usage = disk_usage + +class NoProvider(Event): + """No Provider for an Event""" + + def __init__(self, item, runtime=False, dependees=None, reasons=None, close_matches=None): + Event.__init__(self) + self._item = item + self._runtime = runtime + self._dependees = dependees + self._reasons = reasons + self._close_matches = close_matches + + def getItem(self): + return self._item + + def isRuntime(self): + return self._runtime + + def __str__(self): + msg = '' + if self._runtime: + r = "R" + else: + r = "" + + extra = '' + if not self._reasons: + if self._close_matches: + extra = ". 
Close matches:\n %s" % '\n '.join(self._close_matches) + + if self._dependees: + msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra) + else: + msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra) + if self._reasons: + for reason in self._reasons: + msg += '\n' + reason + return msg + + +class MultipleProviders(Event): + """Multiple Providers""" + + def __init__(self, item, candidates, runtime = False): + Event.__init__(self) + self._item = item + self._candidates = candidates + self._is_runtime = runtime + + def isRuntime(self): + """ + Is this a runtime issue? + """ + return self._is_runtime + + def getItem(self): + """ + The name for the to be build item + """ + return self._item + + def getCandidates(self): + """ + Get the possible Candidates for a PROVIDER. + """ + return self._candidates + + def __str__(self): + msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "", + self._item, + ", ".join(self._candidates)) + rtime = "" + if self._is_runtime: + rtime = "R" + msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item) + return msg + +class ParseStarted(OperationStarted): + """Recipe parsing for the runqueue has begun""" + def __init__(self, total): + OperationStarted.__init__(self, "Recipe parsing Started") + self.total = total + +class ParseCompleted(OperationCompleted): + """Recipe parsing for the runqueue has completed""" + def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total): + OperationCompleted.__init__(self, total, "Recipe parsing Completed") + self.cached = cached + self.parsed = parsed + self.skipped = skipped + self.virtuals = virtuals + self.masked = masked + self.errors = errors + self.sofar = cached + parsed + +class ParseProgress(OperationProgress): + """Recipe parsing progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Recipe parsing") + + +class CacheLoadStarted(OperationStarted): + """Loading of the dependency cache has begun""" + def __init__(self, total): + OperationStarted.__init__(self, "Loading cache Started") + self.total = total + +class CacheLoadProgress(OperationProgress): + """Cache loading progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Loading cache") + +class CacheLoadCompleted(OperationCompleted): + """Cache loading is complete""" + def __init__(self, total, num_entries): + OperationCompleted.__init__(self, total, "Loading cache Completed") + self.num_entries = num_entries + +class TreeDataPreparationStarted(OperationStarted): + """Tree data preparation started""" + def __init__(self): + OperationStarted.__init__(self, "Preparing tree data Started") + +class TreeDataPreparationProgress(OperationProgress): + """Tree data preparation is in progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Preparing tree data") + +class TreeDataPreparationCompleted(OperationCompleted): + """Tree data preparation completed""" + def __init__(self, total): + OperationCompleted.__init__(self, total, "Preparing tree data Completed") + +class DepTreeGenerated(Event): + """ + Event when a dependency tree has been generated + """ + + def __init__(self, depgraph): + Event.__init__(self) + self._depgraph = depgraph + +class TargetsTreeGenerated(Event): + """ + Event when a set of buildable targets has been generated + """ + def __init__(self, 
model): + Event.__init__(self) + self._model = model + +class ReachableStamps(Event): + """ + An event listing all stamps reachable after parsing + which the metadata may use to clean up stale data + """ + + def __init__(self, stamps): + Event.__init__(self) + self.stamps = stamps + +class FilesMatchingFound(Event): + """ + Event when a list of files matching the supplied pattern has + been generated + """ + def __init__(self, pattern, matches): + Event.__init__(self) + self._pattern = pattern + self._matches = matches + +class ConfigFilesFound(Event): + """ + Event when a list of appropriate config files has been generated + """ + def __init__(self, variable, values): + Event.__init__(self) + self._variable = variable + self._values = values + +class ConfigFilePathFound(Event): + """ + Event when a path for a config file has been found + """ + def __init__(self, path): + Event.__init__(self) + self._path = path + +class MsgBase(Event): + """Base class for messages""" + + def __init__(self, msg): + self._message = msg + Event.__init__(self) + +class MsgDebug(MsgBase): + """Debug Message""" + +class MsgNote(MsgBase): + """Note Message""" + +class MsgWarn(MsgBase): + """Warning Message""" + +class MsgError(MsgBase): + """Error Message""" + +class MsgFatal(MsgBase): + """Fatal Message""" + +class MsgPlain(MsgBase): + """General output""" + +class LogExecTTY(Event): + """Send event containing program to spawn on tty of the logger""" + def __init__(self, msg, prog, sleep_delay, retries): + Event.__init__(self) + self.msg = msg + self.prog = prog + self.sleep_delay = sleep_delay + self.retries = retries + +class LogHandler(logging.Handler): + """Dispatch logging messages as bitbake events""" + + def emit(self, record): + if record.exc_info: + etype, value, tb = record.exc_info + if hasattr(tb, 'tb_next'): + tb = list(bb.exceptions.extract_traceback(tb, context=3)) + # Need to turn the value into something the logging system can pickle + record.bb_exc_info = (etype, value, tb) + record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5) + value = str(value) + record.exc_info = None + fire(record, None) + + def filter(self, record): + record.taskpid = worker_pid + return True + +class MetadataEvent(Event): + """ + Generic event that target for OE-Core classes + to report information during asynchrous execution + """ + def __init__(self, eventtype, eventdata): + Event.__init__(self) + self.type = eventtype + self._localdata = eventdata + +class ProcessStarted(Event): + """ + Generic process started event (usually part of the initial startup) + where further progress events will be delivered + """ + def __init__(self, processname, total): + Event.__init__(self) + self.processname = processname + self.total = total + +class ProcessProgress(Event): + """ + Generic process progress event (usually part of the initial startup) + """ + def __init__(self, processname, progress): + Event.__init__(self) + self.processname = processname + self.progress = progress + +class ProcessFinished(Event): + """ + Generic process finished event (usually part of the initial startup) + """ + def __init__(self, processname): + Event.__init__(self) + self.processname = processname + +class SanityCheck(Event): + """ + Event to run sanity checks, either raise errors or generate events as return status. 
+ """ + def __init__(self, generateevents = True): + Event.__init__(self) + self.generateevents = generateevents + +class SanityCheckPassed(Event): + """ + Event to indicate sanity check has passed + """ + +class SanityCheckFailed(Event): + """ + Event to indicate sanity check has failed + """ + def __init__(self, msg, network_error=False): + Event.__init__(self) + self._msg = msg + self._network_error = network_error + +class NetworkTest(Event): + """ + Event to run network connectivity tests, either raise errors or generate events as return status. + """ + def __init__(self, generateevents = True): + Event.__init__(self) + self.generateevents = generateevents + +class NetworkTestPassed(Event): + """ + Event to indicate network test has passed + """ + +class NetworkTestFailed(Event): + """ + Event to indicate network test has failed + """ + +class FindSigInfoResult(Event): + """ + Event to return results from findSigInfo command + """ + def __init__(self, result): + Event.__init__(self) + self.result = result diff --git a/poky/bitbake/lib/bb/exceptions.py b/poky/bitbake/lib/bb/exceptions.py new file mode 100644 index 0000000000..cd713439ea --- /dev/null +++ b/poky/bitbake/lib/bb/exceptions.py @@ -0,0 +1,91 @@ + +import inspect +import traceback +import bb.namedtuple_with_abc +from collections import namedtuple + + +class TracebackEntry(namedtuple.abc): + """Pickleable representation of a traceback entry""" + _fields = 'filename lineno function args code_context index' + _header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}' + + def format(self, formatter=None): + if not self.code_context: + return self._header.format(self) + '\n' + + formatted = [self._header.format(self) + ':\n'] + + for lineindex, line in enumerate(self.code_context): + if formatter: + line = formatter(line) + + if lineindex == self.index: + formatted.append(' >%s' % line) + else: + formatted.append(' %s' % line) + return formatted + + def __str__(self): + return ''.join(self.format()) + +def _get_frame_args(frame): + """Get the formatted arguments and class (if available) for a frame""" + arginfo = inspect.getargvalues(frame) + + try: + if not arginfo.args: + return '', None + # There have been reports from the field of python 2.6 which doesn't + # return a namedtuple here but simply a tuple so fallback gracefully if + # args isn't present. 
+ except AttributeError: + return '', None + + firstarg = arginfo.args[0] + if firstarg == 'self': + self = arginfo.locals['self'] + cls = self.__class__.__name__ + + arginfo.args.pop(0) + del arginfo.locals['self'] + else: + cls = None + + formatted = inspect.formatargvalues(*arginfo) + return formatted, cls + +def extract_traceback(tb, context=1): + frames = inspect.getinnerframes(tb, context) + for frame, filename, lineno, function, code_context, index in frames: + formatted_args, cls = _get_frame_args(frame) + if cls: + function = '%s.%s' % (cls, function) + yield TracebackEntry(filename, lineno, function, formatted_args, + code_context, index) + +def format_extracted(extracted, formatter=None, limit=None): + if limit: + extracted = extracted[-limit:] + + formatted = [] + for tracebackinfo in extracted: + formatted.extend(tracebackinfo.format(formatter)) + return formatted + + +def format_exception(etype, value, tb, context=1, limit=None, formatter=None): + formatted = ['Traceback (most recent call last):\n'] + + if hasattr(tb, 'tb_next'): + tb = extract_traceback(tb, context) + + formatted.extend(format_extracted(tb, formatter, limit)) + formatted.extend(traceback.format_exception_only(etype, value)) + return formatted + +def to_string(exc): + if isinstance(exc, SystemExit): + if not isinstance(exc.code, str): + return 'Exited with "%d"' % exc.code + return str(exc) diff --git a/poky/bitbake/lib/bb/fetch2/__init__.py b/poky/bitbake/lib/bb/fetch2/__init__.py new file mode 100644 index 0000000000..72d6092deb --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/__init__.py @@ -0,0 +1,1864 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2012 Intel Corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
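A small sketch (not from the patch) of how the traceback helpers above are meant to be used, mirroring what LogHandler in event.py does before pickling a record.

    import sys
    import bb.exceptions

    try:
        {}["missing"]                      # any exception will do
    except Exception:
        etype, value, tb = sys.exc_info()
        # extract_traceback() yields pickleable TracebackEntry tuples
        entries = list(bb.exceptions.extract_traceback(tb, context=3))
        print("".join(bb.exceptions.format_exception(etype, value, entries, limit=5)))
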
+# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os, re +import signal +import logging +import urllib.request, urllib.parse, urllib.error +if 'git' not in urllib.parse.uses_netloc: + urllib.parse.uses_netloc.append('git') +import operator +import collections +import subprocess +import pickle +import errno +import bb.persist_data, bb.utils +import bb.checksum +import bb.process +import bb.event + +__version__ = "2" +_checksum_cache = bb.checksum.FileChecksumCache() + +logger = logging.getLogger("BitBake.Fetcher") + +class BBFetchException(Exception): + """Class all fetch exceptions inherit from""" + def __init__(self, message): + self.msg = message + Exception.__init__(self, message) + + def __str__(self): + return self.msg + +class UntrustedUrl(BBFetchException): + """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS""" + def __init__(self, url, message=''): + if message: + msg = message + else: + msg = "The URL: '%s' is not trusted and cannot be used" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) + +class MalformedUrl(BBFetchException): + """Exception raised when encountering an invalid url""" + def __init__(self, url, message=''): + if message: + msg = message + else: + msg = "The URL: '%s' is invalid and cannot be interpreted" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) + +class FetchError(BBFetchException): + """General fetcher exception when something happens incorrectly""" + def __init__(self, message, url = None): + if url: + msg = "Fetcher failure for URL: '%s'. %s" % (url, message) + else: + msg = "Fetcher failure: %s" % message + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class ChecksumError(FetchError): + """Exception when mismatched checksum encountered""" + def __init__(self, message, url = None, checksum = None): + self.checksum = checksum + FetchError.__init__(self, message, url) + +class NoChecksumError(FetchError): + """Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set""" + +class UnpackError(BBFetchException): + """General fetcher exception when something happens incorrectly when unpacking""" + def __init__(self, message, url): + msg = "Unpack failure for URL: '%s'. %s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class NoMethodError(BBFetchException): + """Exception raised when there is no method to obtain a supplied url or set of urls""" + def __init__(self, url): + msg = "Could not find a fetcher which supports the URL: '%s'" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) + +class MissingParameterError(BBFetchException): + """Exception raised when a fetch method is missing a critical parameter in the url""" + def __init__(self, missing, url): + msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) + self.url = url + self.missing = missing + BBFetchException.__init__(self, msg) + self.args = (missing, url) + +class ParameterError(BBFetchException): + """Exception raised when a url cannot be proccessed due to invalid parameters.""" + def __init__(self, message, url): + msg = "URL: '%s' has invalid parameters. 
%s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class NetworkAccess(BBFetchException): + """Exception raised when network access is disabled but it is required.""" + def __init__(self, url, cmd): + msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) + self.url = url + self.cmd = cmd + BBFetchException.__init__(self, msg) + self.args = (url, cmd) + +class NonLocalMethod(Exception): + def __init__(self): + Exception.__init__(self) + +class MissingChecksumEvent(bb.event.Event): + def __init__(self, url, md5sum, sha256sum): + self.url = url + self.checksums = {'md5sum': md5sum, + 'sha256sum': sha256sum} + bb.event.Event.__init__(self) + + +class URI(object): + """ + A class representing a generic URI, with methods for + accessing the URI components, and stringifies to the + URI. + + It is constructed by calling it with a URI, or setting + the attributes manually: + + uri = URI("http://example.com/") + + uri = URI() + uri.scheme = 'http' + uri.hostname = 'example.com' + uri.path = '/' + + It has the following attributes: + + * scheme (read/write) + * userinfo (authentication information) (read/write) + * username (read/write) + * password (read/write) + + Note, password is deprecated as of RFC 3986. + + * hostname (read/write) + * port (read/write) + * hostport (read only) + "hostname:port", if both are set, otherwise just "hostname" + * path (read/write) + * path_quoted (read/write) + A URI quoted version of path + * params (dict) (read/write) + * query (dict) (read/write) + * relative (bool) (read only) + True if this is a "relative URI", (e.g. file:foo.diff) + + It stringifies to the URI itself. + + Some notes about relative URIs: while it's specified that + a URI beginning with :// should either be directly + followed by a hostname or a /, the old URI handling of the + fetch2 library did not comform to this. Therefore, this URI + class has some kludges to make sure that URIs are parsed in + a way comforming to bitbake's current usage. This URI class + supports the following: + + file:relative/path.diff (IETF compliant) + git:relative/path.git (IETF compliant) + git:///absolute/path.git (IETF compliant) + file:///absolute/path.diff (IETF compliant) + + file://relative/path.diff (not IETF compliant) + + But it does not support the following: + + file://hostname/absolute/path.diff (would be IETF compliant) + + Note that the last case only applies to a list of + "whitelisted" schemes (currently only file://), that requires + its URIs to not have a network location. + """ + + _relative_schemes = ['file', 'git'] + _netloc_forbidden = ['file'] + + def __init__(self, uri=None): + self.scheme = '' + self.userinfo = '' + self.hostname = '' + self.port = None + self._path = '' + self.params = {} + self.query = {} + self.relative = False + + if not uri: + return + + # We hijack the URL parameters, since the way bitbake uses + # them are not quite RFC compliant. + uri, param_str = (uri.split(";", 1) + [None])[:2] + + urlp = urllib.parse.urlparse(uri) + self.scheme = urlp.scheme + + reparse = 0 + + # Coerce urlparse to make URI scheme use netloc + if not self.scheme in urllib.parse.uses_netloc: + urllib.parse.uses_params.append(self.scheme) + reparse = 1 + + # Make urlparse happy(/ier) by converting local resources + # to RFC compliant URL format. 
E.g.: + # file://foo.diff -> file:foo.diff + if urlp.scheme in self._netloc_forbidden: + uri = re.sub("(?<=:)//(?!/)", "", uri, 1) + reparse = 1 + + if reparse: + urlp = urllib.parse.urlparse(uri) + + # Identify if the URI is relative or not + if urlp.scheme in self._relative_schemes and \ + re.compile("^\w+:(?!//)").match(uri): + self.relative = True + + if not self.relative: + self.hostname = urlp.hostname or '' + self.port = urlp.port + + self.userinfo += urlp.username or '' + + if urlp.password: + self.userinfo += ':%s' % urlp.password + + self.path = urllib.parse.unquote(urlp.path) + + if param_str: + self.params = self._param_str_split(param_str, ";") + if urlp.query: + self.query = self._param_str_split(urlp.query, "&") + + def __str__(self): + userinfo = self.userinfo + if userinfo: + userinfo += '@' + + return "%s:%s%s%s%s%s%s" % ( + self.scheme, + '' if self.relative else '//', + userinfo, + self.hostport, + self.path_quoted, + self._query_str(), + self._param_str()) + + def _param_str(self): + return ( + ''.join([';', self._param_str_join(self.params, ";")]) + if self.params else '') + + def _query_str(self): + return ( + ''.join(['?', self._param_str_join(self.query, "&")]) + if self.query else '') + + def _param_str_split(self, string, elmdelim, kvdelim="="): + ret = collections.OrderedDict() + for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]: + ret[k] = v + return ret + + def _param_str_join(self, dict_, elmdelim, kvdelim="="): + return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()]) + + @property + def hostport(self): + if not self.port: + return self.hostname + return "%s:%d" % (self.hostname, self.port) + + @property + def path_quoted(self): + return urllib.parse.quote(self.path) + + @path_quoted.setter + def path_quoted(self, path): + self.path = urllib.parse.unquote(path) + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + self._path = path + + if not path or re.compile("^/").match(path): + self.relative = False + else: + self.relative = True + + @property + def username(self): + if self.userinfo: + return (self.userinfo.split(":", 1))[0] + return '' + + @username.setter + def username(self, username): + password = self.password + self.userinfo = username + if password: + self.userinfo += ":%s" % password + + @property + def password(self): + if self.userinfo and ":" in self.userinfo: + return (self.userinfo.split(":", 1))[1] + return '' + + @password.setter + def password(self, password): + self.userinfo = "%s:%s" % (self.username, password) + +def decodeurl(url): + """Decodes an URL into the tokens (scheme, network location, path, + user, password, parameters). 
+ """ + + m = re.compile('(?P[^:]*)://((?P[^/;]+)@)?(?P[^;]+)(;(?P.*))?').match(url) + if not m: + raise MalformedUrl(url) + + type = m.group('type') + location = m.group('location') + if not location: + raise MalformedUrl(url) + user = m.group('user') + parm = m.group('parm') + + locidx = location.find('/') + if locidx != -1 and type.lower() != 'file': + host = location[:locidx] + path = location[locidx:] + elif type.lower() == 'file': + host = "" + path = location + else: + host = location + path = "" + if user: + m = re.compile('(?P[^:]+)(:?(?P.*))').match(user) + if m: + user = m.group('user') + pswd = m.group('pswd') + else: + user = '' + pswd = '' + + p = collections.OrderedDict() + if parm: + for s in parm.split(';'): + if s: + if not '=' in s: + raise MalformedUrl(url, "The URL: '%s' is invalid: parameter %s does not specify a value (missing '=')" % (url, s)) + s1, s2 = s.split('=') + p[s1] = s2 + + return type, host, urllib.parse.unquote(path), user, pswd, p + +def encodeurl(decoded): + """Encodes a URL from tokens (scheme, network location, path, + user, password, parameters). + """ + + type, host, path, user, pswd, p = decoded + + if not type: + raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) + url = '%s://' % type + if user and type != "file": + url += "%s" % user + if pswd: + url += ":%s" % pswd + url += "@" + if host and type != "file": + url += "%s" % host + if path: + # Standardise path to ensure comparisons work + while '//' in path: + path = path.replace("//", "/") + url += "%s" % urllib.parse.quote(path) + if p: + for parm in p: + url += ";%s=%s" % (parm, p[parm]) + + return url + +def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None): + if not ud.url or not uri_find or not uri_replace: + logger.error("uri_replace: passed an undefined value, not replacing") + return None + uri_decoded = list(decodeurl(ud.url)) + uri_find_decoded = list(decodeurl(uri_find)) + uri_replace_decoded = list(decodeurl(uri_replace)) + logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded)) + result_decoded = ['', '', '', '', '', {}] + for loc, i in enumerate(uri_find_decoded): + result_decoded[loc] = uri_decoded[loc] + regexp = i + if loc == 0 and regexp and not regexp.endswith("$"): + # Leaving the type unanchored can mean "https" matching "file" can become "files" + # which is clearly undesirable. 
+ regexp += "$" + if loc == 5: + # Handle URL parameters + if i: + # Any specified URL parameters must match + for k in uri_replace_decoded[loc]: + if uri_decoded[loc][k] != uri_replace_decoded[loc][k]: + return None + # Overwrite any specified replacement parameters + for k in uri_replace_decoded[loc]: + for l in replacements: + uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l]) + result_decoded[loc][k] = uri_replace_decoded[loc][k] + elif (re.match(regexp, uri_decoded[loc])): + if not uri_replace_decoded[loc]: + result_decoded[loc] = "" + else: + for k in replacements: + uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) + #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc])) + result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1) + if loc == 2: + # Handle path manipulations + basename = None + if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball: + # If the source and destination url types differ, must be a mirrortarball mapping + basename = os.path.basename(mirrortarball) + # Kill parameters, they make no sense for mirror tarballs + uri_decoded[5] = {} + elif ud.localpath and ud.method.supports_checksum(ud): + basename = os.path.basename(ud.localpath) + if basename and not result_decoded[loc].endswith(basename): + result_decoded[loc] = os.path.join(result_decoded[loc], basename) + else: + return None + result = encodeurl(result_decoded) + if result == ud.url: + return None + logger.debug(2, "For url %s returning %s" % (ud.url, result)) + return result + +methods = [] +urldata_cache = {} +saved_headrevs = {} + +def fetcher_init(d): + """ + Called to initialize the fetchers once the configuration data is known. + Calls before this must not hit the cache. + """ + # When to drop SCM head revisions controlled by user policy + srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear" + if srcrev_policy == "cache": + logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) + elif srcrev_policy == "clear": + logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) + revs = bb.persist_data.persist('BB_URI_HEADREVS', d) + try: + bb.fetch2.saved_headrevs = revs.items() + except: + pass + revs.clear() + else: + raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy) + + _checksum_cache.init_cache(d) + + for m in methods: + if hasattr(m, "init"): + m.init(d) + +def fetcher_parse_save(): + _checksum_cache.save_extras() + +def fetcher_parse_done(): + _checksum_cache.save_merge() + +def fetcher_compare_revisions(): + """ + Compare the revisions in the persistant cache with current values and + return true/false on whether they've changed. + """ + + data = bb.persist_data.persist('BB_URI_HEADREVS', d).items() + data2 = bb.fetch2.saved_headrevs + + changed = False + for key in data: + if key not in data2 or data2[key] != data[key]: + logger.debug(1, "%s changed", key) + changed = True + return True + else: + logger.debug(2, "%s did not change", key) + return False + +def mirror_from_string(data): + mirrors = (data or "").replace('\\n',' ').split() + # Split into pairs + if len(mirrors) % 2 != 0: + bb.warn('Invalid mirror data %s, should have paired members.' 
% data) + return list(zip(*[iter(mirrors)]*2)) + +def verify_checksum(ud, d, precomputed={}): + """ + verify the MD5 and SHA256 checksum for downloaded src + + Raises a FetchError if one or both of the SRC_URI checksums do not match + the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no + checksums specified. + + Returns a dict of checksums that can be stored in a done stamp file and + passed in as precomputed parameter in a later call to avoid re-computing + the checksums from the file. This allows verifying the checksums of the + file against those in the recipe each time, rather than only after + downloading. See https://bugzilla.yoctoproject.org/show_bug.cgi?id=5571. + """ + + _MD5_KEY = "md5" + _SHA256_KEY = "sha256" + + if ud.ignore_checksums or not ud.method.supports_checksum(ud): + return {} + + if _MD5_KEY in precomputed: + md5data = precomputed[_MD5_KEY] + else: + md5data = bb.utils.md5_file(ud.localpath) + + if _SHA256_KEY in precomputed: + sha256data = precomputed[_SHA256_KEY] + else: + sha256data = bb.utils.sha256_file(ud.localpath) + + if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected: + # If strict checking enabled and neither sum defined, raise error + strict = d.getVar("BB_STRICT_CHECKSUM") or "0" + if strict == "1": + logger.error('No checksum specified for %s, please add at least one to the recipe:\n' + 'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' % + (ud.localpath, ud.md5_name, md5data, + ud.sha256_name, sha256data)) + raise NoChecksumError('Missing SRC_URI checksum', ud.url) + + bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d) + + if strict == "ignore": + return { + _MD5_KEY: md5data, + _SHA256_KEY: sha256data + } + + # Log missing sums so user can more easily add them + logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n' + 'SRC_URI[%s] = "%s"', + ud.localpath, ud.md5_name, md5data) + logger.warning('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n' + 'SRC_URI[%s] = "%s"', + ud.localpath, ud.sha256_name, sha256data) + + # We want to alert the user if a checksum is defined in the recipe but + # it does not match. + msg = "" + mismatch = False + if ud.md5_expected and ud.md5_expected != md5data: + msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected) + mismatch = True; + + if ud.sha256_expected and ud.sha256_expected != sha256data: + msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected) + mismatch = True; + + if mismatch: + msg = msg + '\nIf this change is expected (e.g. you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data) + + if len(msg): + raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data) + + return { + _MD5_KEY: md5data, + _SHA256_KEY: sha256data + } + + +def verify_donestamp(ud, d, origud=None): + """ + Check whether the done stamp file has the right checksums (if the fetch + method supports them). If it doesn't, delete the done stamp and force + a re-download. + + Returns True, if the donestamp exists and is valid, False otherwise. 
When + returning False, any existing done stamps are removed. + """ + if not ud.needdonestamp or (origud and not origud.needdonestamp): + return True + + if not os.path.exists(ud.localpath): + # local path does not exist + if os.path.exists(ud.donestamp): + # done stamp exists, but the downloaded file does not; the done stamp + # must be incorrect, re-trigger the download + bb.utils.remove(ud.donestamp) + return False + + if (not ud.method.supports_checksum(ud) or + (origud and not origud.method.supports_checksum(origud))): + # if done stamp exists and checksums not supported; assume the local + # file is current + return os.path.exists(ud.donestamp) + + precomputed_checksums = {} + # Only re-use the precomputed checksums if the donestamp is newer than the + # file. Do not rely on the mtime of directories, though. If ud.localpath is + # a directory, there will probably not be any checksums anyway. + if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or + os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)): + try: + with open(ud.donestamp, "rb") as cachefile: + pickled = pickle.Unpickler(cachefile) + precomputed_checksums.update(pickled.load()) + except Exception as e: + # Avoid the warnings on the upgrade path from emtpy done stamp + # files to those containing the checksums. + if not isinstance(e, EOFError): + # Ignore errors, they aren't fatal + logger.warning("Couldn't load checksums from donestamp %s: %s " + "(msg: %s)" % (ud.donestamp, type(e).__name__, + str(e))) + + try: + checksums = verify_checksum(ud, d, precomputed_checksums) + # If the cache file did not have the checksums, compute and store them + # as an upgrade path from the previous done stamp file format. + if checksums != precomputed_checksums: + with open(ud.donestamp, "wb") as cachefile: + p = pickle.Pickler(cachefile, 2) + p.dump(checksums) + return True + except ChecksumError as e: + # Checksums failed to verify, trigger re-download and remove the + # incorrect stamp file. + logger.warning("Checksum mismatch for local file %s\n" + "Cleaning and trying again." % ud.localpath) + if os.path.exists(ud.localpath): + rename_bad_checksum(ud, e.checksum) + bb.utils.remove(ud.donestamp) + return False + + +def update_stamp(ud, d): + """ + donestamp is file stamp indicating the whole fetching is done + this function update the stamp after verifying the checksum + """ + if not ud.needdonestamp: + return + + if os.path.exists(ud.donestamp): + # Touch the done stamp file to show active use of the download + try: + os.utime(ud.donestamp, None) + except: + # Errors aren't fatal here + pass + else: + try: + checksums = verify_checksum(ud, d) + # Store the checksums for later re-verification against the recipe + with open(ud.donestamp, "wb") as cachefile: + p = pickle.Pickler(cachefile, 2) + p.dump(checksums) + except ChecksumError as e: + # Checksums failed to verify, trigger re-download and remove the + # incorrect stamp file. + logger.warning("Checksum mismatch for local file %s\n" + "Cleaning and trying again." % ud.localpath) + if os.path.exists(ud.localpath): + rename_bad_checksum(ud, e.checksum) + bb.utils.remove(ud.donestamp) + raise + +def subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
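+ # Restoring SIG_DFL below gives children (e.g. gzip, shell pipelines)
+ # the conventional die-on-SIGPIPE behaviour.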
+ # SIGPIPE errors are known issues with gzip/bash + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +def get_autorev(d): + # only not cache src rev in autorev case + if d.getVar('BB_SRCREV_POLICY') != "cache": + d.setVar('BB_DONT_CACHE', '1') + return "AUTOINC" + +def get_srcrev(d, method_name='sortable_revision'): + """ + Return the revision string, usually for use in the version string (PV) of the current package + Most packages usually only have one SCM so we just pass on the call. + In the multi SCM case, we build a value based on SRCREV_FORMAT which must + have been set. + + The idea here is that we put the string "AUTOINC+" into return value if the revisions are not + incremental, other code is then responsible for turning that into an increasing value (if needed) + + A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if + that fetcher provides a method with the given name and the same signature as sortable_revision. + """ + + scms = [] + fetcher = Fetch(d.getVar('SRC_URI').split(), d) + urldata = fetcher.ud + for u in urldata: + if urldata[u].method.supports_srcrev(): + scms.append(u) + + if len(scms) == 0: + raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI") + + if len(scms) == 1 and len(urldata[scms[0]].names) == 1: + autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0]) + if len(rev) > 10: + rev = rev[:10] + if autoinc: + return "AUTOINC+" + rev + return rev + + # + # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT + # + format = d.getVar('SRCREV_FORMAT') + if not format: + raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.") + + name_to_rev = {} + seenautoinc = False + for scm in scms: + ud = urldata[scm] + for name in ud.names: + autoinc, rev = getattr(ud.method, method_name)(ud, d, name) + seenautoinc = seenautoinc or autoinc + if len(rev) > 10: + rev = rev[:10] + name_to_rev[name] = rev + # Replace names by revisions in the SRCREV_FORMAT string. The approach used + # here can handle names being prefixes of other names and names appearing + # as substrings in revisions (in which case the name should not be + # expanded). The '|' regular expression operator tries matches from left to + # right, so we need to sort the names with the longest ones first. + names_descending_len = sorted(name_to_rev, key=len, reverse=True) + name_to_rev_re = "|".join(re.escape(name) for name in names_descending_len) + format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format) + + if seenautoinc: + format = "AUTOINC+" + format + + return format + +def localpath(url, d): + fetcher = bb.fetch2.Fetch([url], d) + return fetcher.localpath(url) + +def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None): + """ + Run cmd returning the command output + Raise an error if interrupted or cmd fails + Optionally echo command output to stdout + Optionally remove the files/directories listed in cleanup upon failure + """ + + # Need to export PATH as binary could be in metadata paths + # rather than host provided + # Also include some other variables. + # FIXME: Should really include all export varaiables? 
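+ # Each variable listed here that has a value in the datastore (or in
+ # BB_ORIGENV) is prepended to the command as an 'export VAR="..."' prefix below.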
+ exportvars = ['HOME', 'PATH', + 'HTTP_PROXY', 'http_proxy', + 'HTTPS_PROXY', 'https_proxy', + 'FTP_PROXY', 'ftp_proxy', + 'FTPS_PROXY', 'ftps_proxy', + 'NO_PROXY', 'no_proxy', + 'ALL_PROXY', 'all_proxy', + 'GIT_PROXY_COMMAND', + 'GIT_SSL_CAINFO', + 'GIT_SMART_HTTP', + 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', + 'SOCKS5_USER', 'SOCKS5_PASSWD', + 'DBUS_SESSION_BUS_ADDRESS', + 'P4CONFIG'] + + if not cleanup: + cleanup = [] + + # If PATH contains WORKDIR which contains PV which contains SRCPV we + # can end up in circular recursion here so give the option of breaking it + # in a data store copy. + try: + d.getVar("PV") + except bb.data_smart.ExpansionError: + d = bb.data.createCopy(d) + d.setVar("PV", "fetcheravoidrecurse") + + origenv = d.getVar("BB_ORIGENV", False) + for var in exportvars: + val = d.getVar(var) or (origenv and origenv.getVar(var)) + if val: + cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) + + # Disable pseudo as it may affect ssh, potentially causing it to hang. + cmd = 'export PSEUDO_DISABLED=1; ' + cmd + + logger.debug(1, "Running %s", cmd) + + success = False + error_message = "" + + try: + (output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir) + success = True + except bb.process.NotFoundError as e: + error_message = "Fetch command %s" % (e.command) + except bb.process.ExecutionError as e: + if e.stdout: + output = "output:\n%s\n%s" % (e.stdout, e.stderr) + elif e.stderr: + output = "output:\n%s" % e.stderr + else: + output = "no output" + error_message = "Fetch command %s failed with exit code %s, %s" % (e.command, e.exitcode, output) + except bb.process.CmdError as e: + error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg) + if not success: + for f in cleanup: + try: + bb.utils.remove(f, True) + except OSError: + pass + + raise FetchError(error_message) + + return output + +def check_network_access(d, info, url): + """ + log remote network access, and error if BB_NO_NETWORK is set or the given + URI is untrusted + """ + if d.getVar("BB_NO_NETWORK") == "1": + raise NetworkAccess(url, info) + elif not trusted_network(d, url): + raise UntrustedUrl(url, info) + else: + logger.debug(1, "Fetcher accessed the network with the command %s" % info) + +def build_mirroruris(origud, mirrors, ld): + uris = [] + uds = [] + + replacements = {} + replacements["TYPE"] = origud.type + replacements["HOST"] = origud.host + replacements["PATH"] = origud.path + replacements["BASENAME"] = origud.path.split("/")[-1] + replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') + + def adduri(ud, uris, uds, mirrors, tarballs): + for line in mirrors: + try: + (find, replace) = line + except ValueError: + continue + + for tarball in tarballs: + newuri = uri_replace(ud, find, replace, replacements, ld, tarball) + if not newuri or newuri in uris or newuri == origud.url: + continue + + if not trusted_network(ld, newuri): + logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri)) + continue + + # Create a local copy of the mirrors minus the current line + # this will prevent us from recursively processing the same line + # as well as indirect recursion A -> B -> C -> A + localmirrors = list(mirrors) + localmirrors.remove(line) + + try: + newud = FetchData(newuri, ld) + newud.setup_localpath(ld) + except bb.fetch2.BBFetchException as e: + logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) + logger.debug(1, str(e)) + try: 
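+ # newud may still be unbound here if FetchData() above raised before
+ # assigning it; the UnboundLocalError handler below covers that case.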
+ # setup_localpath of file:// urls may fail, we should still see + # if mirrors of the url exist + adduri(newud, uris, uds, localmirrors, tarballs) + except UnboundLocalError: + pass + continue + uris.append(newuri) + uds.append(newud) + + adduri(newud, uris, uds, localmirrors, tarballs) + + adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None]) + + return uris, uds + +def rename_bad_checksum(ud, suffix): + """ + Renames files to have suffix from parameter + """ + + if ud.localpath is None: + return + + new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix) + bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath)) + bb.utils.movefile(ud.localpath, new_localpath) + + +def try_mirror_url(fetch, origud, ud, ld, check = False): + # Return of None or a value means we're finished + # False means try another url + + if ud.lockfile and ud.lockfile != origud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + try: + if check: + found = ud.method.checkstatus(fetch, ud, ld) + if found: + return found + return False + + if not verify_donestamp(ud, ld, origud) or ud.method.need_update(ud, ld): + ud.method.download(ud, ld) + if hasattr(ud.method,"build_mirror_data"): + ud.method.build_mirror_data(ud, ld) + + if not ud.localpath or not os.path.exists(ud.localpath): + return False + + if ud.localpath == origud.localpath: + return ud.localpath + + # We may be obtaining a mirror tarball which needs further processing by the real fetcher + # If that tarball is a local file:// we need to provide a symlink to it + dldir = ld.getVar("DL_DIR") + + if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): + # Create donestamp in old format to avoid triggering a re-download + if ud.donestamp: + bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) + open(ud.donestamp, 'w').close() + dest = os.path.join(dldir, os.path.basename(ud.localpath)) + if not os.path.exists(dest): + # In case this is executing without any file locks held (as is + # the case for file:// URLs), two tasks may end up here at the + # same time, in which case we do not want the second task to + # fail when the link has already been created by the first task. + try: + os.symlink(ud.localpath, dest) + except FileExistsError: + pass + if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld): + origud.method.download(origud, ld) + if hasattr(origud.method, "build_mirror_data"): + origud.method.build_mirror_data(origud, ld) + return origud.localpath + # Otherwise the result is a local file:// and we symlink to it + if not os.path.exists(origud.localpath): + if os.path.islink(origud.localpath): + # Broken symbolic link + os.unlink(origud.localpath) + + # As per above, in case two tasks end up here simultaneously. + try: + os.symlink(ud.localpath, origud.localpath) + except FileExistsError: + pass + update_stamp(origud, ld) + return ud.localpath + + except bb.fetch2.NetworkAccess: + raise + + except IOError as e: + if e.errno in [os.errno.ESTALE]: + logger.warning("Stale Error Observed %s." % ud.url) + return False + raise + + except bb.fetch2.BBFetchException as e: + if isinstance(e, ChecksumError): + logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." 
% (ud.url, origud.url)) + logger.warning(str(e)) + if os.path.exists(ud.localpath): + rename_bad_checksum(ud, e.checksum) + elif isinstance(e, NoChecksumError): + raise + else: + logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url)) + logger.debug(1, str(e)) + try: + ud.method.clean(ud, ld) + except UnboundLocalError: + pass + return False + finally: + if ud.lockfile and ud.lockfile != origud.lockfile: + bb.utils.unlockfile(lf) + + +def try_mirrors(fetch, d, origud, mirrors, check = False): + """ + Try to use a mirrored version of the sources. + This method will be automatically called before the fetchers go. + + d Is a bb.data instance + uri is the original uri we're trying to download + mirrors is the list of mirrors we're going to try + """ + ld = d.createCopy() + + uris, uds = build_mirroruris(origud, mirrors, ld) + + for index, uri in enumerate(uris): + ret = try_mirror_url(fetch, origud, uds[index], ld, check) + if ret != False: + return ret + return None + +def trusted_network(d, url): + """ + Use a trusted url during download if networking is enabled and + BB_ALLOWED_NETWORKS is set globally or for a specific recipe. + Note: modifies SRC_URI & mirrors. + """ + if d.getVar('BB_NO_NETWORK') == "1": + return True + + pkgname = d.expand(d.getVar('PN', False)) + trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False) + + if not trusted_hosts: + trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS') + + # Not enabled. + if not trusted_hosts: + return True + + scheme, network, path, user, passwd, param = decodeurl(url) + + if not network: + return True + + network = network.split(':')[0] + network = network.lower() + + for host in trusted_hosts.split(" "): + host = host.lower() + if host.startswith("*.") and ("." + network).endswith(host[1:]): + return True + if host == network: + return True + + return False + +def srcrev_internal_helper(ud, d, name): + """ + Return: + a) a source revision if specified + b) latest revision if SRCREV="AUTOINC" + c) None if not specified + """ + + srcrev = None + pn = d.getVar("PN") + attempts = [] + if name != '' and pn: + attempts.append("SRCREV_%s_pn-%s" % (name, pn)) + if name != '': + attempts.append("SRCREV_%s" % name) + if pn: + attempts.append("SRCREV_pn-%s" % pn) + attempts.append("SRCREV") + + for a in attempts: + srcrev = d.getVar(a) + if srcrev and srcrev != "INVALID": + break + + if 'rev' in ud.parm and 'tag' in ud.parm: + raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." 
% (ud.url)) + + if 'rev' in ud.parm or 'tag' in ud.parm: + if 'rev' in ud.parm: + parmrev = ud.parm['rev'] + else: + parmrev = ud.parm['tag'] + if srcrev == "INVALID" or not srcrev: + return parmrev + if srcrev != parmrev: + raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev)) + return parmrev + + if srcrev == "INVALID" or not srcrev: + raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url) + if srcrev == "AUTOINC": + srcrev = ud.method.latest_revision(ud, d, name) + + return srcrev + +def get_checksum_file_list(d): + """ Get a list of files checksum in SRC_URI + + Returns the resolved local paths of all local file entries in + SRC_URI as a space-separated string + """ + fetch = Fetch([], d, cache = False, localonly = True) + + dl_dir = d.getVar('DL_DIR') + filelist = [] + for u in fetch.urls: + ud = fetch.ud[u] + + if ud and isinstance(ud.method, local.Local): + paths = ud.method.localpaths(ud, d) + for f in paths: + pth = ud.decodedurl + if '*' in pth: + f = os.path.join(os.path.abspath(f), pth) + if f.startswith(dl_dir): + # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else + if os.path.exists(f): + bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f))) + else: + bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f))) + filelist.append(f + ":" + str(os.path.exists(f))) + + return " ".join(filelist) + +def get_file_checksums(filelist, pn): + """Get a list of the checksums for a list of local files + + Returns the checksums for a list of local files, caching the results as + it proceeds + + """ + return _checksum_cache.get_checksums(filelist, pn) + + +class FetchData(object): + """ + A class which represents the fetcher state for a given URI. + """ + def __init__(self, url, d, localonly = False): + # localpath is the location of a downloaded result. If not set, the file is local. 
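+ # donestamp and lockfile are derived from localpath near the end of
+ # __init__; the other attributes are filled in from the decoded URL and
+ # the fetch method's urldata_init() below.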
+ self.donestamp = None + self.needdonestamp = True + self.localfile = "" + self.localpath = None + self.lockfile = None + self.mirrortarballs = [] + self.basename = None + self.basepath = None + (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url)) + self.date = self.getSRCDate(d) + self.url = url + if not self.user and "user" in self.parm: + self.user = self.parm["user"] + if not self.pswd and "pswd" in self.parm: + self.pswd = self.parm["pswd"] + self.setup = False + + if "name" in self.parm: + self.md5_name = "%s.md5sum" % self.parm["name"] + self.sha256_name = "%s.sha256sum" % self.parm["name"] + else: + self.md5_name = "md5sum" + self.sha256_name = "sha256sum" + if self.md5_name in self.parm: + self.md5_expected = self.parm[self.md5_name] + elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: + self.md5_expected = None + else: + self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name) + if self.sha256_name in self.parm: + self.sha256_expected = self.parm[self.sha256_name] + elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]: + self.sha256_expected = None + else: + self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name) + self.ignore_checksums = False + + self.names = self.parm.get("name",'default').split(',') + + self.method = None + for m in methods: + if m.supports(self, d): + self.method = m + break + + if not self.method: + raise NoMethodError(url) + + if localonly and not isinstance(self.method, local.Local): + raise NonLocalMethod() + + if self.parm.get("proto", None) and "protocol" not in self.parm: + logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN')) + self.parm["protocol"] = self.parm.get("proto", None) + + if hasattr(self.method, "urldata_init"): + self.method.urldata_init(self, d) + + if "localpath" in self.parm: + # if user sets localpath for file, use it instead. + self.localpath = self.parm["localpath"] + self.basename = os.path.basename(self.localpath) + elif self.localfile: + self.localpath = self.method.localpath(self, d) + + dldir = d.getVar("DL_DIR") + + if not self.needdonestamp: + return + + # Note: .done and .lock files should always be in DL_DIR whereas localpath may not be. 
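+ # Pick a base name inside DL_DIR for those stamp files: reuse localpath
+ # when it already lives under DL_DIR, otherwise fall back to the file's
+ # basename (or basepath) placed inside DL_DIR.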
+ if self.localpath and self.localpath.startswith(dldir): + basepath = self.localpath + elif self.localpath: + basepath = dldir + os.sep + os.path.basename(self.localpath) + elif self.basepath or self.basename: + basepath = dldir + os.sep + (self.basepath or self.basename) + else: + bb.fatal("Can't determine lock path for url %s" % url) + + self.donestamp = basepath + '.done' + self.lockfile = basepath + '.lock' + + def setup_revisions(self, d): + self.revisions = {} + for name in self.names: + self.revisions[name] = srcrev_internal_helper(self, d, name) + + # add compatibility code for non name specified case + if len(self.names) == 1: + self.revision = self.revisions[self.names[0]] + + def setup_localpath(self, d): + if not self.localpath: + self.localpath = self.method.localpath(self, d) + + def getSRCDate(self, d): + """ + Return the SRC Date for the component + + d the bb.data module + """ + if "srcdate" in self.parm: + return self.parm['srcdate'] + + pn = d.getVar("PN") + + if pn: + return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE") + + return d.getVar("SRCDATE") or d.getVar("DATE") + +class FetchMethod(object): + """Base class for 'fetch'ing data""" + + def __init__(self, urls=None): + self.urls = [] + + def supports(self, urldata, d): + """ + Check to see if this fetch class supports a given url. + """ + return 0 + + def localpath(self, urldata, d): + """ + Return the local filename of a given url assuming a successful fetch. + Can also setup variables in urldata for use in go (saving code duplication + and duplicate code execution) + """ + return os.path.join(d.getVar("DL_DIR"), urldata.localfile) + + def supports_checksum(self, urldata): + """ + Is localpath something that can be represented by a checksum? + """ + + # We cannot compute checksums for directories + if os.path.isdir(urldata.localpath) == True: + return False + if urldata.localpath.find("*") != -1: + return False + + return True + + def recommends_checksum(self, urldata): + """ + Is the backend on where checksumming is recommended (should warnings + be displayed if there is no checksum)? + """ + return False + + def _strip_leading_slashes(self, relpath): + """ + Remove leading slash as os.path.join can't cope + """ + while os.path.isabs(relpath): + relpath = relpath[1:] + return relpath + + def setUrls(self, urls): + self.__urls = urls + + def getUrls(self): + return self.__urls + + urls = property(getUrls, setUrls, None, "Urls property") + + def need_update(self, ud, d): + """ + Force a fetch, even if localpath exists? 
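+ The base implementation only requests a fetch when localpath is missing.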
+ """ + if os.path.exists(ud.localpath): + return False + return True + + def supports_srcrev(self): + """ + The fetcher supports auto source revisions (SRCREV) + """ + return False + + def download(self, urldata, d): + """ + Fetch urls + Assumes localpath was called first + """ + raise NoMethodError(url) + + def unpack(self, urldata, rootdir, data): + iterate = False + file = urldata.localpath + + # Localpath can't deal with 'dir/*' entries, so it converts them to '.', + # but it must be corrected back for local files copying + if urldata.basename == '*' and file.endswith('/.'): + file = '%s/%s' % (file.rstrip('/.'), urldata.path) + + try: + unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) + except ValueError as exc: + bb.fatal("Invalid value for 'unpack' parameter for %s: %s" % + (file, urldata.parm.get('unpack'))) + + base, ext = os.path.splitext(file) + if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']: + efile = os.path.join(rootdir, os.path.basename(base)) + else: + efile = file + cmd = None + + if unpack: + if file.endswith('.tar'): + cmd = 'tar x --no-same-owner -f %s' % file + elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): + cmd = 'tar xz --no-same-owner -f %s' % file + elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'): + cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file + elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'): + cmd = 'gzip -dc %s > %s' % (file, efile) + elif file.endswith('.bz2'): + cmd = 'bzip2 -dc %s > %s' % (file, efile) + elif file.endswith('.txz') or file.endswith('.tar.xz'): + cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file + elif file.endswith('.xz'): + cmd = 'xz -dc %s > %s' % (file, efile) + elif file.endswith('.tar.lz'): + cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file + elif file.endswith('.lz'): + cmd = 'lzip -dc %s > %s' % (file, efile) + elif file.endswith('.tar.7z'): + cmd = '7z x -so %s | tar x --no-same-owner -f -' % file + elif file.endswith('.7z'): + cmd = '7za x -y %s 1>/dev/null' % file + elif file.endswith('.zip') or file.endswith('.jar'): + try: + dos = bb.utils.to_boolean(urldata.parm.get('dos'), False) + except ValueError as exc: + bb.fatal("Invalid value for 'dos' parameter for %s: %s" % + (file, urldata.parm.get('dos'))) + cmd = 'unzip -q -o' + if dos: + cmd = '%s -a' % cmd + cmd = "%s '%s'" % (cmd, file) + elif file.endswith('.rpm') or file.endswith('.srpm'): + if 'extract' in urldata.parm: + unpack_file = urldata.parm.get('extract') + cmd = 'rpm2cpio.sh %s | cpio -id %s' % (file, unpack_file) + iterate = True + iterate_file = unpack_file + else: + cmd = 'rpm2cpio.sh %s | cpio -id' % (file) + elif file.endswith('.deb') or file.endswith('.ipk'): + output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True) + datafile = None + if output: + for line in output.decode().splitlines(): + if line.startswith('data.tar.'): + datafile = line + break + else: + raise UnpackError("Unable to unpack deb/ipk package - does not contain data.tar.* file", urldata.url) + else: + raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url) + cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile) + + # If 'subdir' param exists, create a dir and use it as destination for unpack cmd + if 'subdir' in urldata.parm: + subdir = urldata.parm.get('subdir') + if os.path.isabs(subdir): + if not 
os.path.realpath(subdir).startswith(os.path.realpath(rootdir)): + raise UnpackError("subdir argument isn't a subdirectory of unpack root %s" % rootdir, urldata.url) + unpackdir = subdir + else: + unpackdir = os.path.join(rootdir, subdir) + bb.utils.mkdirhier(unpackdir) + else: + unpackdir = rootdir + + if not unpack or not cmd: + # If file == dest, then avoid any copies, as we already put the file into dest! + dest = os.path.join(unpackdir, os.path.basename(file)) + if file != dest and not (os.path.exists(dest) and os.path.samefile(file, dest)): + destdir = '.' + # For file:// entries all intermediate dirs in path must be created at destination + if urldata.type == "file": + # Trailing '/' does a copying to wrong place + urlpath = urldata.path.rstrip('/') + # Want files places relative to cwd so no leading '/' + urlpath = urlpath.lstrip('/') + if urlpath.find("/") != -1: + destdir = urlpath.rsplit("/", 1)[0] + '/' + bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir)) + cmd = 'cp -fpPRH %s %s' % (file, destdir) + + if not cmd: + return + + path = data.getVar('PATH') + if path: + cmd = "PATH=\"%s\" %s" % (path, cmd) + bb.note("Unpacking %s to %s/" % (file, unpackdir)) + ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=unpackdir) + + if ret != 0: + raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url) + + if iterate is True: + iterate_urldata = urldata + iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file) + self.unpack(urldata, rootdir, data) + + return + + def clean(self, urldata, d): + """ + Clean any existing full or partial download + """ + bb.utils.remove(urldata.localpath) + + def try_premirror(self, urldata, d): + """ + Should premirrors be used? + """ + return True + + def checkstatus(self, fetch, urldata, d): + """ + Check the status of a URL + Assumes localpath was called first + """ + logger.info("URL %s could not be checked for status since no method exists.", url) + return True + + def latest_revision(self, ud, d, name): + """ + Look in the cache for the latest revision, if not present ask the SCM. + """ + if not hasattr(self, "_latest_revision"): + raise ParameterError("The fetcher for this URL does not support _latest_revision", url) + + revs = bb.persist_data.persist('BB_URI_HEADREVS', d) + key = self.generate_revision_key(ud, d, name) + try: + return revs[key] + except KeyError: + revs[key] = rev = self._latest_revision(ud, d, name) + return rev + + def sortable_revision(self, ud, d, name): + latest_rev = self._build_revision(ud, d, name) + return True, str(latest_rev) + + def generate_revision_key(self, ud, d, name): + key = self._revision_key(ud, d, name) + return "%s-%s" % (key, d.getVar("PN") or "") + + def latest_versionstring(self, ud, d): + """ + Compute the latest release name like "x.y.x" in "x.y.x+gitHASH" + by searching through the tags output of ls-remote, comparing + versions and returning the highest match as a (version, revision) pair. 
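+ The base implementation returns ('', ''), i.e. no upstream version information.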
+ """ + return ('', '') + +class Fetch(object): + def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None): + if localonly and cache: + raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time") + + if len(urls) == 0: + urls = d.getVar("SRC_URI").split() + self.urls = urls + self.d = d + self.ud = {} + self.connection_cache = connection_cache + + fn = d.getVar('FILE') + mc = d.getVar('__BBMULTICONFIG') or "" + if cache and fn and mc + fn in urldata_cache: + self.ud = urldata_cache[mc + fn] + + for url in urls: + if url not in self.ud: + try: + self.ud[url] = FetchData(url, d, localonly) + except NonLocalMethod: + if localonly: + self.ud[url] = None + pass + + if fn and cache: + urldata_cache[mc + fn] = self.ud + + def localpath(self, url): + if url not in self.urls: + self.ud[url] = FetchData(url, self.d) + + self.ud[url].setup_localpath(self.d) + return self.d.expand(self.ud[url].localpath) + + def localpaths(self): + """ + Return a list of the local filenames, assuming successful fetch + """ + local = [] + + for u in self.urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + local.append(ud.localpath) + + return local + + def download(self, urls=None): + """ + Fetch all urls + """ + if not urls: + urls = self.urls + + network = self.d.getVar("BB_NO_NETWORK") + premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1") + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + m = ud.method + localpath = "" + + if ud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + try: + self.d.setVar("BB_NO_NETWORK", network) + + if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d): + localpath = ud.localpath + elif m.try_premirror(ud, self.d): + logger.debug(1, "Trying PREMIRRORS") + mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) + localpath = try_mirrors(self, self.d, ud, mirrors, False) + if localpath: + try: + # early checksum verification so that if the checksum of the premirror + # contents mismatch the fetcher can still try upstream and mirrors + update_stamp(ud, self.d) + except ChecksumError as e: + logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." 
% u) + logger.debug(1, str(e)) + localpath = "" + + if premirroronly: + self.d.setVar("BB_NO_NETWORK", "1") + + firsterr = None + verified_stamp = verify_donestamp(ud, self.d) + if not localpath and (not verified_stamp or m.need_update(ud, self.d)): + try: + if not trusted_network(self.d, ud.url): + raise UntrustedUrl(ud.url) + logger.debug(1, "Trying Upstream") + m.download(ud, self.d) + if hasattr(m, "build_mirror_data"): + m.build_mirror_data(ud, self.d) + localpath = ud.localpath + # early checksum verify, so that if checksum mismatched, + # fetcher still have chance to fetch from mirror + update_stamp(ud, self.d) + + except bb.fetch2.NetworkAccess: + raise + + except BBFetchException as e: + if isinstance(e, ChecksumError): + logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u) + logger.debug(1, str(e)) + if os.path.exists(ud.localpath): + rename_bad_checksum(ud, e.checksum) + elif isinstance(e, NoChecksumError): + raise + else: + logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u) + logger.debug(1, str(e)) + firsterr = e + # Remove any incomplete fetch + if not verified_stamp: + m.clean(ud, self.d) + logger.debug(1, "Trying MIRRORS") + mirrors = mirror_from_string(self.d.getVar('MIRRORS')) + localpath = try_mirrors(self, self.d, ud, mirrors) + + if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1): + if firsterr: + logger.error(str(firsterr)) + raise FetchError("Unable to fetch URL from any source.", u) + + update_stamp(ud, self.d) + + except IOError as e: + if e.errno in [os.errno.ESTALE]: + logger.error("Stale Error Observed %s." % u) + raise ChecksumError("Stale Error Detected") + + except BBFetchException as e: + if isinstance(e, ChecksumError): + logger.error("Checksum failure fetching %s" % u) + raise + + finally: + if ud.lockfile: + bb.utils.unlockfile(lf) + + def checkstatus(self, urls=None): + """ + Check all urls exist upstream + """ + + if not urls: + urls = self.urls + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + m = ud.method + logger.debug(1, "Testing URL %s", u) + # First try checking uri, u, from PREMIRRORS + mirrors = mirror_from_string(self.d.getVar('PREMIRRORS')) + ret = try_mirrors(self, self.d, ud, mirrors, True) + if not ret: + # Next try checking from the original uri, u + ret = m.checkstatus(self, ud, self.d) + if not ret: + # Finally, try checking uri, u, from MIRRORS + mirrors = mirror_from_string(self.d.getVar('MIRRORS')) + ret = try_mirrors(self, self.d, ud, mirrors, True) + + if not ret: + raise FetchError("URL %s doesn't work" % u, u) + + def unpack(self, root, urls=None): + """ + Unpack urls to root + """ + + if not urls: + urls = self.urls + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + + if ud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + ud.method.unpack(ud, root, self.d) + + if ud.lockfile: + bb.utils.unlockfile(lf) + + def clean(self, urls=None): + """ + Clean files that the fetcher gets or places + """ + + if not urls: + urls = self.urls + + for url in urls: + if url not in self.ud: + self.ud[url] = FetchData(url, d) + ud = self.ud[url] + ud.setup_localpath(self.d) + + if not ud.localfile and ud.localpath is None: + continue + + if ud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + ud.method.clean(ud, self.d) + if ud.donestamp: + bb.utils.remove(ud.donestamp) + + if ud.lockfile: + bb.utils.unlockfile(lf) + +class FetchConnectionCache(object): + """ + A class which represents an 
container for socket connections. + """ + def __init__(self): + self.cache = {} + + def get_connection_name(self, host, port): + return host + ':' + str(port) + + def add_connection(self, host, port, connection): + cn = self.get_connection_name(host, port) + + if cn not in self.cache: + self.cache[cn] = connection + + def get_connection(self, host, port): + connection = None + + cn = self.get_connection_name(host, port) + if cn in self.cache: + connection = self.cache[cn] + + return connection + + def remove_connection(self, host, port): + cn = self.get_connection_name(host, port) + if cn in self.cache: + self.cache[cn].close() + del self.cache[cn] + + def close_connections(self): + for cn in list(self.cache.keys()): + self.cache[cn].close() + del self.cache[cn] + +from . import cvs +from . import git +from . import gitsm +from . import gitannex +from . import local +from . import svn +from . import wget +from . import ssh +from . import sftp +from . import s3 +from . import perforce +from . import bzr +from . import hg +from . import osc +from . import repo +from . import clearcase +from . import npm + +methods.append(local.Local()) +methods.append(wget.Wget()) +methods.append(svn.Svn()) +methods.append(git.Git()) +methods.append(gitsm.GitSM()) +methods.append(gitannex.GitANNEX()) +methods.append(cvs.Cvs()) +methods.append(ssh.SSH()) +methods.append(sftp.SFTP()) +methods.append(s3.S3()) +methods.append(perforce.Perforce()) +methods.append(bzr.Bzr()) +methods.append(hg.Hg()) +methods.append(osc.Osc()) +methods.append(repo.Repo()) +methods.append(clearcase.ClearCase()) +methods.append(npm.Npm()) diff --git a/poky/bitbake/lib/bb/fetch2/bzr.py b/poky/bitbake/lib/bb/fetch2/bzr.py new file mode 100644 index 0000000000..16123f8af9 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/bzr.py @@ -0,0 +1,139 @@ +""" +BitBake 'Fetch' implementation for bzr. + +""" + +# Copyright (C) 2007 Ross Burton +# Copyright (C) 2007 Richard Purdie +# +# Classes for obtaining upstream sources for the +# BitBake build tools. +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
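+
+# Illustrative usage (not from the original source): a recipe would point
+# SRC_URI at a bzr branch, e.g. SRC_URI = "bzr://example.org/trunk;protocol=http",
+# with the revision selected via SRCREV or a ;rev= URL parameter.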
+ +import os +import sys +import logging +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Bzr(FetchMethod): + def supports(self, ud, d): + return ud.type in ['bzr'] + + def urldata_init(self, ud, d): + """ + init bzr specific variable within url data + """ + # Create paths to bzr checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(d.expand('${BZRDIR}'), ud.host, relpath) + + ud.setup_revisions(d) + + if not ud.revision: + ud.revision = self.latest_revision(ud, d) + + ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision)) + + def _buildbzrcommand(self, ud, d, command): + """ + Build up an bzr commandline based on ud + command is "fetch", "update", "revno" + """ + + basecmd = d.expand('${FETCHCMD_bzr}') + + proto = ud.parm.get('protocol', 'http') + + bzrroot = ud.host + ud.path + + options = [] + + if command == "revno": + bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) + else: + if ud.revision: + options.append("-r %s" % ud.revision) + + if command == "fetch": + bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) + elif command == "update": + bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options)) + else: + raise FetchError("Invalid bzr command %s" % command, ud.url) + + return bzrcmd + + def download(self, ud, d): + """Fetch url""" + + if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK): + bzrcmd = self._buildbzrcommand(ud, d, "update") + logger.debug(1, "BZR Update %s", ud.url) + bb.fetch2.check_network_access(d, bzrcmd, ud.url) + runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path))) + else: + bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True) + bzrcmd = self._buildbzrcommand(ud, d, "fetch") + bb.fetch2.check_network_access(d, bzrcmd, ud.url) + logger.debug(1, "BZR Checkout %s", ud.url) + bb.utils.mkdirhier(ud.pkgdir) + logger.debug(1, "Running %s", bzrcmd) + runfetchcmd(bzrcmd, d, workdir=ud.pkgdir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude='.bzr' --exclude='.bzrtags'" + + # tar them up to a defined filename + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), + d, cleanup=[ud.localpath], workdir=ud.pkgdir) + + def supports_srcrev(self): + return True + + def _revision_key(self, ud, d, name): + """ + Return a unique key for the url + """ + return "bzr:" + ud.pkgdir + + def _latest_revision(self, ud, d, name): + """ + Return the latest upstream revision number + """ + logger.debug(2, "BZR fetcher hitting network for %s", ud.url) + + bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url) + + output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True) + + return output.strip() + + def sortable_revision(self, ud, d, name): + """ + Return a sortable revision number which in our case is the revision number + """ + + return False, self._build_revision(ud, d) + + def _build_revision(self, ud, d): + return ud.revision diff --git a/poky/bitbake/lib/bb/fetch2/clearcase.py b/poky/bitbake/lib/bb/fetch2/clearcase.py new file mode 100644 index 0000000000..36beab6a5b --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/clearcase.py @@ -0,0 +1,260 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil 
-*- +""" +BitBake 'Fetch' clearcase implementation + +The clearcase fetcher is used to retrieve files from a ClearCase repository. + +Usage in the recipe: + + SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module" + SRCREV = "EXAMPLE_CLEARCASE_TAG" + PV = "${@d.getVar("SRCREV", False).replace("/", "+")}" + +The fetcher uses the rcleartool or cleartool remote client, depending on which one is available. + +Supported SRC_URI options are: + +- vob + (required) The name of the clearcase VOB (with prepending "/") + +- module + The module in the selected VOB (with prepending "/") + + The module and vob parameters are combined to create + the following load rule in the view config spec: + load + +- proto + http or https + +Related variables: + + CCASE_CUSTOM_CONFIG_SPEC + Write a config spec to this variable in your recipe to use it instead + of the default config spec generated by this fetcher. + Please note that the SRCREV loses its functionality if you specify + this variable. SRCREV is still used to label the archive after a fetch, + but it doesn't define what's fetched. + +User credentials: + cleartool: + The login of cleartool is handled by the system. No special steps needed. + + rcleartool: + In order to use rcleartool with authenticated users an `rcleartool login` is + necessary before using the fetcher. +""" +# Copyright (C) 2014 Siemens AG +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# + +import os +import sys +import shutil +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger +from distutils import spawn + +class ClearCase(FetchMethod): + """Class to fetch urls via 'clearcase'""" + def init(self, d): + pass + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with Clearcase. + """ + return ud.type in ['ccrc'] + + def debug(self, msg): + logger.debug(1, "ClearCase: %s", msg) + + def urldata_init(self, ud, d): + """ + init ClearCase specific variable within url data + """ + ud.proto = "https" + if 'protocol' in ud.parm: + ud.proto = ud.parm['protocol'] + if not ud.proto in ('http', 'https'): + raise fetch2.ParameterError("Invalid protocol type", ud.url) + + ud.vob = '' + if 'vob' in ud.parm: + ud.vob = ud.parm['vob'] + else: + msg = ud.url+": vob must be defined so the fetcher knows what to get." + raise MissingParameterError('vob', msg) + + if 'module' in ud.parm: + ud.module = ud.parm['module'] + else: + ud.module = "" + + ud.basecmd = d.getVar("FETCHCMD_ccrc") or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool") + + if d.getVar("SRCREV") == "INVALID": + raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. 
SRCREV = \"/main/LATEST\" or any other label of your choice.") + + ud.label = d.getVar("SRCREV", False) + ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC") + + ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path) + + ud.identifier = "clearcase-%s%s-%s" % ( ud.vob.replace("/", ""), + ud.module.replace("/", "."), + ud.label.replace("/", ".")) + + ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True)) + ud.csname = "%s-config-spec" % (ud.identifier) + ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type) + ud.viewdir = os.path.join(ud.ccasedir, ud.viewname) + ud.configspecfile = os.path.join(ud.ccasedir, ud.csname) + ud.localfile = "%s.tar.gz" % (ud.identifier) + + self.debug("host = %s" % ud.host) + self.debug("path = %s" % ud.path) + self.debug("server = %s" % ud.server) + self.debug("proto = %s" % ud.proto) + self.debug("type = %s" % ud.type) + self.debug("vob = %s" % ud.vob) + self.debug("module = %s" % ud.module) + self.debug("basecmd = %s" % ud.basecmd) + self.debug("label = %s" % ud.label) + self.debug("ccasedir = %s" % ud.ccasedir) + self.debug("viewdir = %s" % ud.viewdir) + self.debug("viewname = %s" % ud.viewname) + self.debug("configspecfile = %s" % ud.configspecfile) + self.debug("localfile = %s" % ud.localfile) + + ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile) + + def _build_ccase_command(self, ud, command): + """ + Build up a commandline based on ud + command is: mkview, setcs, rmview + """ + options = [] + + if "rcleartool" in ud.basecmd: + options.append("-server %s" % ud.server) + + basecmd = "%s %s" % (ud.basecmd, command) + + if command is 'mkview': + if not "rcleartool" in ud.basecmd: + # Cleartool needs a -snapshot view + options.append("-snapshot") + options.append("-tag %s" % ud.viewname) + options.append(ud.viewdir) + + elif command is 'rmview': + options.append("-force") + options.append("%s" % ud.viewdir) + + elif command is 'setcs': + options.append("-overwrite") + options.append(ud.configspecfile) + + else: + raise FetchError("Invalid ccase command %s" % command) + + ccasecmd = "%s %s" % (basecmd, " ".join(options)) + self.debug("ccasecmd = %s" % ccasecmd) + return ccasecmd + + def _write_configspec(self, ud, d): + """ + Create config spec file (ud.configspecfile) for ccase view + """ + config_spec = "" + custom_config_spec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", d) + if custom_config_spec is not None: + for line in custom_config_spec.split("\\n"): + config_spec += line+"\n" + bb.warn("A custom config spec has been set, SRCREV is only relevant for the tarball name.") + else: + config_spec += "element * CHECKEDOUT\n" + config_spec += "element * %s\n" % ud.label + config_spec += "load %s%s\n" % (ud.vob, ud.module) + + logger.info("Using config spec: \n%s" % config_spec) + + with open(ud.configspecfile, 'w') as f: + f.write(config_spec) + + def _remove_view(self, ud, d): + if os.path.exists(ud.viewdir): + cmd = self._build_ccase_command(ud, 'rmview'); + logger.info("cleaning up [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) + bb.fetch2.check_network_access(d, cmd, ud.url) + output = runfetchcmd(cmd, d, workdir=ud.ccasedir) + logger.info("rmview output: %s", output) + + def need_update(self, ud, d): + if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec): + ud.identifier += "-%s" % d.getVar("DATETIME",d, True) + return True + if os.path.exists(ud.localpath): + return False + return True + + def supports_srcrev(self): + return True + + def sortable_revision(self, ud, d, name): + return False, 
ud.identifier + + def download(self, ud, d): + """Fetch url""" + + # Make a fresh view + bb.utils.mkdirhier(ud.ccasedir) + self._write_configspec(ud, d) + cmd = self._build_ccase_command(ud, 'mkview') + logger.info("creating view [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) + bb.fetch2.check_network_access(d, cmd, ud.url) + try: + runfetchcmd(cmd, d) + except FetchError as e: + if "CRCLI2008E" in e.msg: + raise FetchError("%s\n%s\n" % (e.msg, "Call `rcleartool login` in your console to authenticate to the clearcase server before running bitbake.")) + else: + raise e + + # Set configspec: Setting the configspec effectively fetches the files as defined in the configspec + cmd = self._build_ccase_command(ud, 'setcs'); + logger.info("fetching data [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname) + bb.fetch2.check_network_access(d, cmd, ud.url) + output = runfetchcmd(cmd, d, workdir=ud.viewdir) + logger.info("%s", output) + + # Copy the configspec to the viewdir so we have it in our source tarball later + shutil.copyfile(ud.configspecfile, os.path.join(ud.viewdir, ud.csname)) + + # Clean clearcase meta-data before tar + + runfetchcmd('tar -czf "%s" .' % (ud.localpath), d, cleanup = [ud.localpath]) + + # Clean up so we can create a new view next time + self.clean(ud, d); + + def clean(self, ud, d): + self._remove_view(ud, d) + bb.utils.remove(ud.configspecfile) diff --git a/poky/bitbake/lib/bb/fetch2/cvs.py b/poky/bitbake/lib/bb/fetch2/cvs.py new file mode 100644 index 0000000000..490c954718 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/cvs.py @@ -0,0 +1,172 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +#Based on functions from the base bb module, Copyright 2003 Holger Schurig +# + +import os +import logging +import bb +from bb.fetch2 import FetchMethod, FetchError, MissingParameterError, logger +from bb.fetch2 import runfetchcmd + +class Cvs(FetchMethod): + """ + Class to fetch a module or modules from cvs repositories + """ + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with cvs. 
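+ Only URLs of type "cvs" are accepted (illustrative form:
+ cvs://user@host/cvsroot;module=mymodule; the module parameter is
+ required by urldata_init()).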
+ """ + return ud.type in ['cvs'] + + def urldata_init(self, ud, d): + if not "module" in ud.parm: + raise MissingParameterError("module", ud.url) + ud.module = ud.parm["module"] + + ud.tag = ud.parm.get('tag', "") + + # Override the default date in certain cases + if 'date' in ud.parm: + ud.date = ud.parm['date'] + elif ud.tag: + ud.date = "" + + norecurse = '' + if 'norecurse' in ud.parm: + norecurse = '_norecurse' + + fullpath = '' + if 'fullpath' in ud.parm: + fullpath = '_fullpath' + + ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath)) + + def need_update(self, ud, d): + if (ud.date == "now"): + return True + if not os.path.exists(ud.localpath): + return True + return False + + def download(self, ud, d): + + method = ud.parm.get('method', 'pserver') + localdir = ud.parm.get('localdir', ud.module) + cvs_port = ud.parm.get('port', '') + + cvs_rsh = None + if method == "ext": + if "rsh" in ud.parm: + cvs_rsh = ud.parm["rsh"] + + if method == "dir": + cvsroot = ud.path + else: + cvsroot = ":" + method + cvsproxyhost = d.getVar('CVS_PROXY_HOST') + if cvsproxyhost: + cvsroot += ";proxy=" + cvsproxyhost + cvsproxyport = d.getVar('CVS_PROXY_PORT') + if cvsproxyport: + cvsroot += ";proxyport=" + cvsproxyport + cvsroot += ":" + ud.user + if ud.pswd: + cvsroot += ":" + ud.pswd + cvsroot += "@" + ud.host + ":" + cvs_port + ud.path + + options = [] + if 'norecurse' in ud.parm: + options.append("-l") + if ud.date: + # treat YYYYMMDDHHMM specially for CVS + if len(ud.date) == 12: + options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12])) + else: + options.append("-D \"%s UTC\"" % ud.date) + if ud.tag: + options.append("-r %s" % ud.tag) + + cvsbasecmd = d.getVar("FETCHCMD_cvs") + cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module + cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options) + + if cvs_rsh: + cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd) + cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd) + + # create module directory + logger.debug(2, "Fetch: checking for module directory") + pkg = d.getVar('PN') + pkgdir = os.path.join(d.getVar('CVSDIR'), pkg) + moddir = os.path.join(pkgdir, localdir) + workdir = None + if os.access(os.path.join(moddir, 'CVS'), os.R_OK): + logger.info("Update " + ud.url) + bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url) + # update sources there + workdir = moddir + cmd = cvsupdatecmd + else: + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(pkgdir) + workdir = pkgdir + logger.debug(1, "Running %s", cvscmd) + bb.fetch2.check_network_access(d, cvscmd, ud.url) + cmd = cvscmd + + runfetchcmd(cmd, d, cleanup=[moddir], workdir=workdir) + + if not os.access(moddir, os.R_OK): + raise FetchError("Directory %s was not readable despite sucessful fetch?!" 
% moddir, ud.url) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude='CVS'" + + # tar them up to a defined filename + workdir = None + if 'fullpath' in ud.parm: + workdir = pkgdir + cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir) + else: + workdir = os.path.dirname(os.path.realpath(moddir)) + cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir)) + + runfetchcmd(cmd, d, cleanup=[ud.localpath], workdir=workdir) + + def clean(self, ud, d): + """ Clean CVS Files and tarballs """ + + pkg = d.getVar('PN') + pkgdir = os.path.join(d.getVar("CVSDIR"), pkg) + + bb.utils.remove(pkgdir, True) + bb.utils.remove(ud.localpath) + diff --git a/poky/bitbake/lib/bb/fetch2/git.py b/poky/bitbake/lib/bb/fetch2/git.py new file mode 100644 index 0000000000..3de83bed17 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/git.py @@ -0,0 +1,664 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' git implementation + +git fetcher support the SRC_URI with format of: +SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..." + +Supported SRC_URI options are: + +- branch + The git branch to retrieve from. The default is "master" + + This option also supports multiple branch fetching, with branches + separated by commas. In multiple branches case, the name option + must have the same number of names to match the branches, which is + used to specify the SRC_REV for the branch + e.g: + SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY" + SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx" + SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY" + +- tag + The git tag to retrieve. The default is "master" + +- protocol + The method to use to access the repository. Common options are "git", + "http", "https", "file", "ssh" and "rsync". The default is "git". + +- rebaseable + rebaseable indicates that the upstream git repo may rebase in the future, + and current revision may disappear from upstream repo. This option will + remind fetcher to preserve local cache carefully for future use. + The default value is "0", set rebaseable=1 for rebaseable git repo. + +- nocheckout + Don't checkout source code when unpacking. set this option for the recipe + who has its own routine to checkout code. + The default is "0", set nocheckout=1 if needed. + +- bareclone + Create a bare clone of the source code and don't checkout the source code + when unpacking. Set this option for the recipe who has its own routine to + checkout code and tracking branch requirements. + The default is "0", set bareclone=1 if needed. + +- nobranch + Don't check the SHA validation for branch. set this option for the recipe + referring to commit which is valid in tag instead of branch. + The default is "0", set nobranch=1 if needed. + +- usehead + For local git:// urls to use the current branch HEAD as the revision for use with + AUTOREV. Implies nobranch. + +""" + +#Copyright (C) 2005 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
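For orientation, the cvs fetcher above drives everything from URL parameters: "module" is mandatory, while "tag", "date", "method", "port", "localdir", "norecurse", "fullpath" and "scmdata" are optional. A minimal, hypothetical recipe usage (host, module and tag are placeholder values, not taken from this patch) might look like:

    SRC_URI = "cvs://anonymous@cvs.example.org/cvsroot;module=mymodule;tag=RELEASE_1_0;method=pserver"

With scmdata=keep the CVS metadata directories are kept in the generated tarball; otherwise the tar command in download() excludes them.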
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import collections +import errno +import fnmatch +import os +import re +import subprocess +import tempfile +import bb +import bb.progress +from bb.fetch2 import FetchMethod +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + + +class GitProgressHandler(bb.progress.LineFilterProgressHandler): + """Extract progress information from git output""" + def __init__(self, d): + self._buffer = '' + self._count = 0 + super(GitProgressHandler, self).__init__(d) + # Send an initial progress event so the bar gets shown + self._fire_progress(-1) + + def write(self, string): + self._buffer += string + stages = ['Counting objects', 'Compressing objects', 'Receiving objects', 'Resolving deltas'] + stage_weights = [0.2, 0.05, 0.5, 0.25] + stagenum = 0 + for i, stage in reversed(list(enumerate(stages))): + if stage in self._buffer: + stagenum = i + self._buffer = '' + break + self._status = stages[stagenum] + percs = re.findall(r'(\d+)%', string) + if percs: + progress = int(round((int(percs[-1]) * stage_weights[stagenum]) + (sum(stage_weights[:stagenum]) * 100))) + rates = re.findall(r'([\d.]+ [a-zA-Z]*/s+)', string) + if rates: + rate = rates[-1] + else: + rate = None + self.update(progress, rate) + else: + if stagenum == 0: + percs = re.findall(r': (\d+)', string) + if percs: + count = int(percs[-1]) + if count > self._count: + self._count = count + self._fire_progress(-count) + super(GitProgressHandler, self).write(string) + + +class Git(FetchMethod): + bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..')) + make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow') + + """Class to fetch a module or modules from git repositories""" + def init(self, d): + pass + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with git. 
+ """ + return ud.type in ['git'] + + def supports_checksum(self, urldata): + return False + + def urldata_init(self, ud, d): + """ + init git specific variable within url data + so that the git method like latest_revision() can work + """ + if 'protocol' in ud.parm: + ud.proto = ud.parm['protocol'] + elif not ud.host: + ud.proto = 'file' + else: + ud.proto = "git" + + if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'): + raise bb.fetch2.ParameterError("Invalid protocol type", ud.url) + + ud.nocheckout = ud.parm.get("nocheckout","0") == "1" + + ud.rebaseable = ud.parm.get("rebaseable","0") == "1" + + ud.nobranch = ud.parm.get("nobranch","0") == "1" + + # usehead implies nobranch + ud.usehead = ud.parm.get("usehead","0") == "1" + if ud.usehead: + if ud.proto != "file": + raise bb.fetch2.ParameterError("The usehead option is only for use with local ('protocol=file') git repositories", ud.url) + ud.nobranch = 1 + + # bareclone implies nocheckout + ud.bareclone = ud.parm.get("bareclone","0") == "1" + if ud.bareclone: + ud.nocheckout = 1 + + ud.unresolvedrev = {} + branches = ud.parm.get("branch", "master").split(',') + if len(branches) != len(ud.names): + raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url) + + ud.cloneflags = "-s -n" + if ud.bareclone: + ud.cloneflags += " --mirror" + + ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1" + ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split() + + depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH") + if depth_default is not None: + try: + depth_default = int(depth_default or 0) + except ValueError: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) + else: + if depth_default < 0: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default) + else: + depth_default = 1 + ud.shallow_depths = collections.defaultdict(lambda: depth_default) + + revs_default = d.getVar("BB_GIT_SHALLOW_REVS", True) + ud.shallow_revs = [] + ud.branches = {} + for pos, name in enumerate(ud.names): + branch = branches[pos] + ud.branches[name] = branch + ud.unresolvedrev[name] = branch + + shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name) + if shallow_depth is not None: + try: + shallow_depth = int(shallow_depth or 0) + except ValueError: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth)) + else: + if shallow_depth < 0: + raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth)) + ud.shallow_depths[name] = shallow_depth + + revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name) + if revs is not None: + ud.shallow_revs.extend(revs.split()) + elif revs_default is not None: + ud.shallow_revs.extend(revs_default.split()) + + if (ud.shallow and + not ud.shallow_revs and + all(ud.shallow_depths[n] == 0 for n in ud.names)): + # Shallow disabled for this URL + ud.shallow = False + + if ud.usehead: + ud.unresolvedrev['default'] = 'HEAD' + + ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0" + + write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0" + ud.write_tarballs = write_tarballs != "0" or ud.rebaseable + ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0" + + ud.setup_revisions(d) + + for name in ud.names: + # Ensure anything that doesn't look like a sha256 checksum/revision is translated into one + if not ud.revisions[name] or len(ud.revisions[name]) != 40 or 
(False in [c in "abcdef0123456789" for c in ud.revisions[name]]): + if ud.revisions[name]: + ud.unresolvedrev[name] = ud.revisions[name] + ud.revisions[name] = self.latest_revision(ud, d, name) + + gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.')) + if gitsrcname.startswith('.'): + gitsrcname = gitsrcname[1:] + + # for rebaseable git repo, it is necessary to keep mirror tar ball + # per revision, so that even the revision disappears from the + # upstream repo in the future, the mirror will remain intact and still + # contains the revision + if ud.rebaseable: + for name in ud.names: + gitsrcname = gitsrcname + '_' + ud.revisions[name] + + dl_dir = d.getVar("DL_DIR") + gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/") + ud.clonedir = os.path.join(gitdir, gitsrcname) + ud.localfile = ud.clonedir + + mirrortarball = 'git2_%s.tar.gz' % gitsrcname + ud.fullmirror = os.path.join(dl_dir, mirrortarball) + ud.mirrortarballs = [mirrortarball] + if ud.shallow: + tarballname = gitsrcname + if ud.bareclone: + tarballname = "%s_bare" % tarballname + + if ud.shallow_revs: + tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs))) + + for name, revision in sorted(ud.revisions.items()): + tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7]) + depth = ud.shallow_depths[name] + if depth: + tarballname = "%s-%s" % (tarballname, depth) + + shallow_refs = [] + if not ud.nobranch: + shallow_refs.extend(ud.branches.values()) + if ud.shallow_extra_refs: + shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs) + if shallow_refs: + tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.')) + + fetcher = self.__class__.__name__.lower() + ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname) + ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball) + ud.mirrortarballs.insert(0, ud.shallowtarball) + + def localpath(self, ud, d): + return ud.clonedir + + def need_update(self, ud, d): + if not os.path.exists(ud.clonedir): + return True + for name in ud.names: + if not self._contains_ref(ud, d, name, ud.clonedir): + return True + if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow): + return True + if ud.write_tarballs and not os.path.exists(ud.fullmirror): + return True + return False + + def try_premirror(self, ud, d): + # If we don't do this, updating an existing checkout with only premirrors + # is not possible + if d.getVar("BB_FETCH_PREMIRRORONLY") is not None: + return True + if os.path.exists(ud.clonedir): + return False + return True + + def download(self, ud, d): + """Fetch url""" + + no_clone = not os.path.exists(ud.clonedir) + need_update = no_clone or self.need_update(ud, d) + + # A current clone is preferred to either tarball, a shallow tarball is + # preferred to an out of date clone, and a missing clone will use + # either tarball. 
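The shallow-clone and mirror-tarball handling above is driven entirely by datastore variables read in urldata_init(). A sketch of a configuration that would enable it (the values and repository URL are illustrative, not part of this patch):

    BB_GIT_SHALLOW = "1"
    BB_GIT_SHALLOW_DEPTH = "1"
    BB_GENERATE_SHALLOW_TARBALLS = "1"
    BB_GENERATE_MIRROR_TARBALLS = "1"
    SRC_URI = "git://git.example.com/project;protocol=https;branch=master"
    SRCREV = "${AUTOREV}"

Per-name overrides such as BB_GIT_SHALLOW_DEPTH_<name> and BB_GIT_SHALLOW_REVS_<name> are also honoured, and shallow mode is disabled again when no revs are given and every configured depth is 0.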
+ if ud.shallow and os.path.exists(ud.fullshallow) and need_update: + ud.localpath = ud.fullshallow + return + elif os.path.exists(ud.fullmirror) and no_clone: + bb.utils.mkdirhier(ud.clonedir) + runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir) + + repourl = self._get_repo_url(ud) + + # If the repo still doesn't exist, fallback to cloning it + if not os.path.exists(ud.clonedir): + # We do this since git will use a "-l" option automatically for local urls where possible + if repourl.startswith("file://"): + repourl = repourl[7:] + clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir) + if ud.proto.lower() != 'file': + bb.fetch2.check_network_access(d, clone_cmd, ud.url) + progresshandler = GitProgressHandler(d) + runfetchcmd(clone_cmd, d, log=progresshandler) + + # Update the checkout if needed + needupdate = False + for name in ud.names: + if not self._contains_ref(ud, d, name, ud.clonedir): + needupdate = True + if needupdate: + try: + runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir) + except bb.fetch2.FetchError: + logger.debug(1, "No Origin") + + runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir) + fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl) + if ud.proto.lower() != 'file': + bb.fetch2.check_network_access(d, fetch_cmd, ud.url) + progresshandler = GitProgressHandler(d) + runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir) + runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir) + runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir) + runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir) + try: + os.unlink(ud.fullmirror) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + for name in ud.names: + if not self._contains_ref(ud, d, name, ud.clonedir): + raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name])) + + def build_mirror_data(self, ud, d): + if ud.shallow and ud.write_shallow_tarballs: + if not os.path.exists(ud.fullshallow): + if os.path.islink(ud.fullshallow): + os.unlink(ud.fullshallow) + tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR')) + shallowclone = os.path.join(tempdir, 'git') + try: + self.clone_shallow_local(ud, shallowclone, d) + + logger.info("Creating tarball of git repository") + runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone) + runfetchcmd("touch %s.done" % ud.fullshallow, d) + finally: + bb.utils.remove(tempdir, recurse=True) + elif ud.write_tarballs and not os.path.exists(ud.fullmirror): + if os.path.islink(ud.fullmirror): + os.unlink(ud.fullmirror) + + logger.info("Creating tarball of git repository") + runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir) + runfetchcmd("touch %s.done" % ud.fullmirror, d) + + def clone_shallow_local(self, ud, dest, d): + """Clone the repo and make it shallow. 
+ + The upstream url of the new clone isn't set at this time, as it'll be + set correctly when unpacked.""" + runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d) + + to_parse, shallow_branches = [], [] + for name in ud.names: + revision = ud.revisions[name] + depth = ud.shallow_depths[name] + if depth: + to_parse.append('%s~%d^{}' % (revision, depth - 1)) + + # For nobranch, we need a ref, otherwise the commits will be + # removed, and for non-nobranch, we truncate the branch to our + # srcrev, to avoid keeping unnecessary history beyond that. + branch = ud.branches[name] + if ud.nobranch: + ref = "refs/shallow/%s" % name + elif ud.bareclone: + ref = "refs/heads/%s" % branch + else: + ref = "refs/remotes/origin/%s" % branch + + shallow_branches.append(ref) + runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest) + + # Map srcrev+depths to revisions + parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest) + + # Resolve specified revisions + parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest) + shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines() + + # Apply extra ref wildcards + all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd, + d, workdir=dest).splitlines() + for r in ud.shallow_extra_refs: + if not ud.bareclone: + r = r.replace('refs/heads/', 'refs/remotes/origin/') + + if '*' in r: + matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs) + shallow_branches.extend(matches) + else: + shallow_branches.append(r) + + # Make the repository shallow + shallow_cmd = [self.make_shallow_path, '-s'] + for b in shallow_branches: + shallow_cmd.append('-r') + shallow_cmd.append(b) + shallow_cmd.extend(shallow_revisions) + runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest) + + def unpack(self, ud, destdir, d): + """ unpack the downloaded src to destdir""" + + subdir = ud.parm.get("subpath", "") + if subdir != "": + readpathspec = ":%s" % subdir + def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/')) + else: + readpathspec = "" + def_destsuffix = "git/" + + destsuffix = ud.parm.get("destsuffix", def_destsuffix) + destdir = ud.destdir = os.path.join(destdir, destsuffix) + if os.path.exists(destdir): + bb.utils.prunedir(destdir) + + if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)): + bb.utils.mkdirhier(destdir) + runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir) + else: + runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d) + + repourl = self._get_repo_url(ud) + runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir) + if not ud.nocheckout: + if subdir != "": + runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d, + workdir=destdir) + runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir) + elif not ud.nobranch: + branchname = ud.branches[ud.names[0]] + runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \ + ud.revisions[ud.names[0]]), d, workdir=destdir) + runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \ + branchname), d, workdir=destdir) + else: + runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir) + + return True + + def clean(self, ud, d): + """ clean the git directory """ + + 
bb.utils.remove(ud.localpath, True) + bb.utils.remove(ud.fullmirror) + bb.utils.remove(ud.fullmirror + ".done") + + def supports_srcrev(self): + return True + + def _contains_ref(self, ud, d, name, wd): + cmd = "" + if ud.nobranch: + cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % ( + ud.basecmd, ud.revisions[name]) + else: + cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % ( + ud.basecmd, ud.revisions[name], ud.branches[name]) + try: + output = runfetchcmd(cmd, d, quiet=True, workdir=wd) + except bb.fetch2.FetchError: + return False + if len(output.split()) > 1: + raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output)) + return output.split()[0] != "0" + + def _get_repo_url(self, ud): + """ + Return the repository URL + """ + if ud.user: + username = ud.user + '@' + else: + username = "" + return "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path) + + def _revision_key(self, ud, d, name): + """ + Return a unique key for the url + """ + return "git:" + ud.host + ud.path.replace('/', '.') + ud.unresolvedrev[name] + + def _lsremote(self, ud, d, search): + """ + Run git ls-remote with the specified search string + """ + # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR, + # and WORKDIR is in PATH (as a result of RSS), our call to + # runfetchcmd() exports PATH so this function will get called again (!) + # In this scenario the return call of the function isn't actually + # important - WORKDIR isn't needed in PATH to call git ls-remote + # anyway. + if d.getVar('_BB_GIT_IN_LSREMOTE', False): + return '' + d.setVar('_BB_GIT_IN_LSREMOTE', '1') + try: + repourl = self._get_repo_url(ud) + cmd = "%s ls-remote %s %s" % \ + (ud.basecmd, repourl, search) + if ud.proto.lower() != 'file': + bb.fetch2.check_network_access(d, cmd, repourl) + output = runfetchcmd(cmd, d, True) + if not output: + raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url) + finally: + d.delVar('_BB_GIT_IN_LSREMOTE') + return output + + def _latest_revision(self, ud, d, name): + """ + Compute the HEAD revision for the url + """ + output = self._lsremote(ud, d, "") + # Tags of the form ^{} may not work, need to fallback to other form + if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead: + head = ud.unresolvedrev[name] + tag = ud.unresolvedrev[name] + else: + head = "refs/heads/%s" % ud.unresolvedrev[name] + tag = "refs/tags/%s" % ud.unresolvedrev[name] + for s in [head, tag + "^{}", tag]: + for l in output.strip().split('\n'): + sha1, ref = l.split() + if s == ref: + return sha1 + raise bb.fetch2.FetchError("Unable to resolve '%s' in upstream git repository in git ls-remote output for %s" % \ + (ud.unresolvedrev[name], ud.host+ud.path)) + + def latest_versionstring(self, ud, d): + """ + Compute the latest release name like "x.y.x" in "x.y.x+gitHASH" + by searching through the tags output of ls-remote, comparing + versions and returning the highest match. 
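latest_versionstring() above expects a named regex group called "pver" when matching tag names from ls-remote output. A hedged example of how a recipe might steer it (the pattern itself is illustrative):

    UPSTREAM_CHECK_GITTAGREGEX = "v(?P<pver>\d+(\.\d+)+)"

Tags containing alpha/beta/rc/final are skipped before the regex is applied, and the highest version according to bb.utils.vercmp() wins.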
+ """ + pupver = ('', '') + + tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or "(?P([0-9][\.|_]?)+)") + try: + output = self._lsremote(ud, d, "refs/tags/*") + except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e: + bb.note("Could not list remote: %s" % str(e)) + return pupver + + verstring = "" + revision = "" + for line in output.split("\n"): + if not line: + break + + tag_head = line.split("/")[-1] + # Ignore non-released branches + m = re.search("(alpha|beta|rc|final)+", tag_head) + if m: + continue + + # search for version in the line + tag = tagregex.search(tag_head) + if tag == None: + continue + + tag = tag.group('pver') + tag = tag.replace("_", ".") + + if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0: + continue + + verstring = tag + revision = line.split()[0] + pupver = (verstring, revision) + + return pupver + + def _build_revision(self, ud, d, name): + return ud.revisions[name] + + def gitpkgv_revision(self, ud, d, name): + """ + Return a sortable revision number by counting commits in the history + Based on gitpkgv.bblass in meta-openembedded + """ + rev = self._build_revision(ud, d, name) + localpath = ud.localpath + rev_file = os.path.join(localpath, "oe-gitpkgv_" + rev) + if not os.path.exists(localpath): + commits = None + else: + if not os.path.exists(rev_file) or not os.path.getsize(rev_file): + from pipes import quote + commits = bb.fetch2.runfetchcmd( + "git rev-list %s -- | wc -l" % quote(rev), + d, quiet=True).strip().lstrip('0') + if commits: + open(rev_file, "w").write("%d\n" % int(commits)) + else: + commits = open(rev_file, "r").readline(128).strip() + if commits: + return False, "%s+%s" % (commits, rev[:7]) + else: + return True, str(rev) + + def checkstatus(self, fetch, ud, d): + try: + self._lsremote(ud, d, "") + return True + except bb.fetch2.FetchError: + return False diff --git a/poky/bitbake/lib/bb/fetch2/gitannex.py b/poky/bitbake/lib/bb/fetch2/gitannex.py new file mode 100644 index 0000000000..a9b69caab4 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/gitannex.py @@ -0,0 +1,91 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' git annex implementation +""" + +# Copyright (C) 2014 Otavio Salvador +# Copyright (C) 2014 O.S. Systems Software LTDA. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import bb +from bb.fetch2.git import Git +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class GitANNEX(Git): + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with git. 
+ """ + return ud.type in ['gitannex'] + + def urldata_init(self, ud, d): + super(GitANNEX, self).urldata_init(ud, d) + if ud.shallow: + ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*'] + + def uses_annex(self, ud, d, wd): + for name in ud.names: + try: + runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True, workdir=wd) + return True + except bb.fetch.FetchError: + pass + + return False + + def update_annex(self, ud, d, wd): + try: + runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True, workdir=wd) + except bb.fetch.FetchError: + return False + runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True, workdir=wd) + + return True + + def download(self, ud, d): + Git.download(self, ud, d) + + if not ud.shallow or ud.localpath != ud.fullshallow: + if self.uses_annex(ud, d, ud.clonedir): + self.update_annex(ud, d, ud.clonedir) + + def clone_shallow_local(self, ud, dest, d): + super(GitANNEX, self).clone_shallow_local(ud, dest, d) + + try: + runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest) + except bb.fetch.FetchError: + pass + + if self.uses_annex(ud, d, dest): + runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest) + runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest) + + def unpack(self, ud, destdir, d): + Git.unpack(self, ud, destdir, d) + + try: + runfetchcmd("%s annex init" % (ud.basecmd), d, workdir=ud.destdir) + except bb.fetch.FetchError: + pass + + annex = self.uses_annex(ud, d, ud.destdir) + if annex: + runfetchcmd("%s annex get" % (ud.basecmd), d, workdir=ud.destdir) + runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True, workdir=ud.destdir) + diff --git a/poky/bitbake/lib/bb/fetch2/gitsm.py b/poky/bitbake/lib/bb/fetch2/gitsm.py new file mode 100644 index 0000000000..0aff1008e5 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/gitsm.py @@ -0,0 +1,135 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' git submodules implementation + +Inherits from and extends the Git fetcher to retrieve submodules of a git repository +after cloning. + +SRC_URI = "gitsm://" + +See the Git fetcher, git://, for usage documentation. + +NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your recipe. + +""" + +# Copyright (C) 2013 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import bb +from bb.fetch2.git import Git +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class GitSM(Git): + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with git. 
+ """ + return ud.type in ['gitsm'] + + def uses_submodules(self, ud, d, wd): + for name in ud.names: + try: + runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd) + return True + except bb.fetch.FetchError: + pass + return False + + def _set_relative_paths(self, repopath): + """ + Fix submodule paths to be relative instead of absolute, + so that when we move the repo it doesn't break + (In Git 1.7.10+ this is done automatically) + """ + submodules = [] + with open(os.path.join(repopath, '.gitmodules'), 'r') as f: + for line in f.readlines(): + if line.startswith('[submodule'): + submodules.append(line.split('"')[1]) + + for module in submodules: + repo_conf = os.path.join(repopath, module, '.git') + if os.path.exists(repo_conf): + with open(repo_conf, 'r') as f: + lines = f.readlines() + newpath = '' + for i, line in enumerate(lines): + if line.startswith('gitdir:'): + oldpath = line.split(': ')[-1].rstrip() + if oldpath.startswith('/'): + newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module + lines[i] = 'gitdir: %s\n' % newpath + break + if newpath: + with open(repo_conf, 'w') as f: + for line in lines: + f.write(line) + + repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config') + if os.path.exists(repo_conf2): + with open(repo_conf2, 'r') as f: + lines = f.readlines() + newpath = '' + for i, line in enumerate(lines): + if line.lstrip().startswith('worktree = '): + oldpath = line.split(' = ')[-1].rstrip() + if oldpath.startswith('/'): + newpath = '../' * (module.count('/') + 3) + module + lines[i] = '\tworktree = %s\n' % newpath + break + if newpath: + with open(repo_conf2, 'w') as f: + for line in lines: + f.write(line) + + def update_submodules(self, ud, d): + # We have to convert bare -> full repo, do the submodule bit, then convert back + tmpclonedir = ud.clonedir + ".tmp" + gitdir = tmpclonedir + os.sep + ".git" + bb.utils.remove(tmpclonedir, True) + os.mkdir(tmpclonedir) + os.rename(ud.clonedir, gitdir) + runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d) + runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir) + runfetchcmd(ud.basecmd + " checkout -f " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir) + runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir) + self._set_relative_paths(tmpclonedir) + runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir) + os.rename(gitdir, ud.clonedir,) + bb.utils.remove(tmpclonedir, True) + + def download(self, ud, d): + Git.download(self, ud, d) + + if not ud.shallow or ud.localpath != ud.fullshallow: + submodules = self.uses_submodules(ud, d, ud.clonedir) + if submodules: + self.update_submodules(ud, d) + + def clone_shallow_local(self, ud, dest, d): + super(GitSM, self).clone_shallow_local(ud, dest, d) + + runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d) + + def unpack(self, ud, destdir, d): + Git.unpack(self, ud, destdir, d) + + if self.uses_submodules(ud, d, ud.destdir): + runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir) + runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir) diff --git a/poky/bitbake/lib/bb/fetch2/hg.py b/poky/bitbake/lib/bb/fetch2/hg.py new file mode 100644 index 0000000000..d0857e63f7 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/hg.py @@ -0,0 +1,270 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; 
c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementation for mercurial DRCS (hg). + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2004 Marcin Juszkiewicz +# Copyright (C) 2007 Robert Schuster +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import sys +import logging +import bb +import errno +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import MissingParameterError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Hg(FetchMethod): + """Class to fetch from mercurial repositories""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with mercurial. + """ + return ud.type in ['hg'] + + def supports_checksum(self, urldata): + """ + Don't require checksums for local archives created from + repository checkouts. + """ + return False + + def urldata_init(self, ud, d): + """ + init hg specific variable within url data + """ + if not "module" in ud.parm: + raise MissingParameterError('module', ud.url) + + ud.module = ud.parm["module"] + + if 'protocol' in ud.parm: + ud.proto = ud.parm['protocol'] + elif not ud.host: + ud.proto = 'file' + else: + ud.proto = "hg" + + ud.setup_revisions(d) + + if 'rev' in ud.parm: + ud.revision = ud.parm['rev'] + elif not ud.revision: + ud.revision = self.latest_revision(ud, d) + + # Create paths to mercurial checkouts + hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \ + ud.host, ud.path.replace('/', '.')) + mirrortarball = 'hg_%s.tar.gz' % hgsrcname + ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball) + ud.mirrortarballs = [mirrortarball] + + hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/") + ud.pkgdir = os.path.join(hgdir, hgsrcname) + ud.moddir = os.path.join(ud.pkgdir, ud.module) + ud.localfile = ud.moddir + ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg" + + ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") + + def need_update(self, ud, d): + revTag = ud.parm.get('rev', 'tip') + if revTag == "tip": + return True + if not os.path.exists(ud.localpath): + return True + return False + + def try_premirror(self, ud, d): + # If we don't do this, updating an existing checkout with only premirrors + # is not possible + if d.getVar("BB_FETCH_PREMIRRORONLY") is not None: + return True + if os.path.exists(ud.moddir): + return False + return True + + def _buildhgcommand(self, ud, d, command): + """ + Build up an hg commandline based on ud + command is "fetch", "update", "info" + """ + + proto = ud.parm.get('protocol', 'http') + + host = ud.host + if proto == "file": + host = "/" + ud.host = "localhost" + + if not ud.user: + hgroot = host + ud.path + else: + if ud.pswd: + hgroot = ud.user + ":" + ud.pswd + "@" + host + ud.path + else: + hgroot = ud.user + "@" + host + ud.path + + if command == "info": + 
return "%s identify -i %s://%s/%s" % (ud.basecmd, proto, hgroot, ud.module) + + options = []; + + # Don't specify revision for the fetch; clone the entire repo. + # This avoids an issue if the specified revision is a tag, because + # the tag actually exists in the specified revision + 1, so it won't + # be available when used in any successive commands. + if ud.revision and command != "fetch": + options.append("-r %s" % ud.revision) + + if command == "fetch": + if ud.user and ud.pswd: + cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" clone %s %s://%s/%s %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options), proto, hgroot, ud.module, ud.module) + else: + cmd = "%s clone %s %s://%s/%s %s" % (ud.basecmd, " ".join(options), proto, hgroot, ud.module, ud.module) + elif command == "pull": + # do not pass options list; limiting pull to rev causes the local + # repo not to contain it and immediately following "update" command + # will crash + if ud.user and ud.pswd: + cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (ud.basecmd, ud.user, ud.pswd, proto) + else: + cmd = "%s pull" % (ud.basecmd) + elif command == "update": + if ud.user and ud.pswd: + cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" update -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options)) + else: + cmd = "%s update -C %s" % (ud.basecmd, " ".join(options)) + else: + raise FetchError("Invalid hg command %s" % command, ud.url) + + return cmd + + def download(self, ud, d): + """Fetch url""" + + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") + + # If the checkout doesn't exist and the mirror tarball does, extract it + if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror): + bb.utils.mkdirhier(ud.pkgdir) + runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.pkgdir) + + if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK): + # Found the source, check whether need pull + updatecmd = self._buildhgcommand(ud, d, "update") + logger.debug(1, "Running %s", updatecmd) + try: + runfetchcmd(updatecmd, d, workdir=ud.moddir) + except bb.fetch2.FetchError: + # Runnning pull in the repo + pullcmd = self._buildhgcommand(ud, d, "pull") + logger.info("Pulling " + ud.url) + # update sources there + logger.debug(1, "Running %s", pullcmd) + bb.fetch2.check_network_access(d, pullcmd, ud.url) + runfetchcmd(pullcmd, d, workdir=ud.moddir) + try: + os.unlink(ud.fullmirror) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + # No source found, clone it. 
+ if not os.path.exists(ud.moddir): + fetchcmd = self._buildhgcommand(ud, d, "fetch") + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(ud.pkgdir) + logger.debug(1, "Running %s", fetchcmd) + bb.fetch2.check_network_access(d, fetchcmd, ud.url) + runfetchcmd(fetchcmd, d, workdir=ud.pkgdir) + + # Even when we clone (fetch), we still need to update as hg's clone + # won't checkout the specified revision if its on a branch + updatecmd = self._buildhgcommand(ud, d, "update") + logger.debug(1, "Running %s", updatecmd) + runfetchcmd(updatecmd, d, workdir=ud.moddir) + + def clean(self, ud, d): + """ Clean the hg dir """ + + bb.utils.remove(ud.localpath, True) + bb.utils.remove(ud.fullmirror) + bb.utils.remove(ud.fullmirror + ".done") + + def supports_srcrev(self): + return True + + def _latest_revision(self, ud, d, name): + """ + Compute tip revision for the url + """ + bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url) + output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d) + return output.strip() + + def _build_revision(self, ud, d, name): + return ud.revision + + def _revision_key(self, ud, d, name): + """ + Return a unique key for the url + """ + return "hg:" + ud.moddir + + def build_mirror_data(self, ud, d): + # Generate a mirror tarball if needed + if ud.write_tarballs == "1" and not os.path.exists(ud.fullmirror): + # it's possible that this symlink points to read-only filesystem with PREMIRROR + if os.path.islink(ud.fullmirror): + os.unlink(ud.fullmirror) + + logger.info("Creating tarball of hg repository") + runfetchcmd("tar -czf %s %s" % (ud.fullmirror, ud.module), d, workdir=ud.pkgdir) + runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.pkgdir) + + def localpath(self, ud, d): + return ud.pkgdir + + def unpack(self, ud, destdir, d): + """ + Make a local clone or export for the url + """ + + revflag = "-r %s" % ud.revision + subdir = ud.parm.get("destsuffix", ud.module) + codir = "%s/%s" % (destdir, subdir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata != "nokeep": + if not os.access(os.path.join(codir, '.hg'), os.R_OK): + logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'") + runfetchcmd("%s init %s" % (ud.basecmd, codir), d) + logger.debug(2, "Unpack: updating source in '" + codir + "'") + runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir) + runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir) + else: + logger.debug(2, "Unpack: extracting source to '" + codir + "'") + runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir) diff --git a/poky/bitbake/lib/bb/fetch2/local.py b/poky/bitbake/lib/bb/fetch2/local.py new file mode 100644 index 0000000000..a114ac12e5 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/local.py @@ -0,0 +1,119 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
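The Local fetcher defined next resolves file:// URLs by searching FILESPATH and finally DL_DIR rather than downloading anything. A sketch of typical recipe usage (the FILESPATH value shown is an assumption about how a metadata layer might set it, not something this patch defines):

    FILESPATH = "${THISDIR}/files:${THISDIR}/${PN}"
    SRC_URI = "file://defconfig"

Glob patterns ('*') are treated specially: checkstatus() skips verifying them and need_update() never reports them as needing an update.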
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import urllib.request, urllib.parse, urllib.error +import bb +import bb.utils +from bb.fetch2 import FetchMethod, FetchError +from bb.fetch2 import logger + +class Local(FetchMethod): + def supports(self, urldata, d): + """ + Check to see if a given url represents a local fetch. + """ + return urldata.type in ['file'] + + def urldata_init(self, ud, d): + # We don't set localfile as for this fetcher the file is already local! + ud.decodedurl = urllib.parse.unquote(ud.url.split("://")[1].split(";")[0]) + ud.basename = os.path.basename(ud.decodedurl) + ud.basepath = ud.decodedurl + ud.needdonestamp = False + return + + def localpath(self, urldata, d): + """ + Return the local filename of a given url assuming a successful fetch. + """ + return self.localpaths(urldata, d)[-1] + + def localpaths(self, urldata, d): + """ + Return the local filename of a given url assuming a successful fetch. + """ + searched = [] + path = urldata.decodedurl + newpath = path + if path[0] == "/": + return [path] + filespath = d.getVar('FILESPATH') + if filespath: + logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":")))) + newpath, hist = bb.utils.which(filespath, path, history=True) + searched.extend(hist) + if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1: + # For expressions using '*', best we can do is take the first directory in FILESPATH that exists + newpath, hist = bb.utils.which(filespath, ".", history=True) + searched.extend(hist) + logger.debug(2, "Searching for %s in path: %s" % (path, newpath)) + return searched + if not os.path.exists(newpath): + dldirfile = os.path.join(d.getVar("DL_DIR"), path) + logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path)) + bb.utils.mkdirhier(os.path.dirname(dldirfile)) + searched.append(dldirfile) + return searched + return searched + + def need_update(self, ud, d): + if ud.url.find("*") != -1: + return False + if os.path.exists(ud.localpath): + return False + return True + + def download(self, urldata, d): + """Fetch urls (no-op for Local method)""" + # no need to fetch local files, we'll deal with them in place. + if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath): + locations = [] + filespath = d.getVar('FILESPATH') + if filespath: + locations = filespath.split(":") + locations.append(d.getVar("DL_DIR")) + + msg = "Unable to find file " + urldata.url + " anywhere. 
The paths that were searched were:\n " + "\n ".join(locations) + raise FetchError(msg) + + return True + + def checkstatus(self, fetch, urldata, d): + """ + Check the status of the url + """ + if urldata.localpath.find("*") != -1: + logger.info("URL %s looks like a glob and was therefore not checked.", urldata.url) + return True + if os.path.exists(urldata.localpath): + return True + return False + + def clean(self, urldata, d): + return + diff --git a/poky/bitbake/lib/bb/fetch2/npm.py b/poky/bitbake/lib/bb/fetch2/npm.py new file mode 100644 index 0000000000..730c346a93 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/npm.py @@ -0,0 +1,309 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' NPM implementation + +The NPM fetcher is used to retrieve files from the npmjs repository + +Usage in the recipe: + + SRC_URI = "npm://registry.npmjs.org/;name=${PN};version=${PV}" + Suported SRC_URI options are: + + - name + - version + + npm://registry.npmjs.org/${PN}/-/${PN}-${PV}.tgz would become npm://registry.npmjs.org;name=${PN};version=${PV} + The fetcher all triggers off the existence of ud.localpath. If that exists and has the ".done" stamp, its assumed the fetch is good/done + +""" + +import os +import sys +import urllib.request, urllib.parse, urllib.error +import json +import subprocess +import signal +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import ChecksumError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger +from bb.fetch2 import UnpackError +from bb.fetch2 import ParameterError +from distutils import spawn + +def subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
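Building on the docstring above, the npm fetcher is driven by the name and version URL parameters, and download() additionally consults NPM_SHRINKWRAP and NPM_LOCKDOWN for dependency pinning and checksum verification. The SRC_URI form below comes from the docstring; the two file paths are placeholders:

    SRC_URI = "npm://registry.npmjs.org/;name=${PN};version=${PV}"
    NPM_SHRINKWRAP = "${THISDIR}/npm-shrinkwrap.json"
    NPM_LOCKDOWN = "${THISDIR}/lockdown.json"

Without those files the fetcher warns that builds will be unreliable/unreproducible but still resolves dependencies via 'npm view'.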
+ # SIGPIPE errors are known issues with gzip/bash + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +class Npm(FetchMethod): + + """Class to fetch urls via 'npm'""" + def init(self, d): + pass + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with npm + """ + return ud.type in ['npm'] + + def debug(self, msg): + logger.debug(1, "NpmFetch: %s", msg) + + def clean(self, ud, d): + logger.debug(2, "Calling cleanup %s" % ud.pkgname) + bb.utils.remove(ud.localpath, False) + bb.utils.remove(ud.pkgdatadir, True) + bb.utils.remove(ud.fullmirror, False) + + def urldata_init(self, ud, d): + """ + init NPM specific variable within url data + """ + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + # can't call it ud.name otherwise fetcher base class will start doing sha1stuff + # TODO: find a way to get an sha1/sha256 manifest of pkg & all deps + ud.pkgname = ud.parm.get("name", None) + if not ud.pkgname: + raise ParameterError("NPM fetcher requires a name parameter", ud.url) + ud.version = ud.parm.get("version", None) + if not ud.version: + raise ParameterError("NPM fetcher requires a version parameter", ud.url) + ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version) + ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-') + ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0] + prefixdir = "npm/%s" % ud.pkgname + ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir) + if not os.path.exists(ud.pkgdatadir): + bb.utils.mkdirhier(ud.pkgdatadir) + ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest) + + self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate " + ud.prefixdir = prefixdir + + ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0") + mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version) + mirrortarball = mirrortarball.replace('/', '-') + ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball) + ud.mirrortarballs = [mirrortarball] + + def need_update(self, ud, d): + if os.path.exists(ud.localpath): + return False + return True + + def _runwget(self, ud, d, command, quiet): + logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command)) + bb.fetch2.check_network_access(d, command, ud.url) + dldir = d.getVar("DL_DIR") + runfetchcmd(command, d, quiet, workdir=dldir) + + def _unpackdep(self, ud, pkg, data, destdir, dldir, d): + file = data[pkg]['tgz'] + logger.debug(2, "file to extract is %s" % file) + if file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): + cmd = 'tar xz --strip 1 --no-same-owner --warning=no-unknown-keyword -f %s/%s' % (dldir, file) + else: + bb.fatal("NPM package %s downloaded not a tarball!" 
% file) + + # Change to subdir before executing command + if not os.path.exists(destdir): + os.makedirs(destdir) + path = d.getVar('PATH') + if path: + cmd = "PATH=\"%s\" %s" % (path, cmd) + bb.note("Unpacking %s to %s/" % (file, destdir)) + ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=destdir) + + if ret != 0: + raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url) + + if 'deps' not in data[pkg]: + return + for dep in data[pkg]['deps']: + self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d) + + + def unpack(self, ud, destdir, d): + dldir = d.getVar("DL_DIR") + with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile: + workobj = json.load(datafile) + dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname) + + if 'subdir' in ud.parm: + unpackdir = '%s/%s' % (destdir, ud.parm.get('subdir')) + else: + unpackdir = '%s/npmpkg' % destdir + + self._unpackdep(ud, ud.pkgname, workobj, unpackdir, dldir, d) + + def _parse_view(self, output): + ''' + Parse the output of npm view --json; the last JSON result + is assumed to be the one that we're interested in. + ''' + pdata = None + outdeps = {} + datalines = [] + bracelevel = 0 + for line in output.splitlines(): + if bracelevel: + datalines.append(line) + elif '{' in line: + datalines = [] + datalines.append(line) + bracelevel = bracelevel + line.count('{') - line.count('}') + if datalines: + pdata = json.loads('\n'.join(datalines)) + return pdata + + def _getdependencies(self, pkg, data, version, d, ud, optional=False, fetchedlist=None): + if fetchedlist is None: + fetchedlist = [] + pkgfullname = pkg + if version != '*' and not '/' in version: + pkgfullname += "@'%s'" % version + logger.debug(2, "Calling getdeps on %s" % pkg) + fetchcmd = "npm view %s --json --registry %s" % (pkgfullname, ud.registry) + output = runfetchcmd(fetchcmd, d, True) + pdata = self._parse_view(output) + if not pdata: + raise FetchError("The command '%s' returned no output" % fetchcmd) + if optional: + pkg_os = pdata.get('os', None) + if pkg_os: + if not isinstance(pkg_os, list): + pkg_os = [pkg_os] + blacklist = False + for item in pkg_os: + if item.startswith('!'): + blacklist = True + break + if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os: + logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg) + return + #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile)) + outputurl = pdata['dist']['tarball'] + data[pkg] = {} + data[pkg]['tgz'] = os.path.basename(outputurl) + if outputurl in fetchedlist: + return + + self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False) + fetchedlist.append(outputurl) + + dependencies = pdata.get('dependencies', {}) + optionalDependencies = pdata.get('optionalDependencies', {}) + dependencies.update(optionalDependencies) + depsfound = {} + optdepsfound = {} + data[pkg]['deps'] = {} + for dep in dependencies: + if dep in optionalDependencies: + optdepsfound[dep] = dependencies[dep] + else: + depsfound[dep] = dependencies[dep] + for dep, version in optdepsfound.items(): + self._getdependencies(dep, data[pkg]['deps'], version, d, ud, optional=True, fetchedlist=fetchedlist) + for dep, version in depsfound.items(): + self._getdependencies(dep, data[pkg]['deps'], version, d, ud, fetchedlist=fetchedlist) + + def _getshrinkeddependencies(self, pkg, data, version, d, ud, lockdown, manifest, toplevel=True): + logger.debug(2, "NPM 
shrinkwrap file is %s" % data) + if toplevel: + name = data.get('name', None) + if name and name != pkg: + for obj in data.get('dependencies', []): + if obj == pkg: + self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest, False) + return + outputurl = "invalid" + if ('resolved' not in data) or (not data['resolved'].startswith('http')): + # will be the case for ${PN} + fetchcmd = "npm view %s@%s dist.tarball --registry %s" % (pkg, version, ud.registry) + logger.debug(2, "Found this matching URL: %s" % str(fetchcmd)) + outputurl = runfetchcmd(fetchcmd, d, True) + else: + outputurl = data['resolved'] + self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False) + manifest[pkg] = {} + manifest[pkg]['tgz'] = os.path.basename(outputurl).rstrip() + manifest[pkg]['deps'] = {} + + if pkg in lockdown: + sha1_expected = lockdown[pkg][version] + sha1_data = bb.utils.sha1_file("npm/%s/%s" % (ud.pkgname, manifest[pkg]['tgz'])) + if sha1_expected != sha1_data: + msg = "\nFile: '%s' has %s checksum %s when %s was expected" % (manifest[pkg]['tgz'], 'sha1', sha1_data, sha1_expected) + raise ChecksumError('Checksum mismatch!%s' % msg) + else: + logger.debug(2, "No lockdown data for %s@%s" % (pkg, version)) + + if 'dependencies' in data: + for obj in data['dependencies']: + logger.debug(2, "Found dep is %s" % str(obj)) + self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest[pkg]['deps'], False) + + def download(self, ud, d): + """Fetch url""" + jsondepobj = {} + shrinkobj = {} + lockdown = {} + + if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror): + dest = d.getVar("DL_DIR") + bb.utils.mkdirhier(dest) + runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest) + return + + if ud.parm.get("noverify", None) != '1': + shwrf = d.getVar('NPM_SHRINKWRAP') + logger.debug(2, "NPM shrinkwrap file is %s" % shwrf) + if shwrf: + try: + with open(shwrf) as datafile: + shrinkobj = json.load(datafile) + except Exception as e: + raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e))) + elif not ud.ignore_checksums: + logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname) + lckdf = d.getVar('NPM_LOCKDOWN') + logger.debug(2, "NPM lockdown file is %s" % lckdf) + if lckdf: + try: + with open(lckdf) as datafile: + lockdown = json.load(datafile) + except Exception as e: + raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e))) + elif not ud.ignore_checksums: + logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' 
% ud.pkgname) + + if ('name' not in shrinkobj): + self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud) + else: + self._getshrinkeddependencies(ud.pkgname, shrinkobj, ud.version, d, ud, lockdown, jsondepobj) + + with open(ud.localpath, 'w') as outfile: + json.dump(jsondepobj, outfile) + + def build_mirror_data(self, ud, d): + # Generate a mirror tarball if needed + if ud.write_tarballs and not os.path.exists(ud.fullmirror): + # it's possible that this symlink points to read-only filesystem with PREMIRROR + if os.path.islink(ud.fullmirror): + os.unlink(ud.fullmirror) + + dldir = d.getVar("DL_DIR") + logger.info("Creating tarball of npm data") + runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d, + workdir=dldir) + runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=dldir) diff --git a/poky/bitbake/lib/bb/fetch2/osc.py b/poky/bitbake/lib/bb/fetch2/osc.py new file mode 100644 index 0000000000..2b4f7d9c13 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/osc.py @@ -0,0 +1,132 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +Bitbake "Fetch" implementation for osc (Opensuse build service client). +Based on the svn "Fetch" implementation. + +""" + +import os +import sys +import logging +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import MissingParameterError +from bb.fetch2 import runfetchcmd + +class Osc(FetchMethod): + """Class to fetch a module or modules from Opensuse build server + repositories.""" + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with osc. + """ + return ud.type in ['osc'] + + def urldata_init(self, ud, d): + if not "module" in ud.parm: + raise MissingParameterError('module', ud.url) + + ud.module = ud.parm["module"] + + # Create paths to osc checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(d.getVar('OSCDIR'), ud.host) + ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module) + + if 'rev' in ud.parm: + ud.revision = ud.parm['rev'] + else: + pv = d.getVar("PV", False) + rev = bb.fetch2.srcrev_internal_helper(ud, d) + if rev and rev != True: + ud.revision = rev + else: + ud.revision = "" + + ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision)) + + def _buildosccommand(self, ud, d, command): + """ + Build up an ocs commandline based on ud + command is "fetch", "update", "info" + """ + + basecmd = d.expand('${FETCHCMD_osc}') + + proto = ud.parm.get('protocol', 'ocs') + + options = [] + + config = "-c %s" % self.generate_config(ud, d) + + if ud.revision: + options.append("-r %s" % ud.revision) + + coroot = self._strip_leading_slashes(ud.path) + + if command == "fetch": + osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options)) + elif command == "update": + osccmd = "%s %s up %s" % (basecmd, config, " ".join(options)) + else: + raise FetchError("Invalid osc command %s" % command, ud.url) + + return osccmd + + def download(self, ud, d): + """ + Fetch url + """ + + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") + + if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK): + oscupdatecmd = self._buildosccommand(ud, d, "update") + logger.info("Update "+ ud.url) + # update sources there + logger.debug(1, "Running %s", oscupdatecmd) + bb.fetch2.check_network_access(d, oscupdatecmd, ud.url) + runfetchcmd(oscupdatecmd, d, 
workdir=ud.moddir) + else: + oscfetchcmd = self._buildosccommand(ud, d, "fetch") + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(ud.pkgdir) + logger.debug(1, "Running %s", oscfetchcmd) + bb.fetch2.check_network_access(d, oscfetchcmd, ud.url) + runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir) + + # tar them up to a defined filename + runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, + cleanup=[ud.localpath], workdir=os.path.join(ud.pkgdir + ud.path)) + + def supports_srcrev(self): + return False + + def generate_config(self, ud, d): + """ + Generate a .oscrc to be used for this run. + """ + + config_path = os.path.join(d.getVar('OSCDIR'), "oscrc") + if (os.path.exists(config_path)): + os.remove(config_path) + + f = open(config_path, 'w') + f.write("[general]\n") + f.write("apisrv = %s\n" % ud.host) + f.write("scheme = http\n") + f.write("su-wrapper = su -c\n") + f.write("build-root = %s\n" % d.getVar('WORKDIR')) + f.write("urllist = %s\n" % d.getVar("OSCURLLIST")) + f.write("extra-pkgs = gzip\n") + f.write("\n") + f.write("[%s]\n" % ud.host) + f.write("user = %s\n" % ud.parm["user"]) + f.write("pass = %s\n" % ud.parm["pswd"]) + f.close() + + return config_path diff --git a/poky/bitbake/lib/bb/fetch2/perforce.py b/poky/bitbake/lib/bb/fetch2/perforce.py new file mode 100644 index 0000000000..3debad59f4 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/perforce.py @@ -0,0 +1,209 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementation for perforce + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2016 Kodak Alaris, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import logging +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import logger +from bb.fetch2 import runfetchcmd + +class Perforce(FetchMethod): + """ Class to fetch from perforce repositories """ + def supports(self, ud, d): + """ Check to see if a given url can be fetched with perforce. """ + return ud.type in ['p4'] + + def urldata_init(self, ud, d): + """ + Initialize perforce specific variables within url data. If P4CONFIG is + provided by the env, use it. If P4PORT is specified by the recipe, use + its values, which may override the settings in P4CONFIG. 
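The .oscrc that generate_config() writes is a plain INI file. A minimal standalone sketch of the same layout using configparser, with hypothetical host, credentials and paths rather than the variables the fetcher reads from the datastore:

    import configparser

    # Hypothetical stand-ins for ud.host, ud.parm["user"], ud.parm["pswd"],
    # WORKDIR and OSCURLLIST; the section/key names mirror generate_config().
    host = "api.example-build-service.org"
    config = configparser.ConfigParser(interpolation=None)
    config["general"] = {
        "apisrv": host,
        "scheme": "http",
        "su-wrapper": "su -c",
        "build-root": "/tmp/workdir",
        "urllist": "http://download.example.org/repositories/",
        "extra-pkgs": "gzip",
    }
    config[host] = {
        "user": "builder",
        "pass": "secret",
    }
    with open("/tmp/oscrc-example", "w") as f:
        config.write(f)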
+ """ + ud.basecmd = d.getVar('FETCHCMD_p4') + if not ud.basecmd: + ud.basecmd = "/usr/bin/env p4" + + ud.dldir = d.getVar('P4DIR') + if not ud.dldir: + ud.dldir = '%s/%s' % (d.getVar('DL_DIR'), 'p4') + + path = ud.url.split('://')[1] + path = path.split(';')[0] + delim = path.find('@'); + if delim != -1: + (ud.user, ud.pswd) = path.split('@')[0].split(':') + ud.path = path.split('@')[1] + else: + ud.path = path + + ud.usingp4config = False + p4port = d.getVar('P4PORT') + + if p4port: + logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port) + ud.host = p4port + else: + logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...') + ud.usingp4config = True + p4cmd = '%s info | grep "Server address"' % ud.basecmd + bb.fetch2.check_network_access(d, p4cmd, ud.url) + ud.host = runfetchcmd(p4cmd, d, True) + ud.host = ud.host.split(': ')[1].strip() + logger.debug(1, 'Determined P4PORT to be: %s' % ud.host) + if not ud.host: + raise FetchError('Could not determine P4PORT from P4CONFIG') + + if ud.path.find('/...') >= 0: + ud.pathisdir = True + else: + ud.pathisdir = False + + cleanedpath = ud.path.replace('/...', '').replace('/', '.') + cleanedhost = ud.host.replace(':', '.') + ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath) + + ud.setup_revisions(d) + + ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision)) + + def _buildp4command(self, ud, d, command, depot_filename=None): + """ + Build a p4 commandline. Valid commands are "changes", "print", and + "files". depot_filename is the full path to the file in the depot + including the trailing '#rev' value. + """ + p4opt = "" + + if ud.user: + p4opt += ' -u "%s"' % (ud.user) + + if ud.pswd: + p4opt += ' -P "%s"' % (ud.pswd) + + if ud.host and not ud.usingp4config: + p4opt += ' -p %s' % (ud.host) + + if hasattr(ud, 'revision') and ud.revision: + pathnrev = '%s@%s' % (ud.path, ud.revision) + else: + pathnrev = '%s' % (ud.path) + + if depot_filename: + if ud.pathisdir: # Remove leading path to obtain filename + filename = depot_filename[len(ud.path)-1:] + else: + filename = depot_filename[depot_filename.rfind('/'):] + filename = filename[:filename.find('#')] # Remove trailing '#rev' + + if command == 'changes': + p4cmd = '%s%s changes -m 1 //%s' % (ud.basecmd, p4opt, pathnrev) + elif command == 'print': + if depot_filename != None: + p4cmd = '%s%s print -o "p4/%s" "%s"' % (ud.basecmd, p4opt, filename, depot_filename) + else: + raise FetchError('No depot file name provided to p4 %s' % command, ud.url) + elif command == 'files': + p4cmd = '%s%s files //%s' % (ud.basecmd, p4opt, pathnrev) + else: + raise FetchError('Invalid p4 command %s' % command, ud.url) + + return p4cmd + + def _p4listfiles(self, ud, d): + """ + Return a list of the file names which are present in the depot using the + 'p4 files' command, including trailing '#rev' file revision indicator + """ + p4cmd = self._buildp4command(ud, d, 'files') + bb.fetch2.check_network_access(d, p4cmd, ud.url) + p4fileslist = runfetchcmd(p4cmd, d, True) + p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()] + + if not p4fileslist: + raise FetchError('Unable to fetch listing of p4 files from %s@%s' % (ud.host, ud.path)) + + count = 0 + filelist = [] + + for filename in p4fileslist: + item = filename.split(' - ') + lastaction = item[1].split() + logger.debug(1, 'File: %s Last Action: %s' % (item[0], lastaction[0])) + if lastaction[0] == 'delete': + continue + filelist.append(item[0]) + + return filelist + + def download(self, ud, d): + """ 
Get the list of files, fetch each one """ + filelist = self._p4listfiles(ud, d) + if not filelist: + raise FetchError('No files found in depot %s@%s' % (ud.host, ud.path)) + + bb.utils.remove(ud.pkgdir, True) + bb.utils.mkdirhier(ud.pkgdir) + + for afile in filelist: + p4fetchcmd = self._buildp4command(ud, d, 'print', afile) + bb.fetch2.check_network_access(d, p4fetchcmd, ud.url) + runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir) + + runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir) + + def clean(self, ud, d): + """ Cleanup p4 specific files and dirs""" + bb.utils.remove(ud.localpath) + bb.utils.remove(ud.pkgdir, True) + + def supports_srcrev(self): + return True + + def _revision_key(self, ud, d, name): + """ Return a unique key for the url """ + return 'p4:%s' % ud.pkgdir + + def _latest_revision(self, ud, d, name): + """ Return the latest upstream scm revision number """ + p4cmd = self._buildp4command(ud, d, "changes") + bb.fetch2.check_network_access(d, p4cmd, ud.url) + tip = runfetchcmd(p4cmd, d, True) + + if not tip: + raise FetchError('Could not determine the latest perforce changelist') + + tipcset = tip.split(' ')[1] + logger.debug(1, 'p4 tip found to be changelist %s' % tipcset) + return tipcset + + def sortable_revision(self, ud, d, name): + """ Return a sortable revision number """ + return False, self._build_revision(ud, d) + + def _build_revision(self, ud, d): + return ud.revision + diff --git a/poky/bitbake/lib/bb/fetch2/repo.py b/poky/bitbake/lib/bb/fetch2/repo.py new file mode 100644 index 0000000000..c22d9b5578 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/repo.py @@ -0,0 +1,97 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake "Fetch" repo (git) implementation + +""" + +# Copyright (C) 2009 Tom Rini +# +# Based on git.py which is: +#Copyright (C) 2005 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import bb +from bb.fetch2 import FetchMethod +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Repo(FetchMethod): + """Class to fetch a module or modules from repo (git) repositories""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with repo. + """ + return ud.type in ["repo"] + + def urldata_init(self, ud, d): + """ + We don"t care about the git rev of the manifests repository, but + we do care about the manifest to use. The default is "default". + We also care about the branch or tag to be used. The default is + "master". 
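_buildp4command() is straightforward string composition around the p4 client. A standalone sketch of the 'changes' case, with a hypothetical user, server and depot path:

    # Mirrors the option handling in _buildp4command(): optional -u/-P/-p flags,
    # then the subcommand, with the depot path optionally pinned to a changelist.
    def build_p4_changes_cmd(basecmd, user=None, passwd=None, host=None,
                             path="depot/project/...", revision=None):
        opts = ""
        if user:
            opts += ' -u "%s"' % user
        if passwd:
            opts += ' -P "%s"' % passwd
        if host:
            opts += ' -p %s' % host
        pathnrev = '%s@%s' % (path, revision) if revision else path
        return '%s%s changes -m 1 //%s' % (basecmd, opts, pathnrev)

    print(build_p4_changes_cmd("/usr/bin/env p4", user="jane",
                               host="perforce.example.com:1666", revision="12345"))
    # -> /usr/bin/env p4 -u "jane" -p perforce.example.com:1666 changes -m 1 //depot/project/...@12345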
+ """ + + ud.proto = ud.parm.get('protocol', 'git') + ud.branch = ud.parm.get('branch', 'master') + ud.manifest = ud.parm.get('manifest', 'default.xml') + if not ud.manifest.endswith('.xml'): + ud.manifest += '.xml' + + ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch)) + + def download(self, ud, d): + """Fetch url""" + + if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK): + logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath) + return + + gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", ".")) + repodir = d.getVar("REPODIR") or os.path.join(d.getVar("DL_DIR"), "repo") + codir = os.path.join(repodir, gitsrcname, ud.manifest) + + if ud.user: + username = ud.user + "@" + else: + username = "" + + repodir = os.path.join(codir, "repo") + bb.utils.mkdirhier(repodir) + if not os.path.exists(os.path.join(repodir, ".repo")): + bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url) + runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir) + + bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url) + runfetchcmd("repo sync", d, workdir=repodir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude='.repo' --exclude='.git'" + + # Create a cache + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d, workdir=codir) + + def supports_srcrev(self): + return False + + def _build_revision(self, ud, d): + return ud.manifest + + def _want_sortable_revision(self, ud, d): + return False diff --git a/poky/bitbake/lib/bb/fetch2/s3.py b/poky/bitbake/lib/bb/fetch2/s3.py new file mode 100644 index 0000000000..1629288622 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/s3.py @@ -0,0 +1,98 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementation for Amazon AWS S3. + +Class for fetching files from Amazon S3 using the AWS Command Line Interface. +The aws tool must be correctly installed and configured prior to use. + +""" + +# Copyright (C) 2017, Andre McCurdy +# +# Based in part on bb.fetch2.wget: +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import bb +import urllib.request, urllib.parse, urllib.error +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import runfetchcmd + +class S3(FetchMethod): + """Class to fetch urls via 'aws s3'""" + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with s3. 
+ """ + return ud.type in ['s3'] + + def recommends_checksum(self, urldata): + return True + + def urldata_init(self, ud, d): + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + ud.localfile = d.expand(urllib.parse.unquote(ud.basename)) + + ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3" + + def download(self, ud, d): + """ + Fetch urls + Assumes localpath was called first + """ + + cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath) + bb.fetch2.check_network_access(d, cmd, ud.url) + runfetchcmd(cmd, d) + + # Additional sanity checks copied from the wget class (although there + # are no known issues which mean these are required, treat the aws cli + # tool with a little healthy suspicion). + + if not os.path.exists(ud.localpath): + raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath)) + + if os.path.getsize(ud.localpath) == 0: + os.remove(ud.localpath) + raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path)) + + return True + + def checkstatus(self, fetch, ud, d): + """ + Check the status of a URL + """ + + cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path) + bb.fetch2.check_network_access(d, cmd, ud.url) + output = runfetchcmd(cmd, d) + + # "aws s3 ls s3://mybucket/foo" will exit with success even if the file + # is not found, so check output of the command to confirm success. + + if not output: + raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path)) + + return True diff --git a/poky/bitbake/lib/bb/fetch2/sftp.py b/poky/bitbake/lib/bb/fetch2/sftp.py new file mode 100644 index 0000000000..81884a6aa4 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/sftp.py @@ -0,0 +1,125 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake SFTP Fetch implementation + +Class for fetching files via SFTP. It tries to adhere to the (now +expired) IETF Internet Draft for "Uniform Resource Identifier (URI) +Scheme for Secure File Transfer Protocol (SFTP) and Secure Shell +(SSH)" (SECSH URI). + +It uses SFTP (as to adhere to the SECSH URI specification). It only +supports key based authentication, not password. This class, unlike +the SSH fetcher, does not support fetching a directory tree from the +remote. + + http://tools.ietf.org/html/draft-ietf-secsh-scp-sftp-ssh-uri-04 + https://www.iana.org/assignments/uri-schemes/prov/sftp + https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13 + +Please note that '/' is used as host path seperator, and not ":" +as you may be used to from the scp/sftp commands. You can use a +~ (tilde) to specify a path relative to your home directory. +(The /~user/ syntax, for specyfing a path relative to another +user's home directory is not supported.) Note that the tilde must +still follow the host path seperator ("/"). See exampels below. + +Example SRC_URIs: + +SRC_URI = "sftp://host.example.com/dir/path.file.txt" + +A path relative to your home directory. 
+ +SRC_URI = "sftp://host.example.com/~/dir/path.file.txt" + +You can also specify a username (specyfing password in the +URI is not supported, use SSH keys to authenticate): + +SRC_URI = "sftp://user@host.example.com/dir/path.file.txt" + +""" + +# Copyright (C) 2013, Olof Johansson +# +# Based in part on bb.fetch2.wget: +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import bb +import urllib.request, urllib.parse, urllib.error +from bb.fetch2 import URI +from bb.fetch2 import FetchMethod +from bb.fetch2 import runfetchcmd + +class SFTP(FetchMethod): + """Class to fetch urls via 'sftp'""" + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with sftp. + """ + return ud.type in ['sftp'] + + def recommends_checksum(self, urldata): + return True + + def urldata_init(self, ud, d): + if 'protocol' in ud.parm and ud.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError( + "Invalid protocol - if you wish to fetch from a " + + "git repository using ssh, you need to use the " + + "git:// prefix with protocol=ssh", ud.url) + + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + ud.localfile = d.expand(urllib.parse.unquote(ud.basename)) + + def download(self, ud, d): + """Fetch urls""" + + urlo = URI(ud.url) + basecmd = 'sftp -oBatchMode=yes' + port = '' + if urlo.port: + port = '-P %d' % urlo.port + urlo.port = None + + dldir = d.getVar('DL_DIR') + lpath = os.path.join(dldir, ud.localfile) + + user = '' + if urlo.userinfo: + user = urlo.userinfo + '@' + + path = urlo.path + + # Supoprt URIs relative to the user's home directory, with + # the tilde syntax. (E.g. ). + if path[:3] == '/~/': + path = path[3:] + + remote = '%s%s:%s' % (user, urlo.hostname, path) + + cmd = '%s %s %s %s' % (basecmd, port, remote, lpath) + + bb.fetch2.check_network_access(d, cmd, ud.url) + runfetchcmd(cmd, d) + return True diff --git a/poky/bitbake/lib/bb/fetch2/ssh.py b/poky/bitbake/lib/bb/fetch2/ssh.py new file mode 100644 index 0000000000..6047ee417a --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/ssh.py @@ -0,0 +1,125 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +''' +BitBake 'Fetch' implementations + +This implementation is for Secure Shell (SSH), and attempts to comply with the +IETF secsh internet draft: + http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/ + + Currently does not support the sftp parameters, as this uses scp + Also does not support the 'fingerprint' connection parameter. 
+ + Please note that '/' is used as host, path separator not ':' as you may + be used to, also '~' can be used to specify user HOME, but again after '/' + + Example SRC_URI: + SRC_URI = "ssh://user@host.example.com/dir/path/file.txt" + SRC_URI = "ssh://user@host.example.com/~/file.txt" +''' + +# Copyright (C) 2006 OpenedHand Ltd. +# +# +# Based in part on svk.py: +# Copyright (C) 2006 Holger Hans Peter Freyther +# Based on svn.py: +# Copyright (C) 2003, 2004 Chris Larson +# Based on functions from the base bb module: +# Copyright 2003 Holger Schurig +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import re, os +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import logger +from bb.fetch2 import runfetchcmd + + +__pattern__ = re.compile(r''' + \s* # Skip leading whitespace + ssh:// # scheme + ( # Optional username/password block + (?P\S+) # username + (:(?P\S+))? # colon followed by the password (optional) + )? + (?P(;[^;]+)*)? # connection parameters block (optional) + @ + (?P\S+?) # non-greedy match of the host + (:(?P[0-9]+))? # colon followed by the port (optional) + / + (?P[^;]+) # path on the remote system, may be absolute or relative, + # and may include the use of '~' to reference the remote home + # directory + (?P(;[^;]+)*)? 
# parameters block (optional) + $ +''', re.VERBOSE) + +class SSH(FetchMethod): + '''Class to fetch a module or modules via Secure Shell''' + + def supports(self, urldata, d): + return __pattern__.match(urldata.url) != None + + def supports_checksum(self, urldata): + return False + + def urldata_init(self, urldata, d): + if 'protocol' in urldata.parm and urldata.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError( + "Invalid protocol - if you wish to fetch from a git " + + "repository using ssh, you need to use " + + "git:// prefix with protocol=ssh", urldata.url) + m = __pattern__.match(urldata.url) + path = m.group('path') + host = m.group('host') + urldata.localpath = os.path.join(d.getVar('DL_DIR'), + os.path.basename(os.path.normpath(path))) + + def download(self, urldata, d): + dldir = d.getVar('DL_DIR') + + m = __pattern__.match(urldata.url) + path = m.group('path') + host = m.group('host') + port = m.group('port') + user = m.group('user') + password = m.group('pass') + + if port: + portarg = '-P %s' % port + else: + portarg = '' + + if user: + fr = user + if password: + fr += ':%s' % password + fr += '@%s' % host + else: + fr = host + fr += ':%s' % path + + cmd = 'scp -B -r %s %s %s/' % ( + portarg, + fr, + dldir + ) + + bb.fetch2.check_network_access(d, cmd, urldata.url) + + runfetchcmd(cmd, d) + diff --git a/poky/bitbake/lib/bb/fetch2/svn.py b/poky/bitbake/lib/bb/fetch2/svn.py new file mode 100644 index 0000000000..3f172eec9b --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/svn.py @@ -0,0 +1,193 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementation for svn. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2004 Marcin Juszkiewicz +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import sys +import logging +import bb +import re +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import MissingParameterError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Svn(FetchMethod): + """Class to fetch a module or modules from svn repositories""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with svn. 
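A simplified, self-contained version of the ssh URL parsing: named groups corresponding to the m.group() calls in urldata_init() and download(), exercised on a hypothetical URL:

    import re

    # A simplified stand-in for __pattern__: named groups for the optional
    # user[:pass]@ block, the host, an optional :port and the remote path.
    pattern = re.compile(r'''
        \s*ssh://
        ((?P<user>[^:@;]+)(:(?P<pass>[^@;]+))?@)?   # optional user and password
        (?P<host>[^:/;]+)                           # host
        (:(?P<port>[0-9]+))?                        # optional port
        /(?P<path>[^;]+)                            # remote path, may start with ~
    ''', re.VERBOSE)

    m = pattern.match("ssh://user@host.example.com:2222/~/file.txt")
    print(m.group('user'), m.group('host'), m.group('port'), m.group('path'))
    # -> user host.example.com 2222 ~/file.txt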
+ """ + return ud.type in ['svn'] + + def urldata_init(self, ud, d): + """ + init svn specific variable within url data + """ + if not "module" in ud.parm: + raise MissingParameterError('module', ud.url) + + ud.basecmd = d.getVar('FETCHCMD_svn') + + ud.module = ud.parm["module"] + + if not "path_spec" in ud.parm: + ud.path_spec = ud.module + else: + ud.path_spec = ud.parm["path_spec"] + + # Create paths to svn checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(d.expand('${SVNDIR}'), ud.host, relpath) + ud.moddir = os.path.join(ud.pkgdir, ud.module) + + ud.setup_revisions(d) + + if 'rev' in ud.parm: + ud.revision = ud.parm['rev'] + + ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision)) + + def _buildsvncommand(self, ud, d, command): + """ + Build up an svn commandline based on ud + command is "fetch", "update", "info" + """ + + proto = ud.parm.get('protocol', 'svn') + + svn_ssh = None + if proto == "svn+ssh" and "ssh" in ud.parm: + svn_ssh = ud.parm["ssh"] + + svnroot = ud.host + ud.path + + options = [] + + options.append("--no-auth-cache") + + if ud.user: + options.append("--username %s" % ud.user) + + if ud.pswd: + options.append("--password %s" % ud.pswd) + + if command == "info": + svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) + elif command == "log1": + svncmd = "%s log --limit 1 %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) + else: + suffix = "" + if ud.revision: + options.append("-r %s" % ud.revision) + suffix = "@%s" % (ud.revision) + + if command == "fetch": + transportuser = ud.parm.get("transportuser", "") + svncmd = "%s co %s %s://%s%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, transportuser, svnroot, ud.module, suffix, ud.path_spec) + elif command == "update": + svncmd = "%s update %s" % (ud.basecmd, " ".join(options)) + else: + raise FetchError("Invalid svn command %s" % command, ud.url) + + if svn_ssh: + svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd) + + return svncmd + + def download(self, ud, d): + """Fetch url""" + + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") + + if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): + svnupdatecmd = self._buildsvncommand(ud, d, "update") + logger.info("Update " + ud.url) + # We need to attempt to run svn upgrade first in case its an older working format + try: + runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir) + except FetchError: + pass + logger.debug(1, "Running %s", svnupdatecmd) + bb.fetch2.check_network_access(d, svnupdatecmd, ud.url) + runfetchcmd(svnupdatecmd, d, workdir=ud.moddir) + else: + svnfetchcmd = self._buildsvncommand(ud, d, "fetch") + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(ud.pkgdir) + logger.debug(1, "Running %s", svnfetchcmd) + bb.fetch2.check_network_access(d, svnfetchcmd, ud.url) + runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude='.svn'" + + # tar them up to a defined filename + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d, + cleanup=[ud.localpath], workdir=ud.pkgdir) + + def clean(self, ud, d): + """ Clean SVN specific files and dirs """ + + bb.utils.remove(ud.localpath) + bb.utils.remove(ud.moddir, True) + + + def supports_srcrev(self): + return True + + def _revision_key(self, ud, d, name): + 
""" + Return a unique key for the url + """ + return "svn:" + ud.moddir + + def _latest_revision(self, ud, d, name): + """ + Return the latest upstream revision number + """ + bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url) + + output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True) + + # skip the first line, as per output of svn log + # then we expect the revision on the 2nd line + revision = re.search('^r([0-9]*)', output.splitlines()[1]).group(1) + + return revision + + def sortable_revision(self, ud, d, name): + """ + Return a sortable revision number which in our case is the revision number + """ + + return False, self._build_revision(ud, d) + + def _build_revision(self, ud, d): + return ud.revision diff --git a/poky/bitbake/lib/bb/fetch2/wget.py b/poky/bitbake/lib/bb/fetch2/wget.py new file mode 100644 index 0000000000..8f505b6de9 --- /dev/null +++ b/poky/bitbake/lib/bb/fetch2/wget.py @@ -0,0 +1,626 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import re +import tempfile +import subprocess +import os +import logging +import errno +import bb +import bb.progress +import urllib.request, urllib.parse, urllib.error +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import logger +from bb.fetch2 import runfetchcmd +from bb.utils import export_proxies +from bs4 import BeautifulSoup +from bs4 import SoupStrainer + +class WgetProgressHandler(bb.progress.LineFilterProgressHandler): + """ + Extract progress information from wget output. + Note: relies on --progress=dot (with -v or without -q/-nv) being + specified on the wget command line. + """ + def __init__(self, d): + super(WgetProgressHandler, self).__init__(d) + # Send an initial progress event so the bar gets shown + self._fire_progress(0) + + def writeline(self, line): + percs = re.findall(r'(\d+)%\s+([\d.]+[A-Z])', line) + if percs: + progress = int(percs[-1][0]) + rate = percs[-1][1] + '/s' + self.update(progress, rate) + return False + return True + + +class Wget(FetchMethod): + """Class to fetch urls via 'wget'""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with wget. 
+ """ + return ud.type in ['http', 'https', 'ftp'] + + def recommends_checksum(self, urldata): + return True + + def urldata_init(self, ud, d): + if 'protocol' in ud.parm: + if ud.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError("Invalid protocol - if you wish to fetch from a git repository using http, you need to instead use the git:// prefix with protocol=http", ud.url) + + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + ud.localfile = d.expand(urllib.parse.unquote(ud.basename)) + if not ud.localfile: + ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", ".")) + + self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate" + + def _runwget(self, ud, d, command, quiet, workdir=None): + + progresshandler = WgetProgressHandler(d) + + logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command)) + bb.fetch2.check_network_access(d, command, ud.url) + runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir) + + def download(self, ud, d): + """Fetch urls""" + + fetchcmd = self.basecmd + + if 'downloadfilename' in ud.parm: + dldir = d.getVar("DL_DIR") + bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile)) + fetchcmd += " -O " + dldir + os.sep + ud.localfile + + if ud.user and ud.pswd: + fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd) + + uri = ud.url.split(";")[0] + if os.path.exists(ud.localpath): + # file exists, but we didnt complete it.. trying again.. + fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri) + else: + fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri) + + self._runwget(ud, d, fetchcmd, False) + + # Sanity check since wget can pretend it succeed when it didn't + # Also, this used to happen if sourceforge sent us to the mirror page + if not os.path.exists(ud.localpath): + raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri) + + if os.path.getsize(ud.localpath) == 0: + os.remove(ud.localpath) + raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri) + + return True + + def checkstatus(self, fetch, ud, d, try_again=True): + import urllib.request, urllib.error, urllib.parse, socket, http.client + from urllib.response import addinfourl + from bb.fetch2 import FetchConnectionCache + + class HTTPConnectionCache(http.client.HTTPConnection): + if fetch.connection_cache: + def connect(self): + """Connect to the host and port specified in __init__.""" + + sock = fetch.connection_cache.get_connection(self.host, self.port) + if sock: + self.sock = sock + else: + self.sock = socket.create_connection((self.host, self.port), + self.timeout, self.source_address) + fetch.connection_cache.add_connection(self.host, self.port, self.sock) + + if self._tunnel_host: + self._tunnel() + + class CacheHTTPHandler(urllib.request.HTTPHandler): + def http_open(self, req): + return self.do_open(HTTPConnectionCache, req) + + def do_open(self, http_class, req): + """Return an addinfourl object for the request, using http_class. + + http_class must implement the HTTPConnection API from httplib. + The addinfourl return value is a file-like object. 
It also + has methods and attributes including: + - info(): return a mimetools.Message object for the headers + - geturl(): return the original request URL + - code: HTTP status code + """ + host = req.host + if not host: + raise urlllib2.URLError('no host given') + + h = http_class(host, timeout=req.timeout) # will parse host:port + h.set_debuglevel(self._debuglevel) + + headers = dict(req.unredirected_hdrs) + headers.update(dict((k, v) for k, v in list(req.headers.items()) + if k not in headers)) + + # We want to make an HTTP/1.1 request, but the addinfourl + # class isn't prepared to deal with a persistent connection. + # It will try to read all remaining data from the socket, + # which will block while the server waits for the next request. + # So make sure the connection gets closed after the (only) + # request. + + # Don't close connection when connection_cache is enabled, + if fetch.connection_cache is None: + headers["Connection"] = "close" + else: + headers["Connection"] = "Keep-Alive" # Works for HTTP/1.0 + + headers = dict( + (name.title(), val) for name, val in list(headers.items())) + + if req._tunnel_host: + tunnel_headers = {} + proxy_auth_hdr = "Proxy-Authorization" + if proxy_auth_hdr in headers: + tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] + # Proxy-Authorization should not be sent to origin + # server. + del headers[proxy_auth_hdr] + h.set_tunnel(req._tunnel_host, headers=tunnel_headers) + + try: + h.request(req.get_method(), req.selector, req.data, headers) + except socket.error as err: # XXX what error? + # Don't close connection when cache is enabled. + # Instead, try to detect connections that are no longer + # usable (for example, closed unexpectedly) and remove + # them from the cache. + if fetch.connection_cache is None: + h.close() + elif isinstance(err, OSError) and err.errno == errno.EBADF: + # This happens when the server closes the connection despite the Keep-Alive. + # Apparently urllib then uses the file descriptor, expecting it to be + # connected, when in reality the connection is already gone. + # We let the request fail and expect it to be + # tried once more ("try_again" in check_status()), + # with the dead connection removed from the cache. + # If it still fails, we give up, which can happend for bad + # HTTP proxy settings. + fetch.connection_cache.remove_connection(h.host, h.port) + raise urllib.error.URLError(err) + else: + try: + r = h.getresponse(buffering=True) + except TypeError: # buffering kw not supported + r = h.getresponse() + + # Pick apart the HTTPResponse object to get the addinfourl + # object initialized properly. + + # Wrap the HTTPResponse object in socket's file object adapter + # for Windows. That adapter calls recv(), so delegate recv() + # to read(). This weird wrapping allows the returned object to + # have readline() and readlines() methods. + + # XXX It might be better to extract the read buffering code + # out of socket._fileobject() and into a base class. + r.recv = r.read + + # no data, just have to read + r.read() + class fp_dummy(object): + def read(self): + return "" + def readline(self): + return "" + def close(self): + pass + closed = False + + resp = addinfourl(fp_dummy(), r.msg, req.get_full_url()) + resp.code = r.status + resp.msg = r.reason + + # Close connection when server request it. 
+ if fetch.connection_cache is not None: + if 'Connection' in r.msg and r.msg['Connection'] == 'close': + fetch.connection_cache.remove_connection(h.host, h.port) + + return resp + + class HTTPMethodFallback(urllib.request.BaseHandler): + """ + Fallback to GET if HEAD is not allowed (405 HTTP error) + """ + def http_error_405(self, req, fp, code, msg, headers): + fp.read() + fp.close() + + newheaders = dict((k,v) for k,v in list(req.headers.items()) + if k.lower() not in ("content-length", "content-type")) + return self.parent.open(urllib.request.Request(req.get_full_url(), + headers=newheaders, + origin_req_host=req.origin_req_host, + unverifiable=True)) + + """ + Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403 + Forbidden when they actually mean 405 Method Not Allowed. + """ + http_error_403 = http_error_405 + + + class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler): + """ + urllib2.HTTPRedirectHandler resets the method to GET on redirect, + when we want to follow redirects using the original method. + """ + def redirect_request(self, req, fp, code, msg, headers, newurl): + newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl) + newreq.get_method = lambda: req.get_method() + return newreq + exported_proxies = export_proxies(d) + + handlers = [FixedHTTPRedirectHandler, HTTPMethodFallback] + if export_proxies: + handlers.append(urllib.request.ProxyHandler()) + handlers.append(CacheHTTPHandler()) + # XXX: Since Python 2.7.9 ssl cert validation is enabled by default + # see PEP-0476, this causes verification errors on some https servers + # so disable by default. + import ssl + if hasattr(ssl, '_create_unverified_context'): + handlers.append(urllib.request.HTTPSHandler(context=ssl._create_unverified_context())) + opener = urllib.request.build_opener(*handlers) + + try: + uri = ud.url.split(";")[0] + r = urllib.request.Request(uri) + r.get_method = lambda: "HEAD" + # Some servers (FusionForge, as used on Alioth) require that the + # optional Accept header is set. + r.add_header("Accept", "*/*") + def add_basic_auth(login_str, request): + '''Adds Basic auth to http request, pass in login:password as string''' + import base64 + encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8") + authheader = "Basic %s" % encodeuser + r.add_header("Authorization", authheader) + + if ud.user: + add_basic_auth(ud.user, r) + + try: + import netrc, urllib.parse + n = netrc.netrc() + login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname) + add_basic_auth("%s:%s" % (login, password), r) + except (TypeError, ImportError, IOError, netrc.NetrcParseError): + pass + + with opener.open(r) as response: + pass + except urllib.error.URLError as e: + if try_again: + logger.debug(2, "checkstatus: trying again") + return self.checkstatus(fetch, ud, d, False) + else: + # debug for now to avoid spamming the logs in e.g. 
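add_basic_auth() in checkstatus() builds a standard HTTP Basic Authorization header. A standalone sketch of the same construction with hypothetical credentials and URL:

    import base64
    import urllib.request

    def add_basic_auth(login_str, request):
        # Same idea as the helper in checkstatus(): base64-encode "user:password"
        # and attach it as an Authorization header.
        encoded = base64.b64encode(login_str.encode('utf-8')).decode('utf-8')
        request.add_header("Authorization", "Basic %s" % encoded)

    req = urllib.request.Request("https://downloads.example.com/foo.tar.gz")
    add_basic_auth("builder:secret", req)
    print(req.get_header("Authorization"))  # -> Basic YnVpbGRlcjpzZWNyZXQ=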
remote sstate searches + logger.debug(2, "checkstatus() urlopen failed: %s" % e) + return False + return True + + def _parse_path(self, regex, s): + """ + Find and group name, version and archive type in the given string s + """ + + m = regex.search(s) + if m: + pname = '' + pver = '' + ptype = '' + + mdict = m.groupdict() + if 'name' in mdict.keys(): + pname = mdict['name'] + if 'pver' in mdict.keys(): + pver = mdict['pver'] + if 'type' in mdict.keys(): + ptype = mdict['type'] + + bb.debug(3, "_parse_path: %s, %s, %s" % (pname, pver, ptype)) + + return (pname, pver, ptype) + + return None + + def _modelate_version(self, version): + if version[0] in ['.', '-']: + if version[1].isdigit(): + version = version[1] + version[0] + version[2:len(version)] + else: + version = version[1:len(version)] + + version = re.sub('-', '.', version) + version = re.sub('_', '.', version) + version = re.sub('(rc)+', '.1000.', version) + version = re.sub('(beta)+', '.100.', version) + version = re.sub('(alpha)+', '.10.', version) + if version[0] == 'v': + version = version[1:len(version)] + return version + + def _vercmp(self, old, new): + """ + Check whether 'new' is newer than 'old' version. We use existing vercmp() for the + purpose. PE is cleared in comparison as it's not for build, and PR is cleared too + for simplicity as it's somehow difficult to get from various upstream format + """ + + (oldpn, oldpv, oldsuffix) = old + (newpn, newpv, newsuffix) = new + + """ + Check for a new suffix type that we have never heard of before + """ + if (newsuffix): + m = self.suffix_regex_comp.search(newsuffix) + if not m: + bb.warn("%s has a possible unknown suffix: %s" % (newpn, newsuffix)) + return False + + """ + Not our package so ignore it + """ + if oldpn != newpn: + return False + + oldpv = self._modelate_version(oldpv) + newpv = self._modelate_version(newpv) + + return bb.utils.vercmp(("0", oldpv, ""), ("0", newpv, "")) + + def _fetch_index(self, uri, ud, d): + """ + Run fetch checkstatus to get directory information + """ + f = tempfile.NamedTemporaryFile() + with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f: + agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12" + fetchcmd = self.basecmd + fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'" + try: + self._runwget(ud, d, fetchcmd, True, workdir=workdir) + fetchresult = f.read() + except bb.fetch2.BBFetchException: + fetchresult = "" + + return fetchresult + + def _check_latest_version(self, url, package, package_regex, current_version, ud, d): + """ + Return the latest version of a package inside a given directory path + If error or no version, return "" + """ + valid = 0 + version = ['', '', ''] + + bb.debug(3, "VersionURL: %s" % (url)) + soup = BeautifulSoup(self._fetch_index(url, ud, d), "html.parser", parse_only=SoupStrainer("a")) + if not soup: + bb.debug(3, "*** %s NO SOUP" % (url)) + return "" + + for line in soup.find_all('a', href=True): + bb.debug(3, "line['href'] = '%s'" % (line['href'])) + bb.debug(3, "line = '%s'" % (str(line))) + + newver = self._parse_path(package_regex, line['href']) + if not newver: + newver = self._parse_path(package_regex, str(line)) + + if newver: + bb.debug(3, "Upstream version found: %s" % newver[1]) + if valid == 0: + version = newver + valid = 1 + elif self._vercmp(version, newver) < 0: + version = newver + + pupver = re.sub('_', '.', version[1]) + + 
bb.debug(3, "*** %s -> UpstreamVersion = %s (CurrentVersion = %s)" % + (package, pupver or "N/A", current_version[1])) + + if valid: + return pupver + + return "" + + def _check_latest_version_by_dir(self, dirver, package, package_regex, + current_version, ud, d): + """ + Scan every directory in order to get upstream version. + """ + version_dir = ['', '', ''] + version = ['', '', ''] + + dirver_regex = re.compile("(?P\D*)(?P(\d+[\.\-_])+(\d+))") + s = dirver_regex.search(dirver) + if s: + version_dir[1] = s.group('ver') + else: + version_dir[1] = dirver + + dirs_uri = bb.fetch.encodeurl([ud.type, ud.host, + ud.path.split(dirver)[0], ud.user, ud.pswd, {}]) + bb.debug(3, "DirURL: %s, %s" % (dirs_uri, package)) + + soup = BeautifulSoup(self._fetch_index(dirs_uri, ud, d), "html.parser", parse_only=SoupStrainer("a")) + if not soup: + return version[1] + + for line in soup.find_all('a', href=True): + s = dirver_regex.search(line['href'].strip("/")) + if s: + sver = s.group('ver') + + # When prefix is part of the version directory it need to + # ensure that only version directory is used so remove previous + # directories if exists. + # + # Example: pfx = '/dir1/dir2/v' and version = '2.5' the expected + # result is v2.5. + spfx = s.group('pfx').split('/')[-1] + + version_dir_new = ['', sver, ''] + if self._vercmp(version_dir, version_dir_new) <= 0: + dirver_new = spfx + sver + path = ud.path.replace(dirver, dirver_new, True) \ + .split(package)[0] + uri = bb.fetch.encodeurl([ud.type, ud.host, path, + ud.user, ud.pswd, {}]) + + pupver = self._check_latest_version(uri, + package, package_regex, current_version, ud, d) + if pupver: + version[1] = pupver + + version_dir = version_dir_new + + return version[1] + + def _init_regexes(self, package, ud, d): + """ + Match as many patterns as possible such as: + gnome-common-2.20.0.tar.gz (most common format) + gtk+-2.90.1.tar.gz + xf86-input-synaptics-12.6.9.tar.gz + dri2proto-2.3.tar.gz + blktool_4.orig.tar.gz + libid3tag-0.15.1b.tar.gz + unzip552.tar.gz + icu4c-3_6-src.tgz + genext2fs_1.3.orig.tar.gz + gst-fluendo-mp3 + """ + # match most patterns which uses "-" as separator to version digits + pn_prefix1 = "[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]" + # a loose pattern such as for unzip552.tar.gz + pn_prefix2 = "[a-zA-Z]+" + # a loose pattern such as for 80325-quicky-0.4.tar.gz + pn_prefix3 = "[0-9]+[-]?[a-zA-Z]+" + # Save the Package Name (pn) Regex for use later + pn_regex = "(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3) + + # match version + pver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)" + + # match arch + parch_regex = "-source|_all_" + + # src.rpm extension was added only for rpm package. 
Can be removed if the rpm + # packaged will always be considered as having to be manually upgraded + psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)" + + # match name, version and archive type of a package + package_regex_comp = re.compile("(?P%s?\.?v?)(?P%s)(?P%s)?[\.-](?P%s$)" + % (pn_regex, pver_regex, parch_regex, psuffix_regex)) + self.suffix_regex_comp = re.compile(psuffix_regex) + + # compile regex, can be specific by package or generic regex + pn_regex = d.getVar('UPSTREAM_CHECK_REGEX') + if pn_regex: + package_custom_regex_comp = re.compile(pn_regex) + else: + version = self._parse_path(package_regex_comp, package) + if version: + package_custom_regex_comp = re.compile( + "(?P%s)(?P%s)(?P%s)?[\.-](?P%s)" % + (re.escape(version[0]), pver_regex, parch_regex, psuffix_regex)) + else: + package_custom_regex_comp = None + + return package_custom_regex_comp + + def latest_versionstring(self, ud, d): + """ + Manipulate the URL and try to obtain the latest package version + + sanity check to ensure same name and type. + """ + package = ud.path.split("/")[-1] + current_version = ['', d.getVar('PV'), ''] + + """possible to have no version in pkg name, such as spectrum-fw""" + if not re.search("\d+", package): + current_version[1] = re.sub('_', '.', current_version[1]) + current_version[1] = re.sub('-', '.', current_version[1]) + return (current_version[1], '') + + package_regex = self._init_regexes(package, ud, d) + if package_regex is None: + bb.warn("latest_versionstring: package %s don't match pattern" % (package)) + return ('', '') + bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern)) + + uri = "" + regex_uri = d.getVar("UPSTREAM_CHECK_URI") + if not regex_uri: + path = ud.path.split(package)[0] + + # search for version matches on folders inside the path, like: + # "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz + dirver_regex = re.compile("(?P[^/]*(\d+\.)*\d+([-_]r\d+)*)/") + m = dirver_regex.search(path) + if m: + pn = d.getVar('PN') + dirver = m.group('dirver') + + dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn))) + if not dirver_pn_regex.search(dirver): + return (self._check_latest_version_by_dir(dirver, + package, package_regex, current_version, ud, d), '') + + uri = bb.fetch.encodeurl([ud.type, ud.host, path, ud.user, ud.pswd, {}]) + else: + uri = regex_uri + + return (self._check_latest_version(uri, package, package_regex, + current_version, ud, d), '') diff --git a/poky/bitbake/lib/bb/main.py b/poky/bitbake/lib/bb/main.py new file mode 100755 index 0000000000..f4474e410f --- /dev/null +++ b/poky/bitbake/lib/bb/main.py @@ -0,0 +1,508 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
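Upstream version checking in the wget fetcher normalises version strings with _modelate_version() before comparing them via bb.utils.vercmp(). A standalone sketch of the same normalisation rules, using only the standard library:

    import re

    def modelate_version(version):
        # Same transformations as _modelate_version(): move a leading '.'/'-'
        # behind the first digit, map rc/beta/alpha to sortable numbers,
        # turn '-'/'_' into '.' and drop a leading 'v'.
        if version[0] in ['.', '-']:
            if version[1].isdigit():
                version = version[1] + version[0] + version[2:]
            else:
                version = version[1:]
        version = re.sub('-', '.', version)
        version = re.sub('_', '.', version)
        version = re.sub('(rc)+', '.1000.', version)
        version = re.sub('(beta)+', '.100.', version)
        version = re.sub('(alpha)+', '.10.', version)
        if version[0] == 'v':
            version = version[1:]
        return version

    print(modelate_version("v2.5-rc1"))   # -> 2.5..1000.1
    print(modelate_version("1_2_beta3"))  # -> 1.2..100.3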
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import sys +import logging +import optparse +import warnings +import fcntl +import time +import traceback + +import bb +from bb import event +import bb.msg +from bb import cooker +from bb import ui +from bb import server +from bb import cookerdata + +import bb.server.process +import bb.server.xmlrpcclient + +logger = logging.getLogger("BitBake") + +class BBMainException(Exception): + pass + +class BBMainFatal(bb.BBHandledException): + pass + +def present_options(optionlist): + if len(optionlist) > 1: + return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]]) + else: + return optionlist[0] + +class BitbakeHelpFormatter(optparse.IndentedHelpFormatter): + def format_option(self, option): + # We need to do this here rather than in the text we supply to + # add_option() because we don't want to call list_extension_modules() + # on every execution (since it imports all of the modules) + # Note also that we modify option.help rather than the returned text + # - this is so that we don't have to re-format the text ourselves + if option.dest == 'ui': + valid_uis = list_extension_modules(bb.ui, 'main') + option.help = option.help.replace('@CHOICES@', present_options(valid_uis)) + + return optparse.IndentedHelpFormatter.format_option(self, option) + +def list_extension_modules(pkg, checkattr): + """ + Lists extension modules in a specific Python package + (e.g. UIs, servers). NOTE: Calling this function will import all of the + submodules of the specified module in order to check for the specified + attribute; this can have unusual side-effects. As a result, this should + only be called when displaying help text or error messages. + Parameters: + pkg: previously imported Python package to list + checkattr: attribute to look for in module to determine if it's valid + as the type of extension you are looking for + """ + import pkgutil + pkgdir = os.path.dirname(pkg.__file__) + + modules = [] + for _, modulename, _ in pkgutil.iter_modules([pkgdir]): + if os.path.isdir(os.path.join(pkgdir, modulename)): + # ignore directories + continue + try: + module = __import__(pkg.__name__, fromlist=[modulename]) + except: + # If we can't import it, it's not valid + continue + module_if = getattr(module, modulename) + if getattr(module_if, 'hidden_extension', False): + continue + if not checkattr or hasattr(module_if, checkattr): + modules.append(modulename) + return modules + +def import_extension_module(pkg, modulename, checkattr): + try: + # Dynamically load the UI based on the ui name. Although we + # suggest a fixed set this allows you to have flexibility in which + # ones are available. + module = __import__(pkg.__name__, fromlist=[modulename]) + return getattr(module, modulename) + except AttributeError: + modules = present_options(list_extension_modules(pkg, checkattr)) + raise BBMainException('FATAL: Unable to import extension module "%s" from %s. 
' + 'Valid extension modules: %s' % (modulename, pkg.__name__, modules)) + +# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others""" +warnlog = logging.getLogger("BitBake.Warnings") +_warnings_showwarning = warnings.showwarning +def _showwarning(message, category, filename, lineno, file=None, line=None): + if file is not None: + if _warnings_showwarning is not None: + _warnings_showwarning(message, category, filename, lineno, file, line) + else: + s = warnings.formatwarning(message, category, filename, lineno) + warnlog.warning(s) + +warnings.showwarning = _showwarning +warnings.filterwarnings("ignore") +warnings.filterwarnings("default", module="($|(oe|bb)\.)") +warnings.filterwarnings("ignore", category=PendingDeprecationWarning) +warnings.filterwarnings("ignore", category=ImportWarning) +warnings.filterwarnings("ignore", category=DeprecationWarning, module="$") +warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers") + +class BitBakeConfigParameters(cookerdata.ConfigParameters): + + def parseCommandLine(self, argv=sys.argv): + parser = optparse.OptionParser( + formatter=BitbakeHelpFormatter(), + version="BitBake Build Tool Core version %s" % bb.__version__, + usage="""%prog [options] [recipename/target recipe:do_task ...] + + Executes the specified task (default is 'build') for a given set of target recipes (.bb files). + It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which + will provide the layer, BBFILES and other configuration information.""") + + parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None, + help="Execute tasks from a specific .bb recipe directly. WARNING: Does " + "not handle any dependencies from other recipes.") + + parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True, + help="Continue as much as possible after an error. While the target that " + "failed and anything depending on it cannot be built, as much as " + "possible will be built before stopping.") + + parser.add_option("-f", "--force", action="store_true", dest="force", default=False, + help="Force the specified targets/task to run (invalidating any " + "existing stamp file).") + + parser.add_option("-c", "--cmd", action="store", dest="cmd", + help="Specify the task to execute. The exact options available " + "depend on the metadata. Some examples might be 'compile'" + " or 'populate_sysroot' or 'listtasks' may give a list of " + "the tasks available.") + + parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp", + help="Invalidate the stamp for the specified task such as 'compile' " + "and then run the default task for the specified target(s).") + + parser.add_option("-r", "--read", action="append", dest="prefile", default=[], + help="Read the specified file before bitbake.conf.") + + parser.add_option("-R", "--postread", action="append", dest="postfile", default=[], + help="Read the specified file after bitbake.conf.") + + parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, + help="Enable tracing of shell tasks (with 'set -x'). " + "Also print bb.note(...) messages to stdout (in " + "addition to writing them to ${T}/log.do_).") + + parser.add_option("-D", "--debug", action="count", dest="debug", default=0, + help="Increase the debug level. You can specify this " + "more than once. -D sets the debug level to 1, " + "where only bb.debug(1, ...) 
messages are printed " + "to stdout; -DD sets the debug level to 2, where " + "both bb.debug(1, ...) and bb.debug(2, ...) " + "messages are printed; etc. Without -D, no debug " + "messages are printed. Note that -D only affects " + "output to stdout. All debug messages are written " + "to ${T}/log.do_taskname, regardless of the debug " + "level.") + + parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0, + help="Output less log message data to the terminal. You can specify this more than once.") + + parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False, + help="Don't execute, just go through the motions.") + + parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures", + default=[], metavar="SIGNATURE_HANDLER", + help="Dump out the signature construction information, with no task " + "execution. The SIGNATURE_HANDLER parameter is passed to the " + "handler. Two common values are none and printdiff but the handler " + "may define more/less. none means only dump the signature, printdiff" + " means compare the dumped signature with the cached one.") + + parser.add_option("-p", "--parse-only", action="store_true", + dest="parse_only", default=False, + help="Quit after parsing the BB recipes.") + + parser.add_option("-s", "--show-versions", action="store_true", + dest="show_versions", default=False, + help="Show current and preferred versions of all recipes.") + + parser.add_option("-e", "--environment", action="store_true", + dest="show_environment", default=False, + help="Show the global or per-recipe environment complete with information" + " about where variables were set/changed.") + + parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False, + help="Save dependency tree information for the specified " + "targets in the dot syntax.") + + parser.add_option("-I", "--ignore-deps", action="append", + dest="extra_assume_provided", default=[], + help="Assume these dependencies don't exist and are already provided " + "(equivalent to ASSUME_PROVIDED). 
Useful to make dependency " + "graphs more appealing") + + parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[], + help="Show debug logging for the specified logging domains") + + parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False, + help="Profile the command and save reports.") + + # @CHOICES@ is substituted out by BitbakeHelpFormatter above + parser.add_option("-u", "--ui", action="store", dest="ui", + default=os.environ.get('BITBAKE_UI', 'knotty'), + help="The user interface to use (@CHOICES@ - default %default).") + + parser.add_option("", "--token", action="store", dest="xmlrpctoken", + default=os.environ.get("BBTOKEN"), + help="Specify the connection token to be used when connecting " + "to a remote server.") + + parser.add_option("", "--revisions-changed", action="store_true", + dest="revisions_changed", default=False, + help="Set the exit code depending on whether upstream floating " + "revisions have changed or not.") + + parser.add_option("", "--server-only", action="store_true", + dest="server_only", default=False, + help="Run bitbake without a UI, only starting a server " + "(cooker) process.") + + parser.add_option("-B", "--bind", action="store", dest="bind", default=False, + help="The name/address for the bitbake xmlrpc server to bind to.") + + parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout", + default=os.getenv("BB_SERVER_TIMEOUT"), + help="Set timeout to unload bitbake server due to inactivity, " + "set to -1 means no unload, " + "default: Environment variable BB_SERVER_TIMEOUT.") + + parser.add_option("", "--no-setscene", action="store_true", + dest="nosetscene", default=False, + help="Do not run any setscene tasks. sstate will be ignored and " + "everything needed, built.") + + parser.add_option("", "--setscene-only", action="store_true", + dest="setsceneonly", default=False, + help="Only run setscene tasks, don't run any real tasks.") + + parser.add_option("", "--remote-server", action="store", dest="remote_server", + default=os.environ.get("BBSERVER"), + help="Connect to the specified server.") + + parser.add_option("-m", "--kill-server", action="store_true", + dest="kill_server", default=False, + help="Terminate any running bitbake server.") + + parser.add_option("", "--observe-only", action="store_true", + dest="observe_only", default=False, + help="Connect to a server as an observing-only client.") + + parser.add_option("", "--status-only", action="store_true", + dest="status_only", default=False, + help="Check the status of the remote bitbake server.") + + parser.add_option("-w", "--write-log", action="store", dest="writeeventlog", + default=os.environ.get("BBEVENTLOG"), + help="Writes the event log of the build to a bitbake event json file. 
" + "Use '' (empty string) to assign the name automatically.") + + parser.add_option("", "--runall", action="append", dest="runall", + help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).") + + parser.add_option("", "--runonly", action="append", dest="runonly", + help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).") + + + options, targets = parser.parse_args(argv) + + if options.quiet and options.verbose: + parser.error("options --quiet and --verbose are mutually exclusive") + + if options.quiet and options.debug: + parser.error("options --quiet and --debug are mutually exclusive") + + # use configuration files from environment variables + if "BBPRECONF" in os.environ: + options.prefile.append(os.environ["BBPRECONF"]) + + if "BBPOSTCONF" in os.environ: + options.postfile.append(os.environ["BBPOSTCONF"]) + + # fill in proper log name if not supplied + if options.writeeventlog is not None and len(options.writeeventlog) == 0: + from datetime import datetime + eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S") + options.writeeventlog = eventlog + + if options.bind: + try: + #Checking that the port is a number and is a ':' delimited value + (host, port) = options.bind.split(':') + port = int(port) + except (ValueError,IndexError): + raise BBMainException("FATAL: Malformed host:port bind parameter") + options.xmlrpcinterface = (host, port) + else: + options.xmlrpcinterface = (None, 0) + + return options, targets[1:] + + +def bitbake_main(configParams, configuration): + + # Python multiprocessing requires /dev/shm on Linux + if sys.platform.startswith('linux') and not os.access('/dev/shm', os.W_OK | os.X_OK): + raise BBMainException("FATAL: /dev/shm does not exist or is not writable") + + # Unbuffer stdout to avoid log truncation in the event + # of an unorderly exit as well as to provide timely + # updates to log files for use with tail + try: + if sys.stdout.name == '': + # Reopen with O_SYNC (unbuffered) + fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL) + fl |= os.O_SYNC + fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl) + except: + pass + + configuration.setConfigParameters(configParams) + + if configParams.server_only and configParams.remote_server: + raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" % + ("the BBSERVER environment variable" if "BBSERVER" in os.environ \ + else "the '--remote-server' option")) + + if configParams.observe_only and not (configParams.remote_server or configParams.bind): + raise BBMainException("FATAL: '--observe-only' can only be used by UI clients " + "connecting to a server.\n") + + if "BBDEBUG" in os.environ: + level = int(os.environ["BBDEBUG"]) + if level > configuration.debug: + configuration.debug = level + + bb.msg.init_msgconfig(configParams.verbose, configuration.debug, + configuration.debug_domains) + + server_connection, ui_module = setup_bitbake(configParams, configuration) + # No server connection + if server_connection is None: + if configParams.status_only: + return 1 + if configParams.kill_server: + return 0 + + if not configParams.server_only: + if configParams.status_only: + server_connection.terminate() + return 0 + + try: + for event in bb.event.ui_queue: + server_connection.events.queue_event(event) + bb.event.ui_queue = [] + + return ui_module.main(server_connection.connection, server_connection.events, + configParams) + 
finally: + server_connection.terminate() + else: + return 0 + + return 1 + +def setup_bitbake(configParams, configuration, extrafeatures=None): + # Ensure logging messages get sent to the UI as events + handler = bb.event.LogHandler() + if not configParams.status_only: + # In status only mode there are no logs and no UI + logger.addHandler(handler) + + # Clear away any spurious environment variables while we stoke up the cooker + cleanedvars = bb.utils.clean_environment() + + if configParams.server_only: + featureset = [] + ui_module = None + else: + ui_module = import_extension_module(bb.ui, configParams.ui, 'main') + # Collect the feature set for the UI + featureset = getattr(ui_module, "featureSet", []) + + if extrafeatures: + for feature in extrafeatures: + if not feature in featureset: + featureset.append(feature) + + server_connection = None + + if configParams.remote_server: + # Connect to a remote XMLRPC server + server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset, + configParams.observe_only, configParams.xmlrpctoken) + else: + retries = 8 + while retries: + try: + topdir, lock = lockBitbake() + sockname = topdir + "/bitbake.sock" + if lock: + if configParams.status_only or configParams.kill_server: + logger.info("bitbake server is not running.") + lock.close() + return None, None + # we start a server with a given configuration + logger.info("Starting bitbake server...") + # Clear the event queue since we already displayed messages + bb.event.ui_queue = [] + server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset) + + else: + logger.info("Reconnecting to bitbake server...") + if not os.path.exists(sockname): + print("Previous bitbake instance shutting down?, waiting to retry...") + i = 0 + lock = None + # Wait for 5s or until we can get the lock + while not lock and i < 50: + time.sleep(0.1) + _, lock = lockBitbake() + i += 1 + if lock: + bb.utils.unlockfile(lock) + raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?") + if not configParams.server_only: + try: + server_connection = bb.server.process.connectProcessServer(sockname, featureset) + except EOFError: + # The server may have been shutting down but not closed the socket yet. If that happened, + # ignore it. + pass + + if server_connection or configParams.server_only: + break + except BBMainFatal: + raise + except (Exception, bb.server.process.ProcessTimeout) as e: + if not retries: + raise + retries -= 1 + if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)): + logger.info("Retrying server connection...") + else: + logger.info("Retrying server connection... (%s)" % traceback.format_exc()) + if not retries: + bb.fatal("Unable to connect to bitbake server, or start one") + if retries < 5: + time.sleep(5) + + if configParams.kill_server: + server_connection.connection.terminateServer() + server_connection.terminate() + bb.event.ui_queue = [] + logger.info("Terminated bitbake server.") + return None, None + + # Restore the environment in case the UI needs it + for k in cleanedvars: + os.environ[k] = cleanedvars[k] + + logger.removeHandler(handler) + + return server_connection, ui_module + +def lockBitbake(): + topdir = bb.cookerdata.findTopdir() + if not topdir: + bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. 
BBPATH is unset and/or not in a build directory?") + raise BBMainFatal + lockfile = topdir + "/bitbake.lock" + return topdir, bb.utils.lockfile(lockfile, False, False) + diff --git a/poky/bitbake/lib/bb/methodpool.py b/poky/bitbake/lib/bb/methodpool.py new file mode 100644 index 0000000000..49aed3338b --- /dev/null +++ b/poky/bitbake/lib/bb/methodpool.py @@ -0,0 +1,40 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# +# Copyright (C) 2006 Holger Hans Peter Freyther +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from bb.utils import better_compile, better_exec + +def insert_method(modulename, code, fn, lineno): + """ + Add the code of a module. The methods + will simply be added; no checking will be done + """ + comp = better_compile(code, modulename, fn, lineno=lineno) + better_exec(comp, None, code, fn) + +compilecache = {} + +def compile_cache(code): + h = hash(code) + if h in compilecache: + return compilecache[h] + return None + +def compile_cache_add(code, compileobj): + h = hash(code) + compilecache[h] = compileobj diff --git a/poky/bitbake/lib/bb/monitordisk.py b/poky/bitbake/lib/bb/monitordisk.py new file mode 100644 index 0000000000..833cd3d344 --- /dev/null +++ b/poky/bitbake/lib/bb/monitordisk.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2012 Robert Yang +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ +import os, logging, re, sys +import bb +logger = logging.getLogger("BitBake.Monitor") + +def printErr(info): + logger.error("%s\n Disk space monitor will NOT be enabled" % info) + +def convertGMK(unit): + + """ Convert the space unit G, M, K, the unit is case-insensitive """ + + unitG = re.match('([1-9][0-9]*)[gG]\s?$', unit) + if unitG: + return int(unitG.group(1)) * (1024 ** 3) + unitM = re.match('([1-9][0-9]*)[mM]\s?$', unit) + if unitM: + return int(unitM.group(1)) * (1024 ** 2) + unitK = re.match('([1-9][0-9]*)[kK]\s?$', unit) + if unitK: + return int(unitK.group(1)) * 1024 + unitN = re.match('([1-9][0-9]*)\s?$', unit) + if unitN: + return int(unitN.group(1)) + else: + return None + +def getMountedDev(path): + + """ Get the device mounted at the path, uses /proc/mounts """ + + # Get the mount point of the filesystem containing path + # st_dev is the ID of device containing file + parentDev = os.stat(path).st_dev + currentDev = parentDev + # When the current directory's device is different from the + # parent's, then the current directory is a mount point + while parentDev == currentDev: + mountPoint = path + # Use dirname to get the parent's directory + path = os.path.dirname(path) + # Reach the "/" + if path == mountPoint: + break + parentDev = os.stat(path).st_dev + + try: + with open("/proc/mounts", "r") as ifp: + for line in ifp: + procLines = line.rstrip('\n').split() + if procLines[1] == mountPoint: + return procLines[0] + except EnvironmentError: + pass + return None + +def getDiskData(BBDirs, configuration): + + """Prepare disk data for disk space monitor""" + + # Save the device IDs, need the ID to be unique (the dictionary's key is + # unique), so that when more than one directory is located on the same + # device, we just monitor it once + devDict = {} + for pathSpaceInode in BBDirs.split(): + # The input format is: "action,dir,space,inode", where dir is required; space + # and inode are optional + pathSpaceInodeRe = re.match('([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode) + if not pathSpaceInodeRe: + printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + + action = pathSpaceInodeRe.group(1) + if action not in ("ABORT", "STOPTASKS", "WARN"): + printErr("Unknown disk space monitor action: %s" % action) + return None + + path = os.path.realpath(pathSpaceInodeRe.group(2)) + if not path: + printErr("Invalid path value in BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + + # The disk space or inode is optional, but it should have a correct + # value once it is specified + minSpace = pathSpaceInodeRe.group(3) + if minSpace: + minSpace = convertGMK(minSpace) + if not minSpace: + printErr("Invalid disk space value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(3)) + return None + else: + # None means that it is not specified + minSpace = None + + minInode = pathSpaceInodeRe.group(4) + if minInode: + minInode = convertGMK(minInode) + if not minInode: + printErr("Invalid inode value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(4)) + return None + else: + # None means that it is not specified + minInode = None + + if minSpace is None and minInode is None: + printErr("No disk space or inode value found in BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + # mkdir for the directory since it may not exist, for example the + # DL_DIR may not exist at the very beginning + if not os.path.exists(path): + bb.utils.mkdirhier(path) + dev = getMountedDev(path) + # Use path/action as the key + devDict[(path, action)] = [dev, minSpace, minInode] + + return devDict +
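For readers unfamiliar with the configuration that getDiskData() consumes, the following standalone sketch (an editorial illustration, not part of this patch; the directories and thresholds are invented example values) shows the "action,dir,space,inode" entry format of BB_DISKMON_DIRS and how the case-insensitive G/M/K suffixes are interpreted, mirroring convertGMK():

import re

def convert_gmk(unit):
    # Mirrors convertGMK() above: an integer with an optional case-insensitive
    # G/M/K suffix; returns None for anything that cannot be parsed.
    m = re.match(r'([1-9][0-9]*)([gGmMkK]?)\s?$', unit)
    if not m:
        return None
    scale = {'': 1, 'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}
    return int(m.group(1)) * scale[m.group(2).lower()]

# Example: stop scheduling new tasks when ${TMPDIR} falls below 1G of space or
# 100K inodes, and abort the build outright below 100M of space or 1K inodes.
example = "STOPTASKS,${TMPDIR},1G,100K ABORT,${TMPDIR},100M,1K"
for entry in example.split():
    action, path, space, inodes = entry.split(',')
    print(action, path, convert_gmk(space), convert_gmk(inodes))

getInterval() below applies the same conversion to the optional BB_DISKMON_WARNINTERVAL value, falling back to 50M of space and 5K inodes when it is unset.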
+def getInterval(configuration): + + """ Get the disk space interval """ + + # The default value is 50M and 5K. + spaceDefault = 50 * 1024 * 1024 + inodeDefault = 5 * 1024 + + interval = configuration.getVar("BB_DISKMON_WARNINTERVAL") + if not interval: + return spaceDefault, inodeDefault + else: + # The disk space or inode interval is optional, but it should + # have a correct value once it is specified + intervalRe = re.match('([^,]*),?\s*(.*)', interval) + if intervalRe: + intervalSpace = intervalRe.group(1) + if intervalSpace: + intervalSpace = convertGMK(intervalSpace) + if not intervalSpace: + printErr("Invalid disk space interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(1)) + return None, None + else: + intervalSpace = spaceDefault + intervalInode = intervalRe.group(2) + if intervalInode: + intervalInode = convertGMK(intervalInode) + if not intervalInode: + printErr("Invalid disk inode interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(2)) + return None, None + else: + intervalInode = inodeDefault + return intervalSpace, intervalInode + else: + printErr("Invalid interval value in BB_DISKMON_WARNINTERVAL: %s" % interval) + return None, None + +class diskMonitor: + + """Prepare the disk space monitor data""" + + def __init__(self, configuration): + + self.enableMonitor = False + self.configuration = configuration + + BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None + if BBDirs: + self.devDict = getDiskData(BBDirs, configuration) + if self.devDict: + self.spaceInterval, self.inodeInterval = getInterval(configuration) + if self.spaceInterval and self.inodeInterval: + self.enableMonitor = True + # These are for saving the previous disk free space and inode, we + # use them to avoid printing too many warning messages + self.preFreeS = {} + self.preFreeI = {} + # This is for STOPTASKS and ABORT, to avoid printing the message + # repeatedly while waiting for the tasks to finish + self.checked = {} + for k in self.devDict: + self.preFreeS[k] = 0 + self.preFreeI[k] = 0 + self.checked[k] = False + if self.spaceInterval is None and self.inodeInterval is None: + self.enableMonitor = False + + def check(self, rq): + + """ Take action for the monitor """ + + if self.enableMonitor: + diskUsage = {} + for k, attributes in self.devDict.items(): + path, action = k + dev, minSpace, minInode = attributes + + st = os.statvfs(path) + + # The available free space, integer number + freeSpace = st.f_bavail * st.f_frsize + + # Send all relevant information in the event. 
+ freeSpaceRoot = st.f_bfree * st.f_frsize + totalSpace = st.f_blocks * st.f_frsize + diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace) + + if minSpace and freeSpace < minSpace: + # Always show warning, the self.checked would always be False if the action is WARN + if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]: + logger.warning("The free space of %s (%s) is running low (%.3fGB left)" % \ + (path, dev, freeSpace / 1024 / 1024 / 1024.0)) + self.preFreeS[k] = freeSpace + + if action == "STOPTASKS" and not self.checked[k]: + logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") + self.checked[k] = True + rq.finish_runqueue(False) + bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) + elif action == "ABORT" and not self.checked[k]: + logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!") + self.checked[k] = True + rq.finish_runqueue(True) + bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) + + # The free inodes, integer number + freeInode = st.f_favail + + if minInode and freeInode < minInode: + # Some filesystems use dynamic inodes so can't run out + # (e.g. btrfs). This is reported by the inode count being 0. + if st.f_files == 0: + self.devDict[k][2] = None + continue + # Always show warning, the self.checked would always be False if the action is WARN + if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]: + logger.warning("The free inode of %s (%s) is running low (%.3fK left)" % \ + (path, dev, freeInode / 1024.0)) + self.preFreeI[k] = freeInode + + if action == "STOPTASKS" and not self.checked[k]: + logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") + self.checked[k] = True + rq.finish_runqueue(False) + bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) + elif action == "ABORT" and not self.checked[k]: + logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!") + self.checked[k] = True + rq.finish_runqueue(True) + bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) + + bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration) + return diff --git a/poky/bitbake/lib/bb/msg.py b/poky/bitbake/lib/bb/msg.py new file mode 100644 index 0000000000..f1723be797 --- /dev/null +++ b/poky/bitbake/lib/bb/msg.py @@ -0,0 +1,225 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'msg' implementation + +Message handling infrastructure for bitbake + +""" + +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import sys +import copy +import logging +import collections +from itertools import groupby +import warnings +import bb +import bb.event + +class BBLogFormatter(logging.Formatter): + """Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is""" + + DEBUG3 = logging.DEBUG - 2 + DEBUG2 = logging.DEBUG - 1 + DEBUG = logging.DEBUG + VERBOSE = logging.INFO - 1 + NOTE = logging.INFO + PLAIN = logging.INFO + 1 + ERROR = logging.ERROR + WARNING = logging.WARNING + CRITICAL = logging.CRITICAL + + levelnames = { + DEBUG3 : 'DEBUG', + DEBUG2 : 'DEBUG', + DEBUG : 'DEBUG', + VERBOSE: 'NOTE', + NOTE : 'NOTE', + PLAIN : '', + WARNING : 'WARNING', + ERROR : 'ERROR', + CRITICAL: 'ERROR', + } + + color_enabled = False + BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(29,38)) + + COLORS = { + DEBUG3 : CYAN, + DEBUG2 : CYAN, + DEBUG : CYAN, + VERBOSE : BASECOLOR, + NOTE : BASECOLOR, + PLAIN : BASECOLOR, + WARNING : YELLOW, + ERROR : RED, + CRITICAL: RED, + } + + BLD = '\033[1;%dm' + STD = '\033[%dm' + RST = '\033[0m' + + def getLevelName(self, levelno): + try: + return self.levelnames[levelno] + except KeyError: + self.levelnames[levelno] = value = 'Level %d' % levelno + return value + + def format(self, record): + record.levelname = self.getLevelName(record.levelno) + if record.levelno == self.PLAIN: + msg = record.getMessage() + else: + if self.color_enabled: + record = self.colorize(record) + msg = logging.Formatter.format(self, record) + if hasattr(record, 'bb_exc_formatted'): + msg += '\n' + ''.join(record.bb_exc_formatted) + elif hasattr(record, 'bb_exc_info'): + etype, value, tb = record.bb_exc_info + formatted = bb.exceptions.format_exception(etype, value, tb, limit=5) + msg += '\n' + ''.join(formatted) + return msg + + def colorize(self, record): + color = self.COLORS[record.levelno] + if self.color_enabled and color is not None: + record = copy.copy(record) + record.levelname = "".join([self.BLD % color, record.levelname, self.RST]) + record.msg = "".join([self.STD % color, record.msg, self.RST]) + return record + + def enable_color(self): + self.color_enabled = True + +class BBLogFilter(object): + def __init__(self, handler, level, debug_domains): + self.stdlevel = level + self.debug_domains = debug_domains + loglevel = level + for domain in debug_domains: + if debug_domains[domain] < loglevel: + loglevel = debug_domains[domain] + handler.setLevel(loglevel) + handler.addFilter(self) + + def filter(self, record): + if record.levelno >= self.stdlevel: + return True + if record.name in self.debug_domains and record.levelno >= self.debug_domains[record.name]: + return True + return False + +class BBLogFilterStdErr(BBLogFilter): + def filter(self, record): + if not BBLogFilter.filter(self, record): + return False + if record.levelno >= logging.ERROR: + return True + return False + +class BBLogFilterStdOut(BBLogFilter): + def filter(self, record): + if not BBLogFilter.filter(self, record): + return False + if record.levelno < logging.ERROR: + return True + return False + +# Message control functions +# + +loggerDefaultDebugLevel = 0 +loggerDefaultVerbose = False +loggerVerboseLogs = False +loggerDefaultDomains = [] + +def init_msgconfig(verbose, debug, debug_domains=None): + """ + Set default verbosity and debug levels config the logger + """ + bb.msg.loggerDefaultDebugLevel = debug + bb.msg.loggerDefaultVerbose = verbose + if verbose: + bb.msg.loggerVerboseLogs = True + if debug_domains: + bb.msg.loggerDefaultDomains = debug_domains + else: + 
bb.msg.loggerDefaultDomains = [] + +def constructLogOptions(): + debug = loggerDefaultDebugLevel + verbose = loggerDefaultVerbose + domains = loggerDefaultDomains + + if debug: + level = BBLogFormatter.DEBUG - debug + 1 + elif verbose: + level = BBLogFormatter.VERBOSE + else: + level = BBLogFormatter.NOTE + + debug_domains = {} + for (domainarg, iterator) in groupby(domains): + dlevel = len(tuple(iterator)) + debug_domains["BitBake.%s" % domainarg] = logging.DEBUG - dlevel + 1 + return level, debug_domains + +def addDefaultlogFilter(handler, cls = BBLogFilter, forcelevel=None): + level, debug_domains = constructLogOptions() + + if forcelevel is not None: + level = forcelevel + + cls(handler, level, debug_domains) + +# +# Message handling functions +# + +def fatal(msgdomain, msg): + if msgdomain: + logger = logging.getLogger("BitBake.%s" % msgdomain) + else: + logger = logging.getLogger("BitBake") + logger.critical(msg) + sys.exit(1) + +def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'): + """Standalone logger creation function""" + logger = logging.getLogger(name) + console = logging.StreamHandler(output) + format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s") + if color == 'always' or (color == 'auto' and output.isatty()): + format.enable_color() + console.setFormatter(format) + if preserve_handlers: + logger.addHandler(console) + else: + logger.handlers = [console] + logger.setLevel(level) + return logger + +def has_console_handler(logger): + for handler in logger.handlers: + if isinstance(handler, logging.StreamHandler): + if handler.stream in [sys.stderr, sys.stdout]: + return True + return False diff --git a/poky/bitbake/lib/bb/namedtuple_with_abc.py b/poky/bitbake/lib/bb/namedtuple_with_abc.py new file mode 100644 index 0000000000..32f2fc642c --- /dev/null +++ b/poky/bitbake/lib/bb/namedtuple_with_abc.py @@ -0,0 +1,255 @@ +# http://code.activestate.com/recipes/577629-namedtupleabc-abstract-base-class-mix-in-for-named/ +#!/usr/bin/env python +# Copyright (c) 2011 Jan Kaliszewski (zuo). Available under the MIT License. + +""" +namedtuple_with_abc.py: +* named tuple mix-in + ABC (abstract base class) recipe, +* works under Python 2.6, 2.7 as well as 3.x. + +Import this module to patch collections.namedtuple() factory function +-- enriching it with the 'abc' attribute (an abstract base class + mix-in +for named tuples) and decorating it with a wrapper that registers each +newly created named tuple as a subclass of namedtuple.abc. + +How to import: + import collections, namedtuple_with_abc +or: + import namedtuple_with_abc + from collections import namedtuple + # ^ in this variant you must import namedtuple function + # *after* importing namedtuple_with_abc module +or simply: + from namedtuple_with_abc import namedtuple + +Simple usage example: + class Credentials(namedtuple.abc): + _fields = 'username password' + def __str__(self): + return ('{0.__class__.__name__}' + '(username={0.username}, password=...)'.format(self)) + print(Credentials("alice", "Alice's password")) + +For more advanced examples -- see below the "if __name__ == '__main__':". 
+""" + +import collections +from abc import ABCMeta, abstractproperty +from functools import wraps +from sys import version_info + +__all__ = ('namedtuple',) +_namedtuple = collections.namedtuple + + +class _NamedTupleABCMeta(ABCMeta): + '''The metaclass for the abstract base class + mix-in for named tuples.''' + def __new__(mcls, name, bases, namespace): + fields = namespace.get('_fields') + for base in bases: + if fields is not None: + break + fields = getattr(base, '_fields', None) + if not isinstance(fields, abstractproperty): + basetuple = _namedtuple(name, fields) + bases = (basetuple,) + bases + namespace.pop('_fields', None) + namespace.setdefault('__doc__', basetuple.__doc__) + namespace.setdefault('__slots__', ()) + return ABCMeta.__new__(mcls, name, bases, namespace) + + +exec( + # Python 2.x metaclass declaration syntax + """class _NamedTupleABC(object): + '''The abstract base class + mix-in for named tuples.''' + __metaclass__ = _NamedTupleABCMeta + _fields = abstractproperty()""" if version_info[0] < 3 else + # Python 3.x metaclass declaration syntax + """class _NamedTupleABC(metaclass=_NamedTupleABCMeta): + '''The abstract base class + mix-in for named tuples.''' + _fields = abstractproperty()""" +) + + +_namedtuple.abc = _NamedTupleABC +#_NamedTupleABC.register(type(version_info)) # (and similar, in the future...) + +@wraps(_namedtuple) +def namedtuple(*args, **kwargs): + '''Named tuple factory with namedtuple.abc subclass registration.''' + cls = _namedtuple(*args, **kwargs) + _NamedTupleABC.register(cls) + return cls + +collections.namedtuple = namedtuple + + + + +if __name__ == '__main__': + + '''Examples and explanations''' + + # Simple usage + + class MyRecord(namedtuple.abc): + _fields = 'x y z' # such form will be transformed into ('x', 'y', 'z') + def _my_custom_method(self): + return list(self._asdict().items()) + # (the '_fields' attribute belongs to the named tuple public API anyway) + + rec = MyRecord(1, 2, 3) + print(rec) + print(rec._my_custom_method()) + print(rec._replace(y=222)) + print(rec._replace(y=222)._my_custom_method()) + + # Custom abstract classes... + + class MyAbstractRecord(namedtuple.abc): + def _my_custom_method(self): + return list(self._asdict().items()) + + try: + MyAbstractRecord() # (abstract classes cannot be instantiated) + except TypeError as exc: + print(exc) + + class AnotherAbstractRecord(MyAbstractRecord): + def __str__(self): + return '<<<{0}>>>'.format(super(AnotherAbstractRecord, + self).__str__()) + + # ...and their non-abstract subclasses + + class MyRecord2(MyAbstractRecord): + _fields = 'a, b' + + class MyRecord3(AnotherAbstractRecord): + _fields = 'p', 'q', 'r' + + rec2 = MyRecord2('foo', 'bar') + print(rec2) + print(rec2._my_custom_method()) + print(rec2._replace(b=222)) + print(rec2._replace(b=222)._my_custom_method()) + + rec3 = MyRecord3('foo', 'bar', 'baz') + print(rec3) + print(rec3._my_custom_method()) + print(rec3._replace(q=222)) + print(rec3._replace(q=222)._my_custom_method()) + + # You can also subclass non-abstract ones... 
+ + class MyRecord33(MyRecord3): + def __str__(self): + return '< {0!r}, ..., {0!r} >'.format(self.p, self.r) + + rec33 = MyRecord33('foo', 'bar', 'baz') + print(rec33) + print(rec33._my_custom_method()) + print(rec33._replace(q=222)) + print(rec33._replace(q=222)._my_custom_method()) + + # ...and even override the magic '_fields' attribute again + + class MyRecord345(MyRecord3): + _fields = 'e f g h i j k' + + rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1) + print(rec345) + print(rec345._my_custom_method()) + print(rec345._replace(f=222)) + print(rec345._replace(f=222)._my_custom_method()) + + # Mixing-in some other classes is also possible: + + class MyMixIn(object): + def method(self): + return "MyMixIn.method() called" + def _my_custom_method(self): + return "MyMixIn._my_custom_method() called" + def count(self, item): + return "MyMixIn.count({0}) called".format(item) + def _asdict(self): # (cannot override a namedtuple method, see below) + return "MyMixIn._asdict() called" + + class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right + _fields = 'j k l x' + + class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left + _fields = 'j k l x y' + + rec4 = MyRecord4(1, 2, 3, 2) + print(rec4) + print(rec4.method()) + print(rec4._my_custom_method()) # MyRecord33's + print(rec4.count(2)) # tuple's + print(rec4._replace(k=222)) + print(rec4._replace(k=222).method()) + print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's + print(rec4._replace(k=222).count(8)) # tuple's + + rec5 = MyRecord5(1, 2, 3, 2, 1) + print(rec5) + print(rec5.method()) + print(rec5._my_custom_method()) # MyMixIn's + print(rec5.count(2)) # MyMixIn's + print(rec5._replace(k=222)) + print(rec5._replace(k=222).method()) + print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's + print(rec5._replace(k=222).count(2)) # MyMixIn's + + # Note that behavior: the standard namedtuple methods cannot be + # overridden by a foreign mix-in -- even if the mix-in is declared + # as the leftmost base class (but, obviously, you can override them + # in the defined class or its subclasses): + + print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called") + print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called") + + class MyRecord6(MyRecord33): + _fields = 'j k l x y z' + def _asdict(self): + return "MyRecord6._asdict() called" + rec6 = MyRecord6(1, 2, 3, 1, 2, 3) + print(rec6._asdict()) # (this returns "MyRecord6._asdict() called") + + # All that record classes are real subclasses of namedtuple.abc: + + assert issubclass(MyRecord, namedtuple.abc) + assert issubclass(MyAbstractRecord, namedtuple.abc) + assert issubclass(AnotherAbstractRecord, namedtuple.abc) + assert issubclass(MyRecord2, namedtuple.abc) + assert issubclass(MyRecord3, namedtuple.abc) + assert issubclass(MyRecord33, namedtuple.abc) + assert issubclass(MyRecord345, namedtuple.abc) + assert issubclass(MyRecord4, namedtuple.abc) + assert issubclass(MyRecord5, namedtuple.abc) + assert issubclass(MyRecord6, namedtuple.abc) + + # ...but abstract ones are not subclasses of tuple + # (and this is what you probably want): + + assert not issubclass(MyAbstractRecord, tuple) + assert not issubclass(AnotherAbstractRecord, tuple) + + assert issubclass(MyRecord, tuple) + assert issubclass(MyRecord2, tuple) + assert issubclass(MyRecord3, tuple) + assert issubclass(MyRecord33, tuple) + assert issubclass(MyRecord345, tuple) + assert issubclass(MyRecord4, tuple) + assert issubclass(MyRecord5, tuple) + assert issubclass(MyRecord6, tuple) + + # Named tuple 
classes created with namedtuple() factory function + # (in the "traditional" way) are registered as "virtual" subclasses + # of namedtuple.abc: + + MyTuple = namedtuple('MyTuple', 'a b c') + mt = MyTuple(1, 2, 3) + assert issubclass(MyTuple, namedtuple.abc) + assert isinstance(mt, namedtuple.abc) diff --git a/poky/bitbake/lib/bb/parse/__init__.py b/poky/bitbake/lib/bb/parse/__init__.py new file mode 100644 index 0000000000..5397d57a51 --- /dev/null +++ b/poky/bitbake/lib/bb/parse/__init__.py @@ -0,0 +1,175 @@ +""" +BitBake Parsers + +File parsers for the BitBake build tools. + +""" + + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +handlers = [] + +import errno +import logging +import os +import stat +import bb +import bb.utils +import bb.siggen + +logger = logging.getLogger("BitBake.Parsing") + +class ParseError(Exception): + """Exception raised when parsing fails""" + def __init__(self, msg, filename, lineno=0): + self.msg = msg + self.filename = filename + self.lineno = lineno + Exception.__init__(self, msg, filename, lineno) + + def __str__(self): + if self.lineno: + return "ParseError at %s:%d: %s" % (self.filename, self.lineno, self.msg) + else: + return "ParseError in %s: %s" % (self.filename, self.msg) + +class SkipRecipe(Exception): + """Exception raised to skip this recipe""" + +class SkipPackage(SkipRecipe): + """Exception raised to skip this recipe (use SkipRecipe in new code)""" + +__mtime_cache = {} +def cached_mtime(f): + if f not in __mtime_cache: + __mtime_cache[f] = os.stat(f)[stat.ST_MTIME] + return __mtime_cache[f] + +def cached_mtime_noerror(f): + if f not in __mtime_cache: + try: + __mtime_cache[f] = os.stat(f)[stat.ST_MTIME] + except OSError: + return 0 + return __mtime_cache[f] + +def update_mtime(f): + try: + __mtime_cache[f] = os.stat(f)[stat.ST_MTIME] + except OSError: + if f in __mtime_cache: + del __mtime_cache[f] + return 0 + return __mtime_cache[f] + +def update_cache(f): + if f in __mtime_cache: + logger.debug(1, "Updating mtime cache for %s" % f) + update_mtime(f) + +def clear_cache(): + global __mtime_cache + __mtime_cache = {} + +def mark_dependency(d, f): + if f.startswith('./'): + f = "%s/%s" % (os.getcwd(), f[2:]) + deps = (d.getVar('__depends', False) or []) + s = (f, cached_mtime_noerror(f)) + if s not in deps: + deps.append(s) + d.setVar('__depends', deps) + +def check_dependency(d, f): + s = (f, cached_mtime_noerror(f)) + deps = (d.getVar('__depends', False) or []) + return s in deps + +def supports(fn, data): + """Returns true if we have a handler for this file, false otherwise""" + for h in handlers: + if h['supports'](fn, data): + return 1 + return 0 + +def handle(fn, data, include = 0): + """Call the handler that is appropriate for this file""" + for h in handlers: + if 
h['supports'](fn, data): + with data.inchistory.include(fn): + return h['handle'](fn, data, include) + raise ParseError("not a BitBake file", fn) + +def init(fn, data): + for h in handlers: + if h['supports'](fn): + return h['init'](data) + +def init_parser(d): + bb.parse.siggen = bb.siggen.init(d) + +def resolve_file(fn, d): + if not os.path.isabs(fn): + bbpath = d.getVar("BBPATH") + newfn, attempts = bb.utils.which(bbpath, fn, history=True) + for af in attempts: + mark_dependency(d, af) + if not newfn: + raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath)) + fn = newfn + else: + mark_dependency(d, fn) + + if not os.path.isfile(fn): + raise IOError(errno.ENOENT, "file %s not found" % fn) + + return fn + +# Used by OpenEmbedded metadata +__pkgsplit_cache__={} +def vars_from_file(mypkg, d): + if not mypkg or not mypkg.endswith((".bb", ".bbappend")): + return (None, None, None) + if mypkg in __pkgsplit_cache__: + return __pkgsplit_cache__[mypkg] + + myfile = os.path.splitext(os.path.basename(mypkg)) + parts = myfile[0].split('_') + __pkgsplit_cache__[mypkg] = parts + if len(parts) > 3: + raise ParseError("Unable to generate default variables from filename (too many underscores)", mypkg) + exp = 3 - len(parts) + tmplist = [] + while exp != 0: + exp -= 1 + tmplist.append(None) + parts.extend(tmplist) + return parts + +def get_file_depends(d): + '''Return the dependent files''' + dep_files = [] + depends = d.getVar('__base_depends', False) or [] + depends = depends + (d.getVar('__depends', False) or []) + for (fn, _) in depends: + dep_files.append(os.path.abspath(fn)) + return " ".join(dep_files) + +from bb.parse.parse_py import __version__, ConfHandler, BBHandler diff --git a/poky/bitbake/lib/bb/parse/ast.py b/poky/bitbake/lib/bb/parse/ast.py new file mode 100644 index 0000000000..6690dc51c2 --- /dev/null +++ b/poky/bitbake/lib/bb/parse/ast.py @@ -0,0 +1,442 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" + AbstractSyntaxTree classes for the Bitbake language +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2009 Holger Hans Peter Freyther +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + +import re +import string +import logging +import bb +import itertools +from bb import methodpool +from bb.parse import logger + +class StatementGroup(list): + def eval(self, data): + for statement in self: + statement.eval(data) + +class AstNode(object): + def __init__(self, filename, lineno): + self.filename = filename + self.lineno = lineno + +class IncludeNode(AstNode): + def __init__(self, filename, lineno, what_file, force): + AstNode.__init__(self, filename, lineno) + self.what_file = what_file + self.force = force + + def eval(self, data): + """ + Include the file and evaluate the statements + """ + s = data.expand(self.what_file) + logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s) + + # TODO: Cache those includes... maybe not here though + if self.force: + bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, "include required") + else: + bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False) + +class ExportNode(AstNode): + def __init__(self, filename, lineno, var): + AstNode.__init__(self, filename, lineno) + self.var = var + + def eval(self, data): + data.setVarFlag(self.var, "export", 1, op = 'exported') + +class UnsetNode(AstNode): + def __init__(self, filename, lineno, var): + AstNode.__init__(self, filename, lineno) + self.var = var + + def eval(self, data): + loginfo = { + 'variable': self.var, + 'file': self.filename, + 'line': self.lineno, + } + data.delVar(self.var,**loginfo) + +class UnsetFlagNode(AstNode): + def __init__(self, filename, lineno, var, flag): + AstNode.__init__(self, filename, lineno) + self.var = var + self.flag = flag + + def eval(self, data): + loginfo = { + 'variable': self.var, + 'file': self.filename, + 'line': self.lineno, + } + data.delVarFlag(self.var, self.flag, **loginfo) + +class DataNode(AstNode): + """ + Various data related updates. For the sake of sanity + we have one class doing all this. This means that all + this need to be re-evaluated... we might be able to do + that faster with multiple classes. + """ + def __init__(self, filename, lineno, groupd): + AstNode.__init__(self, filename, lineno) + self.groupd = groupd + + def getFunc(self, key, data): + if 'flag' in self.groupd and self.groupd['flag'] != None: + return data.getVarFlag(key, self.groupd['flag'], expand=False, noweakdefault=True) + else: + return data.getVar(key, False, noweakdefault=True, parsing=True) + + def eval(self, data): + groupd = self.groupd + key = groupd["var"] + loginfo = { + 'variable': key, + 'file': self.filename, + 'line': self.lineno, + } + if "exp" in groupd and groupd["exp"] != None: + data.setVarFlag(key, "export", 1, op = 'exported', **loginfo) + + op = "set" + if "ques" in groupd and groupd["ques"] != None: + val = self.getFunc(key, data) + op = "set?" 
+ if val == None: + val = groupd["value"] + elif "colon" in groupd and groupd["colon"] != None: + e = data.createCopy() + op = "immediate" + val = e.expand(groupd["value"], key + "[:=]") + elif "append" in groupd and groupd["append"] != None: + op = "append" + val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"]) + elif "prepend" in groupd and groupd["prepend"] != None: + op = "prepend" + val = "%s %s" % (groupd["value"], (self.getFunc(key, data) or "")) + elif "postdot" in groupd and groupd["postdot"] != None: + op = "postdot" + val = "%s%s" % ((self.getFunc(key, data) or ""), groupd["value"]) + elif "predot" in groupd and groupd["predot"] != None: + op = "predot" + val = "%s%s" % (groupd["value"], (self.getFunc(key, data) or "")) + else: + val = groupd["value"] + + flag = None + if 'flag' in groupd and groupd['flag'] != None: + flag = groupd['flag'] + elif groupd["lazyques"]: + flag = "_defaultval" + + loginfo['op'] = op + loginfo['detail'] = groupd["value"] + + if flag: + data.setVarFlag(key, flag, val, **loginfo) + else: + data.setVar(key, val, parsing=True, **loginfo) + +class MethodNode(AstNode): + tr_tbl = str.maketrans('/.+-@%&', '_______') + + def __init__(self, filename, lineno, func_name, body, python, fakeroot): + AstNode.__init__(self, filename, lineno) + self.func_name = func_name + self.body = body + self.python = python + self.fakeroot = fakeroot + + def eval(self, data): + text = '\n'.join(self.body) + funcname = self.func_name + if self.func_name == "__anonymous": + funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl))) + self.python = True + text = "def %s(d):\n" % (funcname) + text + bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body)) + anonfuncs = data.getVar('__BBANONFUNCS', False) or [] + anonfuncs.append(funcname) + data.setVar('__BBANONFUNCS', anonfuncs) + if data.getVar(funcname, False): + # clean up old version of this piece of metadata, as its + # flags could cause problems + data.delVarFlag(funcname, 'python') + data.delVarFlag(funcname, 'fakeroot') + if self.python: + data.setVarFlag(funcname, "python", "1") + if self.fakeroot: + data.setVarFlag(funcname, "fakeroot", "1") + data.setVarFlag(funcname, "func", 1) + data.setVar(funcname, text, parsing=True) + data.setVarFlag(funcname, 'filename', self.filename) + data.setVarFlag(funcname, 'lineno', str(self.lineno - len(self.body))) + +class PythonMethodNode(AstNode): + def __init__(self, filename, lineno, function, modulename, body): + AstNode.__init__(self, filename, lineno) + self.function = function + self.modulename = modulename + self.body = body + + def eval(self, data): + # Note we will add root to parsedmethods after having parse + # 'this' file. 
This means we will not parse methods from + # bb classes twice + text = '\n'.join(self.body) + bb.methodpool.insert_method(self.modulename, text, self.filename, self.lineno - len(self.body) - 1) + data.setVarFlag(self.function, "func", 1) + data.setVarFlag(self.function, "python", 1) + data.setVar(self.function, text, parsing=True) + data.setVarFlag(self.function, 'filename', self.filename) + data.setVarFlag(self.function, 'lineno', str(self.lineno - len(self.body) - 1)) + +class ExportFuncsNode(AstNode): + def __init__(self, filename, lineno, fns, classname): + AstNode.__init__(self, filename, lineno) + self.n = fns.split() + self.classname = classname + + def eval(self, data): + + for func in self.n: + calledfunc = self.classname + "_" + func + + if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False): + continue + + if data.getVar(func, False): + data.setVarFlag(func, 'python', None) + data.setVarFlag(func, 'func', None) + + for flag in [ "func", "python" ]: + if data.getVarFlag(calledfunc, flag, False): + data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False)) + for flag in [ "dirs" ]: + if data.getVarFlag(func, flag, False): + data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False)) + data.setVarFlag(func, "filename", "autogenerated") + data.setVarFlag(func, "lineno", 1) + + if data.getVarFlag(calledfunc, "python", False): + data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True) + else: + if "-" in self.classname: + bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc)) + data.setVar(func, " " + calledfunc + "\n", parsing=True) + data.setVarFlag(func, 'export_func', '1') + +class AddTaskNode(AstNode): + def __init__(self, filename, lineno, func, before, after): + AstNode.__init__(self, filename, lineno) + self.func = func + self.before = before + self.after = after + + def eval(self, data): + bb.build.addtask(self.func, self.before, self.after, data) + +class DelTaskNode(AstNode): + def __init__(self, filename, lineno, func): + AstNode.__init__(self, filename, lineno) + self.func = func + + def eval(self, data): + bb.build.deltask(self.func, data) + +class BBHandlerNode(AstNode): + def __init__(self, filename, lineno, fns): + AstNode.__init__(self, filename, lineno) + self.hs = fns.split() + + def eval(self, data): + bbhands = data.getVar('__BBHANDLERS', False) or [] + for h in self.hs: + bbhands.append(h) + data.setVarFlag(h, "handler", 1) + data.setVar('__BBHANDLERS', bbhands) + +class InheritNode(AstNode): + def __init__(self, filename, lineno, classes): + AstNode.__init__(self, filename, lineno) + self.classes = classes + + def eval(self, data): + bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data) + +def handleInclude(statements, filename, lineno, m, force): + statements.append(IncludeNode(filename, lineno, m.group(1), force)) + +def handleExport(statements, filename, lineno, m): + statements.append(ExportNode(filename, lineno, m.group(1))) + +def handleUnset(statements, filename, lineno, m): + statements.append(UnsetNode(filename, lineno, m.group(1))) + +def handleUnsetFlag(statements, filename, lineno, m): + statements.append(UnsetFlagNode(filename, lineno, m.group(1), m.group(2))) + +def handleData(statements, filename, lineno, groupd): + statements.append(DataNode(filename, 
lineno, groupd)) + +def handleMethod(statements, filename, lineno, func_name, body, python, fakeroot): + statements.append(MethodNode(filename, lineno, func_name, body, python, fakeroot)) + +def handlePythonMethod(statements, filename, lineno, funcname, modulename, body): + statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body)) + +def handleExportFuncs(statements, filename, lineno, m, classname): + statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname)) + +def handleAddTask(statements, filename, lineno, m): + func = m.group("func") + before = m.group("before") + after = m.group("after") + if func is None: + return + + statements.append(AddTaskNode(filename, lineno, func, before, after)) + +def handleDelTask(statements, filename, lineno, m): + func = m.group("func") + if func is None: + return + + statements.append(DelTaskNode(filename, lineno, func)) + +def handleBBHandlers(statements, filename, lineno, m): + statements.append(BBHandlerNode(filename, lineno, m.group(1))) + +def handleInherit(statements, filename, lineno, m): + classes = m.group(1) + statements.append(InheritNode(filename, lineno, classes)) + +def runAnonFuncs(d): + code = [] + for funcname in d.getVar("__BBANONFUNCS", False) or []: + code.append("%s(d)" % funcname) + bb.utils.better_exec("\n".join(code), {"d": d}) + +def finalize(fn, d, variant = None): + saved_handlers = bb.event.get_handlers().copy() + + for var in d.getVar('__BBHANDLERS', False) or []: + # try to add the handler + handlerfn = d.getVarFlag(var, "filename", False) + if not handlerfn: + bb.fatal("Undefined event handler function '%s'" % var) + handlerln = int(d.getVarFlag(var, "lineno", False)) + bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln) + + bb.event.fire(bb.event.RecipePreFinalise(fn), d) + + bb.data.expandKeys(d) + runAnonFuncs(d) + + tasklist = d.getVar('__BBTASKS', False) or [] + bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d) + bb.build.add_tasks(tasklist, d) + + bb.parse.siggen.finalise(fn, d, variant) + + d.setVar('BBINCLUDED', bb.parse.get_file_depends(d)) + + bb.event.fire(bb.event.RecipeParsed(fn), d) + bb.event.set_handlers(saved_handlers) + +def _create_variants(datastores, names, function, onlyfinalise): + def create_variant(name, orig_d, arg = None): + if onlyfinalise and name not in onlyfinalise: + return + new_d = bb.data.createCopy(orig_d) + function(arg or name, new_d) + datastores[name] = new_d + + for variant in list(datastores.keys()): + for name in names: + if not variant: + # Based on main recipe + create_variant(name, datastores[""]) + else: + create_variant("%s-%s" % (variant, name), datastores[variant], name) + +def multi_finalize(fn, d): + appends = (d.getVar("__BBAPPEND") or "").split() + for append in appends: + logger.debug(1, "Appending .bbappend file %s to %s", append, fn) + bb.parse.BBHandler.handle(append, d, True) + + onlyfinalise = d.getVar("__ONLYFINALISE", False) + + safe_d = d + d = bb.data.createCopy(safe_d) + try: + finalize(fn, d) + except bb.parse.SkipRecipe as e: + d.setVar("__SKIPPED", e.args[0]) + datastores = {"": safe_d} + + extended = d.getVar("BBCLASSEXTEND") or "" + if extended: + # the following is to support bbextends with arguments, for e.g. 
multilib + # an example is as follows: + # BBCLASSEXTEND = "multilib:lib32" + # it will create foo-lib32, inheriting multilib.bbclass and set + # BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32" + extendedmap = {} + variantmap = {} + + for ext in extended.split(): + eext = ext.split(':', 2) + if len(eext) > 1: + extendedmap[ext] = eext[0] + variantmap[ext] = eext[1] + else: + extendedmap[ext] = ext + + pn = d.getVar("PN") + def extendfunc(name, d): + if name != extendedmap[name]: + d.setVar("BBEXTENDCURR", extendedmap[name]) + d.setVar("BBEXTENDVARIANT", variantmap[name]) + else: + d.setVar("PN", "%s-%s" % (pn, name)) + bb.parse.BBHandler.inherit(extendedmap[name], fn, 0, d) + + safe_d.setVar("BBCLASSEXTEND", extended) + _create_variants(datastores, extendedmap.keys(), extendfunc, onlyfinalise) + + for variant in datastores.keys(): + if variant: + try: + if not onlyfinalise or variant in onlyfinalise: + finalize(fn, datastores[variant], variant) + except bb.parse.SkipRecipe as e: + datastores[variant].setVar("__SKIPPED", e.args[0]) + + datastores[""] = d + return datastores diff --git a/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py new file mode 100644 index 0000000000..e5039e3bd1 --- /dev/null +++ b/poky/bitbake/lib/bb/parse/parse_py/BBHandler.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" + class for handling .bb files + + Reads a .bb file and obtains its metadata + +""" + + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +import re, bb, os +import logging +import bb.build, bb.utils +from bb import data + +from . import ConfHandler +from .. 
import resolve_file, ast, logger, ParseError +from .ConfHandler import include, init + +# For compatibility +bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"]) + +__func_start_regexp__ = re.compile( r"(((?Ppython)|(?Pfakeroot))\s*)*(?P[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" ) +__inherit_regexp__ = re.compile( r"inherit\s+(.+)" ) +__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" ) +__addtask_regexp__ = re.compile("addtask\s+(?P\w+)\s*((before\s*(?P((.*(?=after))|(.*))))|(after\s*(?P((.*(?=before))|(.*)))))*") +__deltask_regexp__ = re.compile("deltask\s+(?P\w+)") +__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" ) +__def_regexp__ = re.compile( r"def\s+(\w+).*:" ) +__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" ) + +__infunc__ = [] +__inpython__ = False +__body__ = [] +__classname__ = "" + +cached_statements = {} + +def supports(fn, d): + """Return True if fn has a supported extension""" + return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"] + +def inherit(files, fn, lineno, d): + __inherit_cache = d.getVar('__inherit_cache', False) or [] + files = d.expand(files).split() + for file in files: + if not os.path.isabs(file) and not file.endswith(".bbclass"): + file = os.path.join('classes', '%s.bbclass' % file) + + if not os.path.isabs(file): + bbpath = d.getVar("BBPATH") + abs_fn, attempts = bb.utils.which(bbpath, file, history=True) + for af in attempts: + if af != abs_fn: + bb.parse.mark_dependency(d, af) + if abs_fn: + file = abs_fn + + if not file in __inherit_cache: + logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno)) + __inherit_cache.append( file ) + d.setVar('__inherit_cache', __inherit_cache) + include(fn, file, lineno, d, "inherit") + __inherit_cache = d.getVar('__inherit_cache', False) or [] + +def get_statements(filename, absolute_filename, base_name): + global cached_statements + + try: + return cached_statements[absolute_filename] + except KeyError: + with open(absolute_filename, 'r') as f: + statements = ast.StatementGroup() + + lineno = 0 + while True: + lineno = lineno + 1 + s = f.readline() + if not s: break + s = s.rstrip() + feeder(lineno, s, filename, base_name, statements) + + if __inpython__: + # add a blank line to close out any python definition + feeder(lineno, "", filename, base_name, statements, eof=True) + + if filename.endswith(".bbclass") or filename.endswith(".inc"): + cached_statements[absolute_filename] = statements + return statements + +def handle(fn, d, include): + global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__ + __body__ = [] + __infunc__ = [] + __classname__ = "" + __residue__ = [] + + base_name = os.path.basename(fn) + (root, ext) = os.path.splitext(base_name) + init(d) + + if ext == ".bbclass": + __classname__ = root + __inherit_cache = d.getVar('__inherit_cache', False) or [] + if not fn in __inherit_cache: + __inherit_cache.append(fn) + d.setVar('__inherit_cache', __inherit_cache) + + if include != 0: + oldfile = d.getVar('FILE', False) + else: + oldfile = None + + abs_fn = resolve_file(fn, d) + + # actual loading + statements = get_statements(fn, abs_fn, base_name) + + # DONE WITH PARSING... 
time to evaluate + if ext != ".bbclass" and abs_fn != oldfile: + d.setVar('FILE', abs_fn) + + try: + statements.eval(d) + except bb.parse.SkipRecipe: + d.setVar("__SKIPPED", True) + if include == 0: + return { "" : d } + + if __infunc__: + raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2]) + if __residue__: + raise ParseError("Leftover unparsed (incomplete?) data %s from %s" % __residue__, fn) + + if ext != ".bbclass" and include == 0: + return ast.multi_finalize(fn, d) + + if ext != ".bbclass" and oldfile and abs_fn != oldfile: + d.setVar("FILE", oldfile) + + return d + +def feeder(lineno, s, fn, root, statements, eof=False): + global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__ + if __infunc__: + if s == '}': + __body__.append('') + ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4]) + __infunc__ = [] + __body__ = [] + else: + __body__.append(s) + return + + if __inpython__: + m = __python_func_regexp__.match(s) + if m and not eof: + __body__.append(s) + return + else: + ast.handlePythonMethod(statements, fn, lineno, __inpython__, + root, __body__) + __body__ = [] + __inpython__ = False + + if eof: + return + + if s and s[0] == '#': + if len(__residue__) != 0 and __residue__[0][0] != "#": + bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s)) + + if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"): + bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s)) + + if s and s[-1] == '\\': + __residue__.append(s[:-1]) + return + + s = "".join(__residue__) + s + __residue__ = [] + + # Skip empty lines + if s == '': + return + + # Skip comments + if s[0] == '#': + return + + m = __func_start_regexp__.match(s) + if m: + __infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None] + return + + m = __def_regexp__.match(s) + if m: + __body__.append(s) + __inpython__ = m.group(1) + + return + + m = __export_func_regexp__.match(s) + if m: + ast.handleExportFuncs(statements, fn, lineno, m, __classname__) + return + + m = __addtask_regexp__.match(s) + if m: + ast.handleAddTask(statements, fn, lineno, m) + return + + m = __deltask_regexp__.match(s) + if m: + ast.handleDelTask(statements, fn, lineno, m) + return + + m = __addhandler_regexp__.match(s) + if m: + ast.handleBBHandlers(statements, fn, lineno, m) + return + + m = __inherit_regexp__.match(s) + if m: + ast.handleInherit(statements, fn, lineno, m) + return + + return ConfHandler.feeder(lineno, s, fn, statements) + +# Add us to the handlers list +from .. 
import handlers +handlers.append({'supports': supports, 'handle': handle, 'init': init}) +del handlers diff --git a/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py new file mode 100644 index 0000000000..9d3ebe16f4 --- /dev/null +++ b/poky/bitbake/lib/bb/parse/parse_py/ConfHandler.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" + class for handling configuration data files + + Reads a .conf file and obtains its metadata + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import errno +import re +import os +import bb.utils +from bb.parse import ParseError, resolve_file, ast, logger, handle + +__config_regexp__ = re.compile( r""" + ^ + (?Pexport\s+)? + (?P[a-zA-Z0-9\-_+.${}/~]+?) + (\[(?P[a-zA-Z0-9\-_+.]+)\])? + + \s* ( + (?P:=) | + (?P\?\?=) | + (?P\?=) | + (?P\+=) | + (?P=\+) | + (?P=\.) | + (?P\.=) | + = + ) \s* + + (?!'[^']*'[^']*'$) + (?!\"[^\"]*\"[^\"]*\"$) + (?P['\"]) + (?P.*) + (?P=apo) + $ + """, re.X) +__include_regexp__ = re.compile( r"include\s+(.+)" ) +__require_regexp__ = re.compile( r"require\s+(.+)" ) +__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" ) +__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" ) +__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" ) + +def init(data): + topdir = data.getVar('TOPDIR', False) + if not topdir: + data.setVar('TOPDIR', os.getcwd()) + + +def supports(fn, d): + return fn[-5:] == ".conf" + +def include(parentfn, fns, lineno, data, error_out): + """ + error_out: A string indicating the verb (e.g. "include", "inherit") to be + used in a ParseError that will be raised if the file to be included could + not be included. Specify False to avoid raising an error in this case. + """ + fns = data.expand(fns) + parentfn = data.expand(parentfn) + + # "include" or "require" accept zero to n space-separated file names to include. + for fn in fns.split(): + include_single_file(parentfn, fn, lineno, data, error_out) + +def include_single_file(parentfn, fn, lineno, data, error_out): + """ + Helper function for include() which does not expand or split its parameters. 
+ """ + if parentfn == fn: # prevent infinite recursion + return None + + if not os.path.isabs(fn): + dname = os.path.dirname(parentfn) + bbpath = "%s:%s" % (dname, data.getVar("BBPATH")) + abs_fn, attempts = bb.utils.which(bbpath, fn, history=True) + if abs_fn and bb.parse.check_dependency(data, abs_fn): + logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE'))) + for af in attempts: + bb.parse.mark_dependency(data, af) + if abs_fn: + fn = abs_fn + elif bb.parse.check_dependency(data, fn): + logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE'))) + + try: + bb.parse.handle(fn, data, True) + except (IOError, OSError) as exc: + if exc.errno == errno.ENOENT: + if error_out: + raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno) + logger.debug(2, "CONF file '%s' not found", fn) + else: + if error_out: + raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno) + else: + raise ParseError("Error parsing %s: %s" % (fn, exc.strerror), parentfn, lineno) + +# We have an issue where a UI might want to enforce particular settings such as +# an empty DISTRO variable. If configuration files do something like assigning +# a weak default, it turns out to be very difficult to filter out these changes, +# particularly when the weak default might appear half way though parsing a chain +# of configuration files. We therefore let the UIs hook into configuration file +# parsing. This turns out to be a hard problem to solve any other way. +confFilters = [] + +def handle(fn, data, include): + init(data) + + if include == 0: + oldfile = None + else: + oldfile = data.getVar('FILE', False) + + abs_fn = resolve_file(fn, data) + f = open(abs_fn, 'r') + + statements = ast.StatementGroup() + lineno = 0 + while True: + lineno = lineno + 1 + s = f.readline() + if not s: + break + w = s.strip() + # skip empty lines + if not w: + continue + s = s.rstrip() + while s[-1] == '\\': + s2 = f.readline().strip() + lineno = lineno + 1 + if (not s2 or s2 and s2[0] != "#") and s[0] == "#" : + bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s)) + s = s[:-1] + s2 + # skip comments + if s[0] == '#': + continue + feeder(lineno, s, abs_fn, statements) + + # DONE WITH PARSING... 
time to evaluate + data.setVar('FILE', abs_fn) + statements.eval(data) + if oldfile: + data.setVar('FILE', oldfile) + + f.close() + + for f in confFilters: + f(fn, data) + + return data + +def feeder(lineno, s, fn, statements): + m = __config_regexp__.match(s) + if m: + groupd = m.groupdict() + ast.handleData(statements, fn, lineno, groupd) + return + + m = __include_regexp__.match(s) + if m: + ast.handleInclude(statements, fn, lineno, m, False) + return + + m = __require_regexp__.match(s) + if m: + ast.handleInclude(statements, fn, lineno, m, True) + return + + m = __export_regexp__.match(s) + if m: + ast.handleExport(statements, fn, lineno, m) + return + + m = __unset_regexp__.match(s) + if m: + ast.handleUnset(statements, fn, lineno, m) + return + + m = __unset_flag_regexp__.match(s) + if m: + ast.handleUnsetFlag(statements, fn, lineno, m) + return + + raise ParseError("unparsed line: '%s'" % s, fn, lineno); + +# Add us to the handlers list +from bb.parse import handlers +handlers.append({'supports': supports, 'handle': handle, 'init': init}) +del handlers diff --git a/poky/bitbake/lib/bb/parse/parse_py/__init__.py b/poky/bitbake/lib/bb/parse/parse_py/__init__.py new file mode 100644 index 0000000000..3e658d0de9 --- /dev/null +++ b/poky/bitbake/lib/bb/parse/parse_py/__init__.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake Parsers + +File parsers for the BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +from __future__ import absolute_import +from . import ConfHandler +from . import BBHandler + +__version__ = '1.0' diff --git a/poky/bitbake/lib/bb/persist_data.py b/poky/bitbake/lib/bb/persist_data.py new file mode 100644 index 0000000000..bef7018614 --- /dev/null +++ b/poky/bitbake/lib/bb/persist_data.py @@ -0,0 +1,214 @@ +"""BitBake Persistent Data Store + +Used to store data in a central location such that other threads/tasks can +access them at some future date. Acts as a convenience wrapper around sqlite, +currently, providing a key/value store accessed by 'domain'. +""" + +# Copyright (C) 2007 Richard Purdie +# Copyright (C) 2010 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import collections +import logging +import os.path +import sys +import warnings +from bb.compat import total_ordering +from collections import Mapping +import sqlite3 + +sqlversion = sqlite3.sqlite_version_info +if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3): + raise Exception("sqlite3 version 3.3.0 or later is required.") + + +logger = logging.getLogger("BitBake.PersistData") +if hasattr(sqlite3, 'enable_shared_cache'): + try: + sqlite3.enable_shared_cache(True) + except sqlite3.OperationalError: + pass + + +@total_ordering +class SQLTable(collections.MutableMapping): + """Object representing a table/domain in the database""" + def __init__(self, cachefile, table): + self.cachefile = cachefile + self.table = table + self.cursor = connect(self.cachefile) + + self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" + % table) + + def _execute(self, *query): + """Execute a query, waiting to acquire a lock if necessary""" + count = 0 + while True: + try: + return self.cursor.execute(*query) + except sqlite3.OperationalError as exc: + if 'database is locked' in str(exc) and count < 500: + count = count + 1 + self.cursor.close() + self.cursor = connect(self.cachefile) + continue + raise + + def __enter__(self): + self.cursor.__enter__() + return self + + def __exit__(self, *excinfo): + self.cursor.__exit__(*excinfo) + + def __getitem__(self, key): + data = self._execute("SELECT * from %s where key=?;" % + self.table, [key]) + for row in data: + return row[1] + raise KeyError(key) + + def __delitem__(self, key): + if key not in self: + raise KeyError(key) + self._execute("DELETE from %s where key=?;" % self.table, [key]) + + def __setitem__(self, key, value): + if not isinstance(key, str): + raise TypeError('Only string keys are supported') + elif not isinstance(value, str): + raise TypeError('Only string values are supported') + + data = self._execute("SELECT * from %s where key=?;" % + self.table, [key]) + exists = len(list(data)) + if exists: + self._execute("UPDATE %s SET value=? 
WHERE key=?;" % self.table, + [value, key]) + else: + self._execute("INSERT into %s(key, value) values (?, ?);" % + self.table, [key, value]) + + def __contains__(self, key): + return key in set(self) + + def __len__(self): + data = self._execute("SELECT COUNT(key) FROM %s;" % self.table) + for row in data: + return row[0] + + def __iter__(self): + data = self._execute("SELECT key FROM %s;" % self.table) + return (row[0] for row in data) + + def __lt__(self, other): + if not isinstance(other, Mapping): + raise NotImplemented + + return len(self) < len(other) + + def get_by_pattern(self, pattern): + data = self._execute("SELECT * FROM %s WHERE key LIKE ?;" % + self.table, [pattern]) + return [row[1] for row in data] + + def values(self): + return list(self.itervalues()) + + def itervalues(self): + data = self._execute("SELECT value FROM %s;" % self.table) + return (row[0] for row in data) + + def items(self): + return list(self.iteritems()) + + def iteritems(self): + return self._execute("SELECT * FROM %s;" % self.table) + + def clear(self): + self._execute("DELETE FROM %s;" % self.table) + + def has_key(self, key): + return key in self + + +class PersistData(object): + """Deprecated representation of the bitbake persistent data store""" + def __init__(self, d): + warnings.warn("Use of PersistData is deprecated. Please use " + "persist(domain, d) instead.", + category=DeprecationWarning, + stacklevel=2) + + self.data = persist(d) + logger.debug(1, "Using '%s' as the persistent data cache", + self.data.filename) + + def addDomain(self, domain): + """ + Add a domain (pending deprecation) + """ + return self.data[domain] + + def delDomain(self, domain): + """ + Removes a domain and all the data it contains + """ + del self.data[domain] + + def getKeyValues(self, domain): + """ + Return a list of key + value pairs for a domain + """ + return list(self.data[domain].items()) + + def getValue(self, domain, key): + """ + Return the value of a key for a domain + """ + return self.data[domain][key] + + def setValue(self, domain, key, value): + """ + Sets the value of a key for a domain + """ + self.data[domain][key] = value + + def delValue(self, domain, key): + """ + Deletes a key/value pair + """ + del self.data[domain][key] + +def connect(database): + connection = sqlite3.connect(database, timeout=5, isolation_level=None) + connection.execute("pragma synchronous = off;") + connection.text_factory = str + return connection + +def persist(domain, d): + """Convenience factory for SQLTable objects based upon metadata""" + import bb.utils + cachedir = (d.getVar("PERSISTENT_DIR") or + d.getVar("CACHE")) + if not cachedir: + logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable") + sys.exit(1) + + bb.utils.mkdirhier(cachedir) + cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3") + return SQLTable(cachefile, domain) diff --git a/poky/bitbake/lib/bb/process.py b/poky/bitbake/lib/bb/process.py new file mode 100644 index 0000000000..e69697cb68 --- /dev/null +++ b/poky/bitbake/lib/bb/process.py @@ -0,0 +1,179 @@ +import logging +import signal +import subprocess +import errno +import select + +logger = logging.getLogger('BitBake.Process') + +def subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
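As an illustrative sketch (not part of the imported sources), the persist() factory defined in persist_data.py above gives a dict-like, sqlite-backed store; this assumes a BitBake datastore `d` with PERSISTENT_DIR or CACHE set, and the domain name used here is only an example:

    import bb.persist_data

    # Open (creating if needed) the 'MY_DOMAIN' table in bb_persist_data.sqlite3
    db = bb.persist_data.persist("MY_DOMAIN", d)

    db["git://example.com/repo.git"] = "deadbeef"   # keys and values must be strings
    rev = db["git://example.com/repo.git"]          # dict-style lookup
    hits = db.get_by_pattern("git://%")             # SQL LIKE pattern, returns matching values
    del db["git://example.com/repo.git"]            # removes the key/value pair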
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +class CmdError(RuntimeError): + def __init__(self, command, msg=None): + self.command = command + self.msg = msg + + def __str__(self): + if not isinstance(self.command, str): + cmd = subprocess.list2cmdline(self.command) + else: + cmd = self.command + + msg = "Execution of '%s' failed" % cmd + if self.msg: + msg += ': %s' % self.msg + return msg + +class NotFoundError(CmdError): + def __str__(self): + return CmdError.__str__(self) + ": command not found" + +class ExecutionError(CmdError): + def __init__(self, command, exitcode, stdout = None, stderr = None): + CmdError.__init__(self, command) + self.exitcode = exitcode + self.stdout = stdout + self.stderr = stderr + + def __str__(self): + message = "" + if self.stderr: + message += self.stderr + if self.stdout: + message += self.stdout + if message: + message = ":\n" + message + return (CmdError.__str__(self) + + " with exit code %s" % self.exitcode + message) + +class Popen(subprocess.Popen): + defaults = { + "close_fds": True, + "preexec_fn": subprocess_setup, + "stdout": subprocess.PIPE, + "stderr": subprocess.STDOUT, + "stdin": subprocess.PIPE, + "shell": False, + } + + def __init__(self, *args, **kwargs): + options = dict(self.defaults) + options.update(kwargs) + subprocess.Popen.__init__(self, *args, **options) + +def _logged_communicate(pipe, log, input, extrafiles): + if pipe.stdin: + if input is not None: + pipe.stdin.write(input) + pipe.stdin.close() + + outdata, errdata = [], [] + rin = [] + + if pipe.stdout is not None: + bb.utils.nonblockingfd(pipe.stdout.fileno()) + rin.append(pipe.stdout) + if pipe.stderr is not None: + bb.utils.nonblockingfd(pipe.stderr.fileno()) + rin.append(pipe.stderr) + for fobj, _ in extrafiles: + bb.utils.nonblockingfd(fobj.fileno()) + rin.append(fobj) + + def readextras(selected): + for fobj, func in extrafiles: + if fobj in selected: + try: + data = fobj.read() + except IOError as err: + if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK: + data = None + if data is not None: + func(data) + + def read_all_pipes(log, rin, outdata, errdata): + rlist = rin + stdoutbuf = b"" + stderrbuf = b"" + + try: + r,w,e = select.select (rlist, [], [], 1) + except OSError as e: + if e.errno != errno.EINTR: + raise + + readextras(r) + + if pipe.stdout in r: + data = stdoutbuf + pipe.stdout.read() + if data is not None and len(data) > 0: + try: + data = data.decode("utf-8") + outdata.append(data) + log.write(data) + log.flush() + stdoutbuf = b"" + except UnicodeDecodeError: + stdoutbuf = data + + if pipe.stderr in r: + data = stderrbuf + pipe.stderr.read() + if data is not None and len(data) > 0: + try: + data = data.decode("utf-8") + errdata.append(data) + log.write(data) + log.flush() + stderrbuf = b"" + except UnicodeDecodeError: + stderrbuf = data + + try: + # Read all pipes while the process is open + while pipe.poll() is None: + read_all_pipes(log, rin, outdata, errdata) + + # Pocess closed, drain all pipes... 
+ read_all_pipes(log, rin, outdata, errdata) + finally: + log.flush() + + if pipe.stdout is not None: + pipe.stdout.close() + if pipe.stderr is not None: + pipe.stderr.close() + return ''.join(outdata), ''.join(errdata) + +def run(cmd, input=None, log=None, extrafiles=None, **options): + """Convenience function to run a command and return its output, raising an + exception when the command fails""" + + if not extrafiles: + extrafiles = [] + + if isinstance(cmd, str) and not "shell" in options: + options["shell"] = True + + try: + pipe = Popen(cmd, **options) + except OSError as exc: + if exc.errno == 2: + raise NotFoundError(cmd) + else: + raise CmdError(cmd, exc) + + if log: + stdout, stderr = _logged_communicate(pipe, log, input, extrafiles) + else: + stdout, stderr = pipe.communicate(input) + if not stdout is None: + stdout = stdout.decode("utf-8") + if not stderr is None: + stderr = stderr.decode("utf-8") + + if pipe.returncode != 0: + raise ExecutionError(cmd, pipe.returncode, stdout, stderr) + return stdout, stderr diff --git a/poky/bitbake/lib/bb/progress.py b/poky/bitbake/lib/bb/progress.py new file mode 100644 index 0000000000..f54d1c76f8 --- /dev/null +++ b/poky/bitbake/lib/bb/progress.py @@ -0,0 +1,276 @@ +""" +BitBake progress handling code +""" + +# Copyright (C) 2016 Intel Corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import sys +import re +import time +import inspect +import bb.event +import bb.build + +class ProgressHandler(object): + """ + Base class that can pretend to be a file object well enough to be + used to build objects to intercept console output and determine the + progress of some operation. + """ + def __init__(self, d, outfile=None): + self._progress = 0 + self._data = d + self._lastevent = 0 + if outfile: + self._outfile = outfile + else: + self._outfile = sys.stdout + + def _fire_progress(self, taskprogress, rate=None): + """Internal function to fire the progress event""" + bb.event.fire(bb.build.TaskProgress(taskprogress, rate), self._data) + + def write(self, string): + self._outfile.write(string) + + def flush(self): + self._outfile.flush() + + def update(self, progress, rate=None): + ts = time.time() + if progress > 100: + progress = 100 + if progress != self._progress or self._lastevent + 1 < ts: + self._fire_progress(progress, rate) + self._lastevent = ts + self._progress = progress + +class LineFilterProgressHandler(ProgressHandler): + """ + A ProgressHandler variant that provides the ability to filter out + the lines if they contain progress information. Additionally, it + filters out anything before the last line feed on a line. This can + be used to keep the logs clean of output that we've only enabled for + getting progress, assuming that that can be done on a per-line + basis. 
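As an illustrative sketch (not part of the imported sources), the run() helper in process.py above can be called roughly as follows; a plain string command is executed through the shell, keyword options are passed through to subprocess.Popen, and stderr is merged into stdout by default:

    import bb.process

    try:
        # cwd is simply forwarded to subprocess.Popen; the path is a placeholder
        stdout, stderr = bb.process.run("git rev-parse HEAD", cwd="/path/to/repo")
    except bb.process.NotFoundError:
        print("git not found in PATH")
    except bb.process.ExecutionError as exc:
        print(exc)   # message includes the exit code and any captured output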
+ """ + def __init__(self, d, outfile=None): + self._linebuffer = '' + super(LineFilterProgressHandler, self).__init__(d, outfile) + + def write(self, string): + self._linebuffer += string + while True: + breakpos = self._linebuffer.find('\n') + 1 + if breakpos == 0: + break + line = self._linebuffer[:breakpos] + self._linebuffer = self._linebuffer[breakpos:] + # Drop any line feeds and anything that precedes them + lbreakpos = line.rfind('\r') + 1 + if lbreakpos: + line = line[lbreakpos:] + if self.writeline(line): + super(LineFilterProgressHandler, self).write(line) + + def writeline(self, line): + return True + +class BasicProgressHandler(ProgressHandler): + def __init__(self, d, regex=r'(\d+)%', outfile=None): + super(BasicProgressHandler, self).__init__(d, outfile) + self._regex = re.compile(regex) + # Send an initial progress event so the bar gets shown + self._fire_progress(0) + + def write(self, string): + percs = self._regex.findall(string) + if percs: + progress = int(percs[-1]) + self.update(progress) + super(BasicProgressHandler, self).write(string) + +class OutOfProgressHandler(ProgressHandler): + def __init__(self, d, regex, outfile=None): + super(OutOfProgressHandler, self).__init__(d, outfile) + self._regex = re.compile(regex) + # Send an initial progress event so the bar gets shown + self._fire_progress(0) + + def write(self, string): + nums = self._regex.findall(string) + if nums: + progress = (float(nums[-1][0]) / float(nums[-1][1])) * 100 + self.update(progress) + super(OutOfProgressHandler, self).write(string) + +class MultiStageProgressReporter(object): + """ + Class which allows reporting progress without the caller + having to know where they are in the overall sequence. Useful + for tasks made up of python code spread across multiple + classes / functions - the progress reporter object can + be passed around or stored at the object level and calls + to next_stage() and update() made whereever needed. + """ + def __init__(self, d, stage_weights, debug=False): + """ + Initialise the progress reporter. + + Parameters: + * d: the datastore (needed for firing the events) + * stage_weights: a list of weight values, one for each stage. + The value is scaled internally so you only need to specify + values relative to other values in the list, so if there + are two stages and the first takes 2s and the second takes + 10s you would specify [2, 10] (or [1, 5], it doesn't matter). + * debug: specify True (and ensure you call finish() at the end) + in order to show a printout of the calculated stage weights + based on timing each stage. Use this to determine what the + weights should be when you're not sure. + """ + self._data = d + total = sum(stage_weights) + self._stage_weights = [float(x)/total for x in stage_weights] + self._stage = -1 + self._base_progress = 0 + # Send an initial progress event so the bar gets shown + self._fire_progress(0) + self._debug = debug + self._finished = False + if self._debug: + self._last_time = time.time() + self._stage_times = [] + self._stage_total = None + self._callers = [] + + def _fire_progress(self, taskprogress): + bb.event.fire(bb.build.TaskProgress(taskprogress), self._data) + + def next_stage(self, stage_total=None): + """ + Move to the next stage. + Parameters: + * stage_total: optional total for progress within the stage, + see update() for details + NOTE: you need to call this before the first stage. 
+ """ + self._stage += 1 + self._stage_total = stage_total + if self._stage == 0: + # First stage + if self._debug: + self._last_time = time.time() + else: + if self._stage < len(self._stage_weights): + self._base_progress = sum(self._stage_weights[:self._stage]) * 100 + if self._debug: + currtime = time.time() + self._stage_times.append(currtime - self._last_time) + self._last_time = currtime + self._callers.append(inspect.getouterframes(inspect.currentframe())[1]) + elif not self._debug: + bb.warn('ProgressReporter: current stage beyond declared number of stages') + self._base_progress = 100 + self._fire_progress(self._base_progress) + + def update(self, stage_progress): + """ + Update progress within the current stage. + Parameters: + * stage_progress: progress value within the stage. If stage_total + was specified when next_stage() was last called, then this + value is considered to be out of stage_total, otherwise it should + be a percentage value from 0 to 100. + """ + if self._stage_total: + stage_progress = (float(stage_progress) / self._stage_total) * 100 + if self._stage < 0: + bb.warn('ProgressReporter: update called before first call to next_stage()') + elif self._stage < len(self._stage_weights): + progress = self._base_progress + (stage_progress * self._stage_weights[self._stage]) + else: + progress = self._base_progress + if progress > 100: + progress = 100 + self._fire_progress(progress) + + def finish(self): + if self._finished: + return + self._finished = True + if self._debug: + import math + self._stage_times.append(time.time() - self._last_time) + mintime = max(min(self._stage_times), 0.01) + self._callers.append(None) + stage_weights = [int(math.ceil(x / mintime)) for x in self._stage_times] + bb.warn('Stage weights: %s' % stage_weights) + out = [] + for stage_weight, caller in zip(stage_weights, self._callers): + if caller: + out.append('Up to %s:%d: %d' % (caller[1], caller[2], stage_weight)) + else: + out.append('Up to finish: %d' % stage_weight) + bb.warn('Stage times:\n %s' % '\n '.join(out)) + +class MultiStageProcessProgressReporter(MultiStageProgressReporter): + """ + Version of MultiStageProgressReporter intended for use with + standalone processes (such as preparing the runqueue) + """ + def __init__(self, d, processname, stage_weights, debug=False): + self._processname = processname + self._started = False + MultiStageProgressReporter.__init__(self, d, stage_weights, debug) + + def start(self): + if not self._started: + bb.event.fire(bb.event.ProcessStarted(self._processname, 100), self._data) + self._started = True + + def _fire_progress(self, taskprogress): + if taskprogress == 0: + self.start() + return + bb.event.fire(bb.event.ProcessProgress(self._processname, taskprogress), self._data) + + def finish(self): + MultiStageProgressReporter.finish(self) + bb.event.fire(bb.event.ProcessFinished(self._processname), self._data) + +class DummyMultiStageProcessProgressReporter(MultiStageProgressReporter): + """ + MultiStageProcessProgressReporter that takes the calls and does nothing + with them (to avoid a bunch of "if progress_reporter:" checks) + """ + def __init__(self): + MultiStageProcessProgressReporter.__init__(self, "", None, []) + + def _fire_progress(self, taskprogress, rate=None): + pass + + def start(self): + pass + + def next_stage(self, stage_total=None): + pass + + def update(self, stage_progress): + pass + + def finish(self): + pass diff --git a/poky/bitbake/lib/bb/providers.py b/poky/bitbake/lib/bb/providers.py new file mode 100644 index 
0000000000..c2aa98c065 --- /dev/null +++ b/poky/bitbake/lib/bb/providers.py @@ -0,0 +1,430 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import re +import logging +from bb import data, utils +from collections import defaultdict +import bb + +logger = logging.getLogger("BitBake.Provider") + +class NoProvider(bb.BBHandledException): + """Exception raised when no provider of a build dependency can be found""" + +class NoRProvider(bb.BBHandledException): + """Exception raised when no provider of a runtime dependency can be found""" + +class MultipleRProvider(bb.BBHandledException): + """Exception raised when multiple providers of a runtime dependency can be found""" + +def findProviders(cfgData, dataCache, pkg_pn = None): + """ + Convenience function to get latest and preferred providers in pkg_pn + """ + + if not pkg_pn: + pkg_pn = dataCache.pkg_pn + + # Need to ensure data store is expanded + localdata = data.createCopy(cfgData) + bb.data.expandKeys(localdata) + + preferred_versions = {} + latest_versions = {} + + for pn in pkg_pn: + (last_ver, last_file, pref_ver, pref_file) = findBestProvider(pn, localdata, dataCache, pkg_pn) + preferred_versions[pn] = (pref_ver, pref_file) + latest_versions[pn] = (last_ver, last_file) + + return (latest_versions, preferred_versions) + + +def allProviders(dataCache): + """ + Find all providers for each pn + """ + all_providers = defaultdict(list) + for (fn, pn) in dataCache.pkg_fn.items(): + ver = dataCache.pkg_pepvpr[fn] + all_providers[pn].append((ver, fn)) + return all_providers + + +def sortPriorities(pn, dataCache, pkg_pn = None): + """ + Reorder pkg_pn by file priority and default preference + """ + + if not pkg_pn: + pkg_pn = dataCache.pkg_pn + + files = pkg_pn[pn] + priorities = {} + for f in files: + priority = dataCache.bbfile_priority[f] + preference = dataCache.pkg_dp[f] + if priority not in priorities: + priorities[priority] = {} + if preference not in priorities[priority]: + priorities[priority][preference] = [] + priorities[priority][preference].append(f) + tmp_pn = [] + for pri in sorted(priorities): + tmp_pref = [] + for pref in sorted(priorities[pri]): + tmp_pref.extend(priorities[pri][pref]) + tmp_pn = [tmp_pref] + tmp_pn + + return tmp_pn + +def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): + """ + Check if the version pe,pv,pr is the preferred one. 
+ If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%' + """ + if (pr == preferred_r or preferred_r == None): + if (pe == preferred_e or preferred_e == None): + if preferred_v == pv: + return True + if preferred_v != None and preferred_v.endswith('%') and pv.startswith(preferred_v[:len(preferred_v)-1]): + return True + return False + +def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): + """ + Find the first provider in pkg_pn with a PREFERRED_VERSION set. + """ + + preferred_file = None + preferred_ver = None + + # pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot + # hence we do this manually rather than use OVERRIDES + preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn) + if not preferred_v: + preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn) + if not preferred_v: + preferred_v = cfgData.getVar("PREFERRED_VERSION") + + if preferred_v: + m = re.match('(\d+:)*(.*)(_.*)*', preferred_v) + if m: + if m.group(1): + preferred_e = m.group(1)[:-1] + else: + preferred_e = None + preferred_v = m.group(2) + if m.group(3): + preferred_r = m.group(3)[1:] + else: + preferred_r = None + else: + preferred_e = None + preferred_r = None + + for file_set in pkg_pn: + for f in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[f] + if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): + preferred_file = f + preferred_ver = (pe, pv, pr) + break + if preferred_file: + break; + if preferred_r: + pv_str = '%s-%s' % (preferred_v, preferred_r) + else: + pv_str = preferred_v + if not (preferred_e is None): + pv_str = '%s:%s' % (preferred_e, pv_str) + itemstr = "" + if item: + itemstr = " (for item %s)" % item + if preferred_file is None: + logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr) + available_vers = [] + for file_set in pkg_pn: + for f in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[f] + ver_str = pv + if pe: + ver_str = "%s:%s" % (pe, ver_str) + if not ver_str in available_vers: + available_vers.append(ver_str) + if available_vers: + available_vers.sort() + logger.info("versions of %s available: %s", pn, ' '.join(available_vers)) + else: + logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr) + + return (preferred_ver, preferred_file) + + +def findLatestProvider(pn, cfgData, dataCache, file_set): + """ + Return the highest version of the providers in file_set. + Take default preferences into account. + """ + latest = None + latest_p = 0 + latest_f = None + for file_name in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[file_name] + dp = dataCache.pkg_dp[file_name] + + if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p): + latest = (pe, pv, pr) + latest_f = file_name + latest_p = dp + + return (latest, latest_f) + + +def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): + """ + If there is a PREFERRED_VERSION, find the highest-priority bbfile + providing that version. If not, find the latest version provided by + an bbfile in the highest-priority set. 
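As an illustrative sketch (not part of the imported sources), the '%' wildcard described above means that, for example, PREFERRED_VERSION_foo = "1.2.%" selects any 1.2.x version; calling the helper directly with no epoch or revision constraint:

    from bb.providers import preferredVersionMatch

    preferredVersionMatch(None, "1.2.7", "r0", None, "1.2.%", None)   # True: "1.2.7" starts with "1.2."
    preferredVersionMatch(None, "1.3.0", "r0", None, "1.2.%", None)   # False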
+ """ + + sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn) + # Find the highest priority provider with a PREFERRED_VERSION set + (preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item) + # Find the latest version of the highest priority provider + (latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0]) + + if preferred_file is None: + preferred_file = latest_f + preferred_ver = latest + + return (latest, latest_f, preferred_ver, preferred_file) + + +def _filterProviders(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables + """ + eligible = [] + preferred_versions = {} + sortpkg_pn = {} + + # The order of providers depends on the order of the files on the disk + # up to here. Sort pkg_pn to make dependency issues reproducible rather + # than effectively random. + providers.sort() + + # Collate providers by PN + pkg_pn = {} + for p in providers: + pn = dataCache.pkg_fn[p] + if pn not in pkg_pn: + pkg_pn[pn] = [] + pkg_pn[pn].append(p) + + logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys()))) + + # First add PREFERRED_VERSIONS + for pn in sorted(pkg_pn): + sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn) + preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item) + if preferred_versions[pn][1]: + eligible.append(preferred_versions[pn][1]) + + # Now add latest versions + for pn in sorted(sortpkg_pn): + if pn in preferred_versions and preferred_versions[pn][1]: + continue + preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0]) + eligible.append(preferred_versions[pn][1]) + + if len(eligible) == 0: + logger.error("no eligible providers for %s", item) + return 0 + + # If pn == item, give it a slight default preference + # This means PREFERRED_PROVIDER_foobar defaults to foobar if available + for p in providers: + pn = dataCache.pkg_fn[p] + if pn != item: + continue + (newvers, fn) = preferred_versions[pn] + if not fn in eligible: + continue + eligible.remove(fn) + eligible = [fn] + eligible + + return eligible + + +def filterProviders(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables + Takes a "normal" target item + """ + + eligible = _filterProviders(providers, item, cfgData, dataCache) + + prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item) + if prefervar: + dataCache.preferred[item] = prefervar + + foundUnique = False + if item in dataCache.preferred: + for p in eligible: + pn = dataCache.pkg_fn[p] + if dataCache.preferred[item] == pn: + logger.verbose("selecting %s to satisfy %s due to PREFERRED_PROVIDERS", pn, item) + eligible.remove(p) + eligible = [p] + eligible + foundUnique = True + break + + logger.debug(1, "sorted providers for %s are: %s", item, eligible) + + return eligible, foundUnique + +def filterProvidersRunTime(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables + Takes a "runtime" target item + """ + + eligible = _filterProviders(providers, item, cfgData, dataCache) + + # First try and match any PREFERRED_RPROVIDER entry + prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item) + foundUnique = False + if prefervar: + for p in eligible: + pn = dataCache.pkg_fn[p] + if prefervar == pn: + logger.verbose("selecting %s to satisfy %s due to PREFERRED_RPROVIDER", pn, item) + 
eligible.remove(p) + eligible = [p] + eligible + foundUnique = True + numberPreferred = 1 + break + + # If we didn't find an RPROVIDER entry, try and infer the provider from PREFERRED_PROVIDER entries + # by looking through the provides of each eligible recipe and seeing if a PREFERRED_PROVIDER was set. + # This is most useful for virtual/ entries rather than having a RPROVIDER per entry. + if not foundUnique: + # Should use dataCache.preferred here? + preferred = [] + preferred_vars = [] + pns = {} + for p in eligible: + pns[dataCache.pkg_fn[p]] = p + for p in eligible: + pn = dataCache.pkg_fn[p] + provides = dataCache.pn_provides[pn] + for provide in provides: + prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide) + #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys()) + if prefervar in pns and pns[prefervar] not in preferred: + var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar) + logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var) + preferred_vars.append(var) + pref = pns[prefervar] + eligible.remove(pref) + eligible = [pref] + eligible + preferred.append(pref) + break + + numberPreferred = len(preferred) + + if numberPreferred > 1: + logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item)) + + logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible) + + return eligible, numberPreferred + +regexp_cache = {} + +def getRuntimeProviders(dataCache, rdepend): + """ + Return any providers of runtime dependency + """ + rproviders = [] + + if rdepend in dataCache.rproviders: + rproviders += dataCache.rproviders[rdepend] + + if rdepend in dataCache.packages: + rproviders += dataCache.packages[rdepend] + + if rproviders: + return rproviders + + # Only search dynamic packages if we can't find anything in other variables + for pattern in dataCache.packages_dynamic: + pattern = pattern.replace('+', "\+") + if pattern in regexp_cache: + regexp = regexp_cache[pattern] + else: + try: + regexp = re.compile(pattern) + except: + logger.error("Error parsing regular expression '%s'", pattern) + raise + regexp_cache[pattern] = regexp + if regexp.match(rdepend): + rproviders += dataCache.packages_dynamic[pattern] + logger.debug(1, "Assuming %s is a dynamic package, but it may not exist" % rdepend) + + return rproviders + + +def buildWorldTargetList(dataCache, task=None): + """ + Build package list for "bitbake world" + """ + if dataCache.world_target: + return + + logger.debug(1, "collating packages for \"world\"") + for f in dataCache.possible_world: + terminal = True + pn = dataCache.pkg_fn[f] + if task and task not in dataCache.task_deps[f]['tasks']: + logger.debug(2, "World build skipping %s as task %s doesn't exist", f, task) + terminal = False + + for p in dataCache.pn_provides[pn]: + if p.startswith('virtual/'): + logger.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p) + terminal = False + break + for pf in dataCache.providers[p]: + if dataCache.pkg_fn[pf] != pn: + logger.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p) + terminal = False + break + if terminal: + dataCache.world_target.add(pn) diff --git a/poky/bitbake/lib/bb/pysh/__init__.py b/poky/bitbake/lib/bb/pysh/__init__.py new 
file mode 100644 index 0000000000..e69de29bb2 diff --git a/poky/bitbake/lib/bb/pysh/builtin.py b/poky/bitbake/lib/bb/pysh/builtin.py new file mode 100644 index 0000000000..a8814dc330 --- /dev/null +++ b/poky/bitbake/lib/bb/pysh/builtin.py @@ -0,0 +1,710 @@ +# builtin.py - builtins and utilities definitions for pysh. +# +# Copyright 2007 Patrick Mezard +# +# This software may be used and distributed according to the terms +# of the GNU General Public License, incorporated herein by reference. + +"""Builtin and internal utilities implementations. + +- Beware not to use python interpreter environment as if it were the shell +environment. For instance, commands working directory must be explicitely handled +through env['PWD'] instead of relying on python working directory. +""" +import errno +import optparse +import os +import re +import subprocess +import sys +import time + +def has_subprocess_bug(): + return getattr(subprocess, 'list2cmdline') and \ + ( subprocess.list2cmdline(['']) == '' or \ + subprocess.list2cmdline(['foo|bar']) == 'foo|bar') + +# Detect python bug 1634343: "subprocess swallows empty arguments under win32" +# +# Also detect: "[ 1710802 ] subprocess must escape redirection characters under win32" +# +if has_subprocess_bug(): + import subprocess_fix + subprocess.list2cmdline = subprocess_fix.list2cmdline + +from sherrors import * + +class NonExitingParser(optparse.OptionParser): + """OptionParser default behaviour upon error is to print the error message and + exit. Raise a utility error instead. + """ + def error(self, msg): + raise UtilityError(msg) + +#------------------------------------------------------------------------------- +# set special builtin +#------------------------------------------------------------------------------- +OPT_SET = NonExitingParser(usage="set - set or unset options and positional parameters") +OPT_SET.add_option( '-f', action='store_true', dest='has_f', default=False, + help='The shell shall disable pathname expansion.') +OPT_SET.add_option('-e', action='store_true', dest='has_e', default=False, + help="""When this option is on, if a simple command fails for any of the \ + reasons listed in Consequences of Shell Errors or returns an exit status \ + value >0, and is not part of the compound list following a while, until, \ + or if keyword, and is not a part of an AND or OR list, and is not a \ + pipeline preceded by the ! reserved word, then the shell shall immediately \ + exit.""") +OPT_SET.add_option('-x', action='store_true', dest='has_x', default=False, + help="""The shell shall write to standard error a trace for each command \ + after it expands the command and before it executes it. 
It is unspecified \ + whether the command that turns tracing off is traced.""") + +def builtin_set(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_SET.parse_args(args) + env = interp.get_env() + + if option.has_f: + env.set_opt('-f') + if option.has_e: + env.set_opt('-e') + if option.has_x: + env.set_opt('-x') + return 0 + +#------------------------------------------------------------------------------- +# shift special builtin +#------------------------------------------------------------------------------- +def builtin_shift(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + params = interp.get_env().get_positional_args() + if args: + try: + n = int(args[0]) + if n > len(params): + raise ValueError() + except ValueError: + return 1 + else: + n = 1 + + params[:n] = [] + interp.get_env().set_positional_args(params) + return 0 + +#------------------------------------------------------------------------------- +# export special builtin +#------------------------------------------------------------------------------- +OPT_EXPORT = NonExitingParser(usage="set - set or unset options and positional parameters") +OPT_EXPORT.add_option('-p', action='store_true', dest='has_p', default=False) + +def builtin_export(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_EXPORT.parse_args(args) + if option.has_p: + raise NotImplementedError() + + for arg in args: + try: + name, value = arg.split('=', 1) + except ValueError: + name, value = arg, None + env = interp.get_env().export(name, value) + + return 0 + +#------------------------------------------------------------------------------- +# return special builtin +#------------------------------------------------------------------------------- +def builtin_return(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + res = 0 + if args: + try: + res = int(args[0]) + except ValueError: + res = 0 + if not 0<=res<=255: + res = 0 + + # BUG: should be last executed command exit code + raise ReturnSignal(res) + +#------------------------------------------------------------------------------- +# trap special builtin +#------------------------------------------------------------------------------- +def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + if len(args) < 2: + stderr.write('trap: usage: trap [[arg] signal_spec ...]\n') + return 2 + + action = args[0] + for sig in args[1:]: + try: + env.traps[sig] = action + except Exception as e: + stderr.write('trap: %s\n' % str(e)) + return 0 + +#------------------------------------------------------------------------------- +# unset special builtin +#------------------------------------------------------------------------------- +OPT_UNSET = NonExitingParser("unset - unset values and attributes of variables and functions") +OPT_UNSET.add_option( '-f', action='store_true', dest='has_f', default=False) +OPT_UNSET.add_option( '-v', action='store_true', dest='has_v', default=False) + 
+def builtin_unset(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_UNSET.parse_args(args) + + status = 0 + env = interp.get_env() + for arg in args: + try: + if option.has_f: + env.remove_function(arg) + else: + del env[arg] + except KeyError: + pass + except VarAssignmentError: + status = 1 + + return status + +#------------------------------------------------------------------------------- +# wait special builtin +#------------------------------------------------------------------------------- +def builtin_wait(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return interp.wait([int(arg) for arg in args]) + +#------------------------------------------------------------------------------- +# cat utility +#------------------------------------------------------------------------------- +def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + if not args: + args = ['-'] + + status = 0 + for arg in args: + if arg == '-': + data = stdin.read() + else: + path = os.path.join(env['PWD'], arg) + try: + f = file(path, 'rb') + try: + data = f.read() + finally: + f.close() + except IOError as e: + if e.errno != errno.ENOENT: + raise + status = 1 + continue + stdout.write(data) + stdout.flush() + return status + +#------------------------------------------------------------------------------- +# cd utility +#------------------------------------------------------------------------------- +OPT_CD = NonExitingParser("cd - change the working directory") + +def utility_cd(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_CD.parse_args(args) + env = interp.get_env() + + directory = None + printdir = False + if not args: + home = env.get('HOME') + if home: + # Unspecified, do nothing + return 0 + else: + directory = home + elif len(args)==1: + directory = args[0] + if directory=='-': + if 'OLDPWD' not in env: + raise UtilityError("OLDPWD not set") + printdir = True + directory = env['OLDPWD'] + else: + raise UtilityError("too many arguments") + + curpath = None + # Absolute directories will be handled correctly by the os.path.join call. 
+ if not directory.startswith('.') and not directory.startswith('..'): + cdpaths = env.get('CDPATH', '.').split(';') + for cdpath in cdpaths: + p = os.path.join(cdpath, directory) + if os.path.isdir(p): + curpath = p + break + + if curpath is None: + curpath = directory + curpath = os.path.join(env['PWD'], directory) + + env['OLDPWD'] = env['PWD'] + env['PWD'] = curpath + if printdir: + stdout.write('%s\n' % curpath) + return 0 + +#------------------------------------------------------------------------------- +# colon utility +#------------------------------------------------------------------------------- +def utility_colon(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# echo utility +#------------------------------------------------------------------------------- +def utility_echo(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # Echo only takes arguments, no options. Use printf if you need fancy stuff. + output = ' '.join(args) + '\n' + stdout.write(output) + stdout.flush() + return 0 + +#------------------------------------------------------------------------------- +# egrep utility +#------------------------------------------------------------------------------- +# egrep is usually a shell script. +# Unfortunately, pysh does not support shell scripts *with arguments* right now, +# so the redirection is implemented here, assuming grep is available. +def utility_egrep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('grep', ['-E'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# env utility +#------------------------------------------------------------------------------- +def utility_env(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + if args and args[0]=='-i': + raise NotImplementedError('env: -i option is not implemented') + + i = 0 + for arg in args: + if '=' not in arg: + break + # Update the current environment + name, value = arg.split('=', 1) + env[name] = value + i += 1 + + if args[i:]: + # Find then execute the specified interpreter + utility = env.find_in_path(args[i]) + if not utility: + return 127 + args[i:i+1] = utility + name = args[i] + args = args[i+1:] + try: + return run_command(name, args, interp, env, stdin, stdout, stderr, + debugflags) + except UtilityError: + stderr.write('env: failed to execute %s' % ' '.join([name]+args)) + return 126 + else: + for pair in env.get_variables().iteritems(): + stdout.write('%s=%s\n' % pair) + return 0 + +#------------------------------------------------------------------------------- +# exit utility +#------------------------------------------------------------------------------- +def utility_exit(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + res = None + if args: + try: + res = int(args[0]) + except ValueError: + res = 
None + if not 0<=res<=255: + res = None + + if res is None: + # BUG: should be last executed command exit code + res = 0 + + raise ExitSignal(res) + +#------------------------------------------------------------------------------- +# fgrep utility +#------------------------------------------------------------------------------- +# see egrep +def utility_fgrep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('grep', ['-F'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# gunzip utility +#------------------------------------------------------------------------------- +# see egrep +def utility_gunzip(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('gzip', ['-d'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# kill utility +#------------------------------------------------------------------------------- +def utility_kill(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + for arg in args: + pid = int(arg) + status = subprocess.call(['pskill', '/T', str(pid)], + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + # pskill is asynchronous, hence the stupid polling loop + while 1: + p = subprocess.Popen(['pslist', str(pid)], + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = p.communicate()[0] + if ('process %d was not' % pid) in output: + break + time.sleep(1) + return status + +#------------------------------------------------------------------------------- +# mkdir utility +#------------------------------------------------------------------------------- +OPT_MKDIR = NonExitingParser("mkdir - make directories.") +OPT_MKDIR.add_option('-p', action='store_true', dest='has_p', default=False) + +def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # TODO: implement umask + # TODO: implement proper utility error report + option, args = OPT_MKDIR.parse_args(args) + for arg in args: + path = os.path.join(env['PWD'], arg) + if option.has_p: + try: + os.makedirs(path) + except IOError as e: + if e.errno != errno.EEXIST: + raise + else: + os.mkdir(path) + return 0 + +#------------------------------------------------------------------------------- +# netstat utility +#------------------------------------------------------------------------------- +def utility_netstat(name, args, interp, env, stdin, stdout, stderr, debugflags): + # Do you really expect me to implement netstat ? + # This empty form is enough for Mercurial tests since it's + # supposed to generate nothing upon success. Faking this test + # is not a big deal either. 
+ if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# pwd utility +#------------------------------------------------------------------------------- +OPT_PWD = NonExitingParser("pwd - return working directory name") +OPT_PWD.add_option('-L', action='store_true', dest='has_L', default=True, + help="""If the PWD environment variable contains an absolute pathname of \ + the current directory that does not contain the filenames dot or dot-dot, \ + pwd shall write this pathname to standard output. Otherwise, the -L option \ + shall behave as the -P option.""") +OPT_PWD.add_option('-P', action='store_true', dest='has_L', default=False, + help="""The absolute pathname written shall not contain filenames that, in \ + the context of the pathname, refer to files of type symbolic link.""") + +def utility_pwd(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_PWD.parse_args(args) + stdout.write('%s\n' % env['PWD']) + return 0 + +#------------------------------------------------------------------------------- +# printf utility +#------------------------------------------------------------------------------- +RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)') + +def utility_printf(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + def replace(m): + assert m.group() + g = m.group()[1:] + if g.startswith('x'): + return chr(int(g[1:], 16)) + if len(g) <= 3 and len([c for c in g if c in '01234567']) == len(g): + # Yay, an octal number + return chr(int(g, 8)) + return { + 'a': '\a', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', + 'v': '\v', + '\\': '\\', + }.get(g) + + # Convert escape sequences + format = re.sub(RE_UNESCAPE, replace, args[0]) + stdout.write(format % tuple(args[1:])) + return 0 + +#------------------------------------------------------------------------------- +# true utility +#------------------------------------------------------------------------------- +def utility_true(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# sed utility +#------------------------------------------------------------------------------- +RE_SED = re.compile(r'^s(.).*\1[a-zA-Z]*$') + +# cygwin sed fails with some expressions when they do not end with a single space. +# see unit tests for details. Interestingly, the same expressions works perfectly +# in cygwin shell. 
+def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # Scan pattern arguments and append a space if necessary + for i in range(len(args)): + if not RE_SED.search(args[i]): + continue + args[i] = args[i] + ' ' + + return run_command(name, args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# sleep utility +#------------------------------------------------------------------------------- +def utility_sleep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + time.sleep(int(args[0])) + return 0 + +#------------------------------------------------------------------------------- +# sort utility +#------------------------------------------------------------------------------- +OPT_SORT = NonExitingParser("sort - sort, merge, or sequence check text files") + +def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags): + + def sort(path): + if path == '-': + lines = stdin.readlines() + else: + try: + f = file(path) + try: + lines = f.readlines() + finally: + f.close() + except IOError as e: + stderr.write(str(e) + '\n') + return 1 + + if lines and lines[-1][-1]!='\n': + lines[-1] = lines[-1] + '\n' + return lines + + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_SORT.parse_args(args) + alllines = [] + + if len(args)<=0: + args += ['-'] + + # Load all files lines + curdir = os.getcwd() + try: + os.chdir(env['PWD']) + for path in args: + alllines += sort(path) + finally: + os.chdir(curdir) + + alllines.sort() + for line in alllines: + stdout.write(line) + return 0 + +#------------------------------------------------------------------------------- +# hg utility +#------------------------------------------------------------------------------- + +hgcommands = [ + 'add', + 'addremove', + 'commit', 'ci', + 'debugrename', + 'debugwalk', + 'falabala', # Dummy command used in a mercurial test + 'incoming', + 'locate', + 'pull', + 'push', + 'qinit', + 'remove', 'rm', + 'rename', 'mv', + 'revert', + 'showconfig', + 'status', 'st', + 'strip', + ] + +def rewriteslashes(name, args): + # Several hg commands output file paths, rewrite the separators + if len(args) > 1 and name.lower().endswith('python') \ + and args[0].endswith('hg'): + for cmd in hgcommands: + if cmd in args[1:]: + return True + + # svn output contains many paths with OS specific separators. + # Normalize these to unix paths. 
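+ # For example, once run_command() applies the rewrite, output such as
+ # dir\sub\file.py becomes dir/sub/file.py, matching the unix-style paths
+ # the Mercurial tests expect. A minimal sketch of that normalization
+ # (illustration only, mirroring the replace calls in run_command below):
+ #
+ #   def normalize_seps(text):
+ #       return text.replace('\\\\', '\\').replace('\\', '/')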
+ base = os.path.basename(name) + if base.startswith('svn'): + return True + + return False + +def rewritehg(output): + if not output: + return output + # Rewrite os specific messages + output = output.replace(': The system cannot find the file specified', + ': No such file or directory') + output = re.sub(': Access is denied.*$', ': Permission denied', output) + output = output.replace(': No connection could be made because the target machine actively refused it', + ': Connection refused') + return output + + +def run_command(name, args, interp, env, stdin, stdout, + stderr, debugflags): + # Execute the command + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + hgbin = interp.options().hgbinary + ishg = hgbin and ('hg' in name or args and 'hg' in args[0]) + unixoutput = 'cygwin' in name or ishg + + exec_env = env.get_variables() + try: + # BUG: comparing file descriptor is clearly not a reliable way to tell + # whether they point on the same underlying object. But in pysh limited + # scope this is usually right, we do not expect complicated redirections + # besides usual 2>&1. + # Still there is one case we have but cannot deal with is when stdout + # and stderr are redirected *by pysh caller*. This the reason for the + # --redirect pysh() option. + # Now, we want to know they are the same because we sometimes need to + # transform the command output, mostly remove CR-LF to ensure that + # command output is unix-like. Cygwin utilies are a special case because + # they explicitely set their output streams to binary mode, so we have + # nothing to do. For all others commands, we have to guess whether they + # are sending text data, in which case the transformation must be done. + # Again, the NUL character test is unreliable but should be enough for + # hg tests. + redirected = stdout.fileno()==stderr.fileno() + if not redirected: + p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + else: + p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out, err = p.communicate() + except WindowsError as e: + raise UtilityError(str(e)) + + if not unixoutput: + def encode(s): + if '\0' in s: + return s + return s.replace('\r\n', '\n') + else: + encode = lambda s: s + + if rewriteslashes(name, args): + encode1_ = encode + def encode(s): + s = encode1_(s) + s = s.replace('\\\\', '\\') + s = s.replace('\\', '/') + return s + + if ishg: + encode2_ = encode + def encode(s): + return rewritehg(encode2_(s)) + + stdout.write(encode(out)) + if not redirected: + stderr.write(encode(err)) + return p.returncode + diff --git a/poky/bitbake/lib/bb/pysh/interp.py b/poky/bitbake/lib/bb/pysh/interp.py new file mode 100644 index 0000000000..d14ecf3c6d --- /dev/null +++ b/poky/bitbake/lib/bb/pysh/interp.py @@ -0,0 +1,1367 @@ +# interp.py - shell interpreter for pysh. +# +# Copyright 2007 Patrick Mezard +# +# This software may be used and distributed according to the terms +# of the GNU General Public License, incorporated herein by reference. + +"""Implement the shell interpreter. + +Most references are made to "The Open Group Base Specifications Issue 6". + +""" +# TODO: document the fact input streams must implement fileno() so Popen will work correctly. +# it requires non-stdin stream to be implemented as files. Still to be tested... +# DOC: pathsep is used in PATH instead of ':'. 
Clearly, there are path syntax issues here. +# TODO: stop command execution upon error. +# TODO: sort out the filename/io_number mess. It should be possible to use filenames only. +# TODO: review subshell implementation +# TODO: test environment cloning for non-special builtins +# TODO: set -x should not rebuild commands from tokens, assignments/redirections are lost +# TODO: unit test for variable assignment +# TODO: test error management wrt error type/utility type +# TODO: test for binary output everywhere +# BUG: debug-parsing does not pass log file to PLY. Maybe a PLY upgrade is necessary. +import base64 +import cPickle as pickle +import errno +import glob +import os +import re +import subprocess +import sys +import tempfile + +try: + s = set() + del s +except NameError: + from Set import Set as set + +import builtin +from sherrors import * +import pyshlex +import pyshyacc + +def mappend(func, *args, **kargs): + """Like map but assume func returns a list. Returned lists are merged into + a single one. + """ + return reduce(lambda a,b: a+b, map(func, *args, **kargs), []) + +class FileWrapper: + """File object wrapper to ease debugging. + + Allow mode checking and implement file duplication through a simple + reference counting scheme. Not sure the latter is really useful since + only real file descriptors can be used. + """ + def __init__(self, mode, file, close=True): + if mode not in ('r', 'w', 'a'): + raise IOError('invalid mode: %s' % mode) + self._mode = mode + self._close = close + if isinstance(file, FileWrapper): + if file._refcount[0] <= 0: + raise IOError(0, 'Error') + self._refcount = file._refcount + self._refcount[0] += 1 + self._file = file._file + else: + self._refcount = [1] + self._file = file + + def dup(self): + return FileWrapper(self._mode, self, self._close) + + def fileno(self): + """fileno() should be only necessary for input streams.""" + return self._file.fileno() + + def read(self, size=-1): + if self._mode!='r': + raise IOError(0, 'Error') + return self._file.read(size) + + def readlines(self, *args, **kwargs): + return self._file.readlines(*args, **kwargs) + + def write(self, s): + if self._mode not in ('w', 'a'): + raise IOError(0, 'Error') + return self._file.write(s) + + def flush(self): + self._file.flush() + + def close(self): + if not self._refcount: + return + assert self._refcount[0] > 0 + + self._refcount[0] -= 1 + if self._refcount[0] == 0: + self._mode = 'c' + if self._close: + self._file.close() + self._refcount = None + + def mode(self): + return self._mode + + def __getattr__(self, name): + if name == 'name': + self.name = getattr(self._file, name) + return self.name + else: + raise AttributeError(name) + + def __del__(self): + self.close() + + +def win32_open_devnull(mode): + return open('NUL', mode) + + +class Redirections: + """Stores open files and their mapping to pseudo-sh file descriptor. 
+ """ + # BUG: redirections are not handled correctly: 1>&3 2>&3 3>&4 does + # not make 1 to redirect to 4 + def __init__(self, stdin=None, stdout=None, stderr=None): + self._descriptors = {} + if stdin is not None: + self._add_descriptor(0, stdin) + if stdout is not None: + self._add_descriptor(1, stdout) + if stderr is not None: + self._add_descriptor(2, stderr) + + def add_here_document(self, interp, name, content, io_number=None): + if io_number is None: + io_number = 0 + + if name==pyshlex.unquote_wordtree(name): + content = interp.expand_here_document(('TOKEN', content)) + + # Write document content in a temporary file + tmp = tempfile.TemporaryFile() + try: + tmp.write(content) + tmp.flush() + tmp.seek(0) + self._add_descriptor(io_number, FileWrapper('r', tmp)) + except: + tmp.close() + raise + + def add(self, interp, op, filename, io_number=None): + if op not in ('<', '>', '>|', '>>', '>&'): + # TODO: add descriptor duplication and here_documents + raise RedirectionError('Unsupported redirection operator "%s"' % op) + + if io_number is not None: + io_number = int(io_number) + + if (op == '>&' and filename.isdigit()) or filename=='-': + # No expansion for file descriptors, quote them if you want a filename + fullname = filename + else: + if filename.startswith('/'): + # TODO: win32 kludge + if filename=='/dev/null': + fullname = 'NUL' + else: + # TODO: handle absolute pathnames, they are unlikely to exist on the + # current platform (win32 for instance). + raise NotImplementedError() + else: + fullname = interp.expand_redirection(('TOKEN', filename)) + if not fullname: + raise RedirectionError('%s: ambiguous redirect' % filename) + # Build absolute path based on PWD + fullname = os.path.join(interp.get_env()['PWD'], fullname) + + if op=='<': + return self._add_input_redirection(interp, fullname, io_number) + elif op in ('>', '>|'): + clobber = ('>|'==op) + return self._add_output_redirection(interp, fullname, io_number, clobber) + elif op=='>>': + return self._add_output_appending(interp, fullname, io_number) + elif op=='>&': + return self._dup_output_descriptor(fullname, io_number) + + def close(self): + if self._descriptors is not None: + for desc in self._descriptors.itervalues(): + desc.flush() + desc.close() + self._descriptors = None + + def stdin(self): + return self._descriptors[0] + + def stdout(self): + return self._descriptors[1] + + def stderr(self): + return self._descriptors[2] + + def clone(self): + clone = Redirections() + for desc, fileobj in self._descriptors.iteritems(): + clone._descriptors[desc] = fileobj.dup() + return clone + + def _add_output_redirection(self, interp, filename, io_number, clobber): + if io_number is None: + # io_number default to standard output + io_number = 1 + + if not clobber and interp.get_env().has_opt('-C') and os.path.isfile(filename): + # File already exist in no-clobber mode, bail out + raise RedirectionError('File "%s" already exists' % filename) + + # Open and register + self._add_file_descriptor(io_number, filename, 'w') + + def _add_output_appending(self, interp, filename, io_number): + if io_number is None: + io_number = 1 + self._add_file_descriptor(io_number, filename, 'a') + + def _add_input_redirection(self, interp, filename, io_number): + if io_number is None: + io_number = 0 + self._add_file_descriptor(io_number, filename, 'r') + + def _add_file_descriptor(self, io_number, filename, mode): + try: + if filename.startswith('/'): + if filename=='/dev/null': + f = win32_open_devnull(mode+'b') + else: + # TODO: handle absolute 
pathnames, they are unlikely to exist on the + # current platform (win32 for instance). + raise NotImplementedError('cannot open absolute path %s' % repr(filename)) + else: + f = file(filename, mode+'b') + except IOError as e: + raise RedirectionError(str(e)) + + wrapper = None + try: + wrapper = FileWrapper(mode, f) + f = None + self._add_descriptor(io_number, wrapper) + except: + if f: f.close() + if wrapper: wrapper.close() + raise + + def _dup_output_descriptor(self, source_fd, dest_fd): + if source_fd is None: + source_fd = 1 + self._dup_file_descriptor(source_fd, dest_fd, 'w') + + def _dup_file_descriptor(self, source_fd, dest_fd, mode): + source_fd = int(source_fd) + if source_fd not in self._descriptors: + raise RedirectionError('"%s" is not a valid file descriptor' % str(source_fd)) + source = self._descriptors[source_fd] + + if source.mode()!=mode: + raise RedirectionError('Descriptor %s cannot be duplicated in mode "%s"' % (str(source), mode)) + + if dest_fd=='-': + # Close the source descriptor + del self._descriptors[source_fd] + source.close() + else: + dest_fd = int(dest_fd) + if dest_fd not in self._descriptors: + raise RedirectionError('Cannot replace file descriptor %s' % str(dest_fd)) + + dest = self._descriptors[dest_fd] + if dest.mode()!=mode: + raise RedirectionError('Descriptor %s cannot be cannot be redirected in mode "%s"' % (str(dest), mode)) + + self._descriptors[dest_fd] = source.dup() + dest.close() + + def _add_descriptor(self, io_number, file): + io_number = int(io_number) + + if io_number in self._descriptors: + # Close the current descriptor + d = self._descriptors[io_number] + del self._descriptors[io_number] + d.close() + + self._descriptors[io_number] = file + + def __str__(self): + names = [('%d=%r' % (k, getattr(v, 'name', None))) for k,v + in self._descriptors.iteritems()] + names = ','.join(names) + return 'Redirections(%s)' % names + + def __del__(self): + self.close() + +def cygwin_to_windows_path(path): + """Turn /cygdrive/c/foo into c:/foo, or return path if it + is not a cygwin path. + """ + if not path.startswith('/cygdrive/'): + return path + path = path[len('/cygdrive/'):] + path = path[:1] + ':' + path[1:] + return path + +def win32_to_unix_path(path): + if path is not None: + path = path.replace('\\', '/') + return path + +_RE_SHEBANG = re.compile(r'^\#!\s?([^\s]+)(?:\s([^\s]+))?') +_SHEBANG_CMDS = { + '/usr/bin/env': 'env', + '/bin/sh': 'pysh', + 'python': 'python', +} + +def resolve_shebang(path, ignoreshell=False): + """Return a list of arguments as shebang interpreter call or an empty list + if path does not refer to an executable script. + See . + + ignoreshell - set to True to ignore sh shebangs. Return an empty list instead. + """ + try: + f = file(path) + try: + # At most 80 characters in the first line + header = f.read(80).splitlines()[0] + finally: + f.close() + + m = _RE_SHEBANG.search(header) + if not m: + return [] + cmd, arg = m.group(1,2) + if os.path.isfile(cmd): + # Keep this one, the hg script for instance contains a weird windows + # shebang referencing the current python install. 
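+ # Resulting mapping, for illustration (paths shortened): a script whose
+ # first line is '#!/usr/bin/env python' resolves to
+ # ['env', 'python', <unix path of script>], and one starting with
+ # '#!/bin/sh' resolves to ['pysh', <unix path of script>], both via the
+ # _SHEBANG_CMDS table above.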
+ cmdfile = os.path.basename(cmd).lower() + if cmdfile == 'python.exe': + cmd = 'python' + pass + elif cmd not in _SHEBANG_CMDS: + raise CommandNotFound('Unknown interpreter "%s" referenced in '\ + 'shebang' % header) + cmd = _SHEBANG_CMDS.get(cmd) + if cmd is None or (ignoreshell and cmd == 'pysh'): + return [] + if arg is None: + return [cmd, win32_to_unix_path(path)] + return [cmd, arg, win32_to_unix_path(path)] + except IOError as e: + if e.errno!=errno.ENOENT and \ + (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM + raise + return [] + +def win32_find_in_path(name, path): + if isinstance(path, str): + path = path.split(os.pathsep) + + exts = os.environ.get('PATHEXT', '').lower().split(os.pathsep) + for p in path: + p_name = os.path.join(p, name) + + prefix = resolve_shebang(p_name) + if prefix: + return prefix + + for ext in exts: + p_name_ext = p_name + ext + if os.path.exists(p_name_ext): + return [win32_to_unix_path(p_name_ext)] + return [] + +class Traps(dict): + def __setitem__(self, key, value): + if key not in ('EXIT',): + raise NotImplementedError() + super(Traps, self).__setitem__(key, value) + +# IFS white spaces character class +_IFS_WHITESPACES = (' ', '\t', '\n') + +class Environment: + """Environment holds environment variables, export table, function + definitions and whatever is defined in 2.12 "Shell Execution Environment", + redirection excepted. + """ + def __init__(self, pwd): + self._opt = set() #Shell options + + self._functions = {} + self._env = {'?': '0', '#': '0'} + self._exported = set([ + 'HOME', 'IFS', 'PATH' + ]) + + # Set environment vars with side-effects + self._ifs_ws = None # Set of IFS whitespace characters + self._ifs_re = None # Regular expression used to split between words using IFS classes + self['IFS'] = ''.join(_IFS_WHITESPACES) #Default environment values + self['PWD'] = pwd + self.traps = Traps() + + def clone(self, subshell=False): + env = Environment(self['PWD']) + env._opt = set(self._opt) + for k,v in self.get_variables().iteritems(): + if k in self._exported: + env.export(k,v) + elif subshell: + env[k] = v + + if subshell: + env._functions = dict(self._functions) + + return env + + def __getitem__(self, key): + if key in ('@', '*', '-', '$'): + raise NotImplementedError('%s is not implemented' % repr(key)) + return self._env[key] + + def get(self, key, defval=None): + try: + return self[key] + except KeyError: + return defval + + def __setitem__(self, key, value): + if key=='IFS': + # Update the whitespace/non-whitespace classes + self._update_ifs(value) + elif key=='PWD': + pwd = os.path.abspath(value) + if not os.path.isdir(pwd): + raise VarAssignmentError('Invalid directory %s' % value) + value = pwd + elif key in ('?', '!'): + value = str(int(value)) + self._env[key] = value + + def __delitem__(self, key): + if key in ('IFS', 'PWD', '?'): + raise VarAssignmentError('%s cannot be unset' % key) + del self._env[key] + + def __contains__(self, item): + return item in self._env + + def set_positional_args(self, args): + """Set the content of 'args' as positional argument from 1 to len(args). + Return previous argument as a list of strings. 
+ """ + # Save and remove previous arguments + prevargs = [] + for i in range(int(self._env['#'])): + i = str(i+1) + prevargs.append(self._env[i]) + del self._env[i] + self._env['#'] = '0' + + #Set new ones + for i,arg in enumerate(args): + self._env[str(i+1)] = str(arg) + self._env['#'] = str(len(args)) + + return prevargs + + def get_positional_args(self): + return [self._env[str(i+1)] for i in range(int(self._env['#']))] + + def get_variables(self): + return dict(self._env) + + def export(self, key, value=None): + if value is not None: + self[key] = value + self._exported.add(key) + + def get_exported(self): + return [(k,self._env.get(k)) for k in self._exported] + + def split_fields(self, word): + if not self._ifs_ws or not word: + return [word] + return re.split(self._ifs_re, word) + + def _update_ifs(self, value): + """Update the split_fields related variables when IFS character set is + changed. + """ + # TODO: handle NULL IFS + + # Separate characters in whitespace and non-whitespace + chars = set(value) + ws = [c for c in chars if c in _IFS_WHITESPACES] + nws = [c for c in chars if c not in _IFS_WHITESPACES] + + # Keep whitespaces in a string for left and right stripping + self._ifs_ws = ''.join(ws) + + # Build a regexp to split fields + trailing = '[' + ''.join([re.escape(c) for c in ws]) + ']' + if nws: + # First, the single non-whitespace occurence. + nws = '[' + ''.join([re.escape(c) for c in nws]) + ']' + nws = '(?:' + trailing + '*' + nws + trailing + '*' + '|' + trailing + '+)' + else: + # Then mix all parts with quantifiers + nws = trailing + '+' + self._ifs_re = re.compile(nws) + + def has_opt(self, opt, val=None): + return (opt, val) in self._opt + + def set_opt(self, opt, val=None): + self._opt.add((opt, val)) + + def find_in_path(self, name, pwd=False): + path = self._env.get('PATH', '').split(os.pathsep) + if pwd: + path[:0] = [self['PWD']] + if os.name == 'nt': + return win32_find_in_path(name, self._env.get('PATH', '')) + else: + raise NotImplementedError() + + def define_function(self, name, body): + if not is_name(name): + raise ShellSyntaxError('%s is not a valid function name' % repr(name)) + self._functions[name] = body + + def remove_function(self, name): + del self._functions[name] + + def is_function(self, name): + return name in self._functions + + def get_function(self, name): + return self._functions.get(name) + + +name_charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' +name_charset = dict(zip(name_charset,name_charset)) + +def match_name(s): + """Return the length in characters of the longest prefix made of name + allowed characters in s. + """ + for i,c in enumerate(s): + if c not in name_charset: + return s[:i] + return s + +def is_name(s): + return len([c for c in s if c not in name_charset])<=0 + +def is_special_param(c): + return len(c)==1 and c in ('@','*','#','?','-','$','!','0') + +def utility_not_implemented(name, *args, **kwargs): + raise NotImplementedError('%s utility is not implemented' % name) + + +class Utility: + """Define utilities properties: + func -- utility callable. See builtin module for utility samples. + is_special -- see XCU 2.8. 
+ """ + def __init__(self, func, is_special=0): + self.func = func + self.is_special = bool(is_special) + + +def encodeargs(args): + def encodearg(s): + lines = base64.encodestring(s) + lines = [l.splitlines()[0] for l in lines] + return ''.join(lines) + + s = pickle.dumps(args) + return encodearg(s) + +def decodeargs(s): + s = base64.decodestring(s) + return pickle.loads(s) + + +class GlobError(Exception): + pass + +class Options: + def __init__(self): + # True if Mercurial operates with binary streams + self.hgbinary = True + +class Interpreter: + # Implementation is very basic: the execute() method just makes a DFS on the + # AST and execute nodes one by one. Nodes are tuple (name,obj) where name + # is a string identifier and obj the AST element returned by the parser. + # + # Handler are named after the node identifiers. + # TODO: check node names and remove the switch in execute with some + # dynamic getattr() call to find node handlers. + """Shell interpreter. + + The following debugging flags can be passed: + debug-parsing - enable PLY debugging. + debug-tree - print the generated AST. + debug-cmd - trace command execution before word expansion, plus exit status. + debug-utility - trace utility execution. + """ + + # List supported commands. + COMMANDS = { + 'cat': Utility(builtin.utility_cat,), + 'cd': Utility(builtin.utility_cd,), + ':': Utility(builtin.utility_colon,), + 'echo': Utility(builtin.utility_echo), + 'env': Utility(builtin.utility_env), + 'exit': Utility(builtin.utility_exit), + 'export': Utility(builtin.builtin_export, is_special=1), + 'egrep': Utility(builtin.utility_egrep), + 'fgrep': Utility(builtin.utility_fgrep), + 'gunzip': Utility(builtin.utility_gunzip), + 'kill': Utility(builtin.utility_kill), + 'mkdir': Utility(builtin.utility_mkdir), + 'netstat': Utility(builtin.utility_netstat), + 'printf': Utility(builtin.utility_printf), + 'pwd': Utility(builtin.utility_pwd), + 'return': Utility(builtin.builtin_return, is_special=1), + 'sed': Utility(builtin.utility_sed,), + 'set': Utility(builtin.builtin_set,), + 'shift': Utility(builtin.builtin_shift,), + 'sleep': Utility(builtin.utility_sleep,), + 'sort': Utility(builtin.utility_sort,), + 'trap': Utility(builtin.builtin_trap, is_special=1), + 'true': Utility(builtin.utility_true), + 'unset': Utility(builtin.builtin_unset, is_special=1), + 'wait': Utility(builtin.builtin_wait, is_special=1), + } + + def __init__(self, pwd, debugflags = [], env=None, redirs=None, stdin=None, + stdout=None, stderr=None, opts=Options()): + self._env = env + if self._env is None: + self._env = Environment(pwd) + self._children = {} + + self._redirs = redirs + self._close_redirs = False + + if self._redirs is None: + if stdin is None: + stdin = sys.stdin + if stdout is None: + stdout = sys.stdout + if stderr is None: + stderr = sys.stderr + stdin = FileWrapper('r', stdin, False) + stdout = FileWrapper('w', stdout, False) + stderr = FileWrapper('w', stderr, False) + self._redirs = Redirections(stdin, stdout, stderr) + self._close_redirs = True + + self._debugflags = list(debugflags) + self._logfile = sys.stderr + self._options = opts + + def close(self): + """Must be called when the interpreter is no longer used.""" + script = self._env.traps.get('EXIT') + if script: + try: + self.execute_script(script=script) + except: + pass + + if self._redirs is not None and self._close_redirs: + self._redirs.close() + self._redirs = None + + def log(self, s): + self._logfile.write(s) + self._logfile.flush() + + def __getitem__(self, key): + return 
self._env[key] + + def __setitem__(self, key, value): + self._env[key] = value + + def options(self): + return self._options + + def redirect(self, redirs, ios): + def add_redir(io): + if isinstance(io, pyshyacc.IORedirect): + redirs.add(self, io.op, io.filename, io.io_number) + else: + redirs.add_here_document(self, io.name, io.content, io.io_number) + + map(add_redir, ios) + return redirs + + def execute_script(self, script=None, ast=None, sourced=False, + scriptpath=None): + """If script is not None, parse the input. Otherwise takes the supplied + AST. Then execute the AST. + Return the script exit status. + """ + try: + if scriptpath is not None: + self._env['0'] = os.path.abspath(scriptpath) + + if script is not None: + debug_parsing = ('debug-parsing' in self._debugflags) + cmds, script = pyshyacc.parse(script, True, debug_parsing) + if 'debug-tree' in self._debugflags: + pyshyacc.print_commands(cmds, self._logfile) + self._logfile.flush() + else: + cmds, script = ast, '' + + status = 0 + for cmd in cmds: + try: + status = self.execute(cmd) + except ExitSignal as e: + if sourced: + raise + status = int(e.args[0]) + return status + except ShellError: + self._env['?'] = 1 + raise + if 'debug-utility' in self._debugflags or 'debug-cmd' in self._debugflags: + self.log('returncode ' + str(status)+ '\n') + return status + except CommandNotFound as e: + print >>self._redirs.stderr, str(e) + self._redirs.stderr.flush() + # Command not found by non-interactive shell + # return 127 + raise + except RedirectionError as e: + # TODO: should be handled depending on the utility status + print >>self._redirs.stderr, str(e) + self._redirs.stderr.flush() + # Command not found by non-interactive shell + # return 127 + raise + + def dotcommand(self, env, args): + if len(args) < 1: + raise ShellError('. expects at least one argument') + path = args[0] + if '/' not in path: + found = env.find_in_path(args[0], True) + if found: + path = found[0] + script = file(path).read() + return self.execute_script(script=script, sourced=True) + + def execute(self, token, redirs=None): + """Execute and AST subtree with supplied redirections overriding default + interpreter ones. + Return the exit status. 
+ """ + if not token: + return 0 + + if redirs is None: + redirs = self._redirs + + if isinstance(token, list): + # Commands sequence + res = 0 + for t in token: + res = self.execute(t, redirs) + return res + + type, value = token + status = 0 + if type=='simple_command': + redirs_copy = redirs.clone() + try: + # TODO: define and handle command return values + # TODO: implement set -e + status = self._execute_simple_command(value, redirs_copy) + finally: + redirs_copy.close() + elif type=='pipeline': + status = self._execute_pipeline(value, redirs) + elif type=='and_or': + status = self._execute_and_or(value, redirs) + elif type=='for_clause': + status = self._execute_for_clause(value, redirs) + elif type=='while_clause': + status = self._execute_while_clause(value, redirs) + elif type=='function_definition': + status = self._execute_function_definition(value, redirs) + elif type=='brace_group': + status = self._execute_brace_group(value, redirs) + elif type=='if_clause': + status = self._execute_if_clause(value, redirs) + elif type=='subshell': + status = self.subshell(ast=value.cmds, redirs=redirs) + elif type=='async': + status = self._asynclist(value) + elif type=='redirect_list': + redirs_copy = self.redirect(redirs.clone(), value.redirs) + try: + status = self.execute(value.cmd, redirs_copy) + finally: + redirs_copy.close() + else: + raise NotImplementedError('Unsupported token type ' + type) + + if status < 0: + status = 255 + return status + + def _execute_if_clause(self, if_clause, redirs): + cond_status = self.execute(if_clause.cond, redirs) + if cond_status==0: + return self.execute(if_clause.if_cmds, redirs) + else: + return self.execute(if_clause.else_cmds, redirs) + + def _execute_brace_group(self, group, redirs): + status = 0 + for cmd in group.cmds: + status = self.execute(cmd, redirs) + return status + + def _execute_function_definition(self, fundef, redirs): + self._env.define_function(fundef.name, fundef.body) + return 0 + + def _execute_while_clause(self, while_clause, redirs): + status = 0 + while 1: + cond_status = 0 + for cond in while_clause.condition: + cond_status = self.execute(cond, redirs) + + if cond_status: + break + + for cmd in while_clause.cmds: + status = self.execute(cmd, redirs) + + return status + + def _execute_for_clause(self, for_clause, redirs): + if not is_name(for_clause.name): + raise ShellSyntaxError('%s is not a valid name' % repr(for_clause.name)) + items = mappend(self.expand_token, for_clause.items) + + status = 0 + for item in items: + self._env[for_clause.name] = item + for cmd in for_clause.cmds: + status = self.execute(cmd, redirs) + return status + + def _execute_and_or(self, or_and, redirs): + res = self.execute(or_and.left, redirs) + if (or_and.op=='&&' and res==0) or (or_and.op!='&&' and res!=0): + res = self.execute(or_and.right, redirs) + return res + + def _execute_pipeline(self, pipeline, redirs): + if len(pipeline.commands)==1: + status = self.execute(pipeline.commands[0], redirs) + else: + # Execute all commands one after the other + status = 0 + inpath, outpath = None, None + try: + # Commands inputs and outputs cannot really be plugged as done + # by a real shell. Run commands sequentially and chain their + # input/output throught temporary files. 
+ tmpfd, inpath = tempfile.mkstemp() + os.close(tmpfd) + tmpfd, outpath = tempfile.mkstemp() + os.close(tmpfd) + + inpath = win32_to_unix_path(inpath) + outpath = win32_to_unix_path(outpath) + + for i, cmd in enumerate(pipeline.commands): + call_redirs = redirs.clone() + try: + if i!=0: + call_redirs.add(self, '<', inpath) + if i!=len(pipeline.commands)-1: + call_redirs.add(self, '>', outpath) + + status = self.execute(cmd, call_redirs) + + # Chain inputs/outputs + inpath, outpath = outpath, inpath + finally: + call_redirs.close() + finally: + if inpath: os.remove(inpath) + if outpath: os.remove(outpath) + + if pipeline.reverse_status: + status = int(not status) + self._env['?'] = status + return status + + def _execute_function(self, name, args, interp, env, stdin, stdout, stderr, *others): + assert interp is self + + func = env.get_function(name) + #Set positional parameters + prevargs = None + try: + prevargs = env.set_positional_args(args) + try: + redirs = Redirections(stdin.dup(), stdout.dup(), stderr.dup()) + try: + status = self.execute(func, redirs) + finally: + redirs.close() + except ReturnSignal as e: + status = int(e.args[0]) + env['?'] = status + return status + finally: + #Reset positional parameters + if prevargs is not None: + env.set_positional_args(prevargs) + + def _execute_simple_command(self, token, redirs): + """Can raise ReturnSignal when return builtin is called, ExitSignal when + exit is called, and other shell exceptions upon builtin failures. + """ + debug_command = 'debug-cmd' in self._debugflags + if debug_command: + self.log('word' + repr(token.words) + '\n') + self.log('assigns' + repr(token.assigns) + '\n') + self.log('redirs' + repr(token.redirs) + '\n') + + is_special = None + env = self._env + + try: + # Word expansion + args = [] + for word in token.words: + args += self.expand_token(word) + if is_special is None and args: + is_special = env.is_function(args[0]) or \ + (args[0] in self.COMMANDS and self.COMMANDS[args[0]].is_special) + + if debug_command: + self.log('_execute_simple_command' + str(args) + '\n') + + if not args: + # Redirections happen is a subshell + redirs = redirs.clone() + elif not is_special: + env = self._env.clone() + + # Redirections + self.redirect(redirs, token.redirs) + + # Variables assignments + res = 0 + for type,(k,v) in token.assigns: + status, expanded = self.expand_variable((k,v)) + if status is not None: + res = status + if args: + env.export(k, expanded) + else: + env[k] = expanded + + if args and args[0] in ('.', 'source'): + res = self.dotcommand(env, args[1:]) + elif args: + if args[0] in self.COMMANDS: + command = self.COMMANDS[args[0]] + elif env.is_function(args[0]): + command = Utility(self._execute_function, is_special=True) + else: + if not '/' in args[0].replace('\\', '/'): + cmd = env.find_in_path(args[0]) + if not cmd: + # TODO: test error code on unknown command => 127 + raise CommandNotFound('Unknown command: "%s"' % args[0]) + else: + # Handle commands like '/cygdrive/c/foo.bat' + cmd = cygwin_to_windows_path(args[0]) + if not os.path.exists(cmd): + raise CommandNotFound('%s: No such file or directory' % args[0]) + shebang = resolve_shebang(cmd) + if shebang: + cmd = shebang + else: + cmd = [cmd] + args[0:1] = cmd + command = Utility(builtin.run_command) + + # Command execution + if 'debug-cmd' in self._debugflags: + self.log('redirections ' + str(redirs) + '\n') + + res = command.func(args[0], args[1:], self, env, + redirs.stdin(), redirs.stdout(), + redirs.stderr(), self._debugflags) + + if 
self._env.has_opt('-x'): + # Trace command execution in shell environment + # BUG: would be hard to reproduce a real shell behaviour since + # the AST is not annotated with source lines/tokens. + self._redirs.stdout().write(' '.join(args)) + + except ReturnSignal: + raise + except ShellError as e: + if is_special or isinstance(e, (ExitSignal, + ShellSyntaxError, ExpansionError)): + raise e + self._redirs.stderr().write(str(e)+'\n') + return 1 + + return res + + def expand_token(self, word): + """Expand a word as specified in [2.6 Word Expansions]. Return the list + of expanded words. + """ + status, wtrees = self._expand_word(word) + return map(pyshlex.wordtree_as_string, wtrees) + + def expand_variable(self, word): + """Return a status code (or None if no command expansion occurred) + and a single word. + """ + status, wtrees = self._expand_word(word, pathname=False, split=False) + words = map(pyshlex.wordtree_as_string, wtrees) + assert len(words)==1 + return status, words[0] + + def expand_here_document(self, word): + """Return the expanded document as a single word. The here document is + assumed to be unquoted. + """ + status, wtrees = self._expand_word(word, pathname=False, + split=False, here_document=True) + words = map(pyshlex.wordtree_as_string, wtrees) + assert len(words)==1 + return words[0] + + def expand_redirection(self, word): + """Return a single word.""" + return self.expand_variable(word)[1] + + def get_env(self): + return self._env + + def _expand_word(self, token, pathname=True, split=True, here_document=False): + wtree = pyshlex.make_wordtree(token[1], here_document=here_document) + + # TODO: implement tilde expansion + def expand(wtree): + """Return a pseudo wordtree: the tree or its subelements can be empty + lists when no value result from the expansion. 
+ """ + status = None + for part in wtree: + if not isinstance(part, list): + continue + if part[0]in ("'", '\\'): + continue + elif part[0] in ('`', '$('): + status, result = self._expand_command(part) + part[:] = result + elif part[0] in ('$', '${'): + part[:] = self._expand_parameter(part, wtree[0]=='"', split) + elif part[0] in ('', '"'): + status, result = expand(part) + part[:] = result + else: + raise NotImplementedError('%s expansion is not implemented' + % part[0]) + # [] is returned when an expansion result in no-field, + # like an empty $@ + wtree = [p for p in wtree if p != []] + if len(wtree) < 3: + return status, [] + return status, wtree + + status, wtree = expand(wtree) + if len(wtree) == 0: + return status, wtree + wtree = pyshlex.normalize_wordtree(wtree) + + if split: + wtrees = self._split_fields(wtree) + else: + wtrees = [wtree] + + if pathname: + wtrees = mappend(self._expand_pathname, wtrees) + + wtrees = map(self._remove_quotes, wtrees) + return status, wtrees + + def _expand_command(self, wtree): + # BUG: there is something to do with backslashes and quoted + # characters here + command = pyshlex.wordtree_as_string(wtree[1:-1]) + status, output = self.subshell_output(command) + return status, ['', output, ''] + + def _expand_parameter(self, wtree, quoted=False, split=False): + """Return a valid wtree or an empty list when no parameter results.""" + # Get the parameter name + # TODO: implement weird expansion rules with ':' + name = pyshlex.wordtree_as_string(wtree[1:-1]) + if not is_name(name) and not is_special_param(name): + raise ExpansionError('Bad substitution "%s"' % name) + # TODO: implement special parameters + if name in ('@', '*'): + args = self._env.get_positional_args() + if len(args) == 0: + return [] + if len(args)<2: + return ['', ''.join(args), ''] + + sep = self._env.get('IFS', '')[:1] + if split and quoted and name=='@': + # Introduce a new token to tell the caller that these parameters + # cause a split as specified in 2.5.2 + return ['@'] + args + [''] + else: + return ['', sep.join(args), ''] + + return ['', self._env.get(name, ''), ''] + + def _split_fields(self, wtree): + def is_empty(split): + return split==['', '', ''] + + def split_positional(quoted): + # Return a list of wtree split according positional parameters rules. + # All remaining '@' groups are removed. + assert quoted[0]=='"' + + splits = [[]] + for part in quoted: + if not isinstance(part, list) or part[0]!='@': + splits[-1].append(part) + else: + # Empty or single argument list were dealt with already + assert len(part)>3 + # First argument must join with the beginning part of the original word + splits[-1].append(part[1]) + # Create double-quotes expressions for every argument after the first + for arg in part[2:-1]: + splits[-1].append('"') + splits.append(['"', arg]) + return splits + + # At this point, all expansions but pathnames have occured. Only quoted + # and positional sequences remain. Thus, all candidates for field splitting + # are in the tree root, or are positional splits ('@') and lie in root + # children. 
+ if not wtree or wtree[0] not in ('', '"'): + # The whole token is quoted or empty, nothing to split + return [wtree] + + if wtree[0]=='"': + wtree = ['', wtree, ''] + + result = [['', '']] + for part in wtree[1:-1]: + if isinstance(part, list): + if part[0]=='"': + splits = split_positional(part) + if len(splits)<=1: + result[-1] += [part, ''] + else: + # Terminate the current split + result[-1] += [splits[0], ''] + result += splits[1:-1] + # Create a new split + result += [['', splits[-1], '']] + else: + result[-1] += [part, ''] + else: + splits = self._env.split_fields(part) + if len(splits)<=1: + # No split + result[-1][-1] += part + else: + # Terminate the current resulting part and create a new one + result[-1][-1] += splits[0] + result[-1].append('') + result += [['', r, ''] for r in splits[1:-1]] + result += [['', splits[-1]]] + result[-1].append('') + + # Leading and trailing empty groups come from leading/trailing blanks + if result and is_empty(result[-1]): + result[-1:] = [] + if result and is_empty(result[0]): + result[:1] = [] + return result + + def _expand_pathname(self, wtree): + """See [2.6.6 Pathname Expansion].""" + if self._env.has_opt('-f'): + return [wtree] + + # All expansions have been performed, only quoted sequences should remain + # in the tree. Generate the pattern by folding the tree, escaping special + # characters when appear quoted + special_chars = '*?[]' + + def make_pattern(wtree): + subpattern = [] + for part in wtree[1:-1]: + if isinstance(part, list): + part = make_pattern(part) + elif wtree[0]!='': + for c in part: + # Meta-characters cannot be quoted + if c in special_chars: + raise GlobError() + subpattern.append(part) + return ''.join(subpattern) + + def pwd_glob(pattern): + cwd = os.getcwd() + os.chdir(self._env['PWD']) + try: + return glob.glob(pattern) + finally: + os.chdir(cwd) + + #TODO: check working directory issues here wrt relative patterns + try: + pattern = make_pattern(wtree) + paths = pwd_glob(pattern) + except GlobError: + # BUG: Meta-characters were found in quoted sequences. The should + # have been used literally but this is unsupported in current glob module. + # Instead we consider the whole tree must be used literally and + # therefore there is no point in globbing. This is wrong when meta + # characters are mixed with quoted meta in the same pattern like: + # < foo*"py*" > + paths = [] + + if not paths: + return [wtree] + return [['', path, ''] for path in paths] + + def _remove_quotes(self, wtree): + """See [2.6.7 Quote Removal].""" + + def unquote(wtree): + unquoted = [] + for part in wtree[1:-1]: + if isinstance(part, list): + part = unquote(part) + unquoted.append(part) + return ''.join(unquoted) + + return ['', unquote(wtree), ''] + + def subshell(self, script=None, ast=None, redirs=None): + """Execute the script or AST in a subshell, with inherited redirections + if redirs is not None. 
+ """ + if redirs: + sub_redirs = redirs + else: + sub_redirs = redirs.clone() + + subshell = None + try: + subshell = Interpreter(None, self._debugflags, self._env.clone(True), + sub_redirs, opts=self._options) + return subshell.execute_script(script, ast) + finally: + if not redirs: sub_redirs.close() + if subshell: subshell.close() + + def subshell_output(self, script): + """Execute the script in a subshell and return the captured output.""" + # Create temporary file to capture subshell output + tmpfd, tmppath = tempfile.mkstemp() + try: + tmpfile = os.fdopen(tmpfd, 'wb') + stdout = FileWrapper('w', tmpfile) + + redirs = Redirections(self._redirs.stdin().dup(), + stdout, + self._redirs.stderr().dup()) + try: + status = self.subshell(script=script, redirs=redirs) + finally: + redirs.close() + redirs = None + + # Extract subshell standard output + tmpfile = open(tmppath, 'rb') + try: + output = tmpfile.read() + return status, output.rstrip('\n') + finally: + tmpfile.close() + finally: + os.remove(tmppath) + + def _asynclist(self, cmd): + args = (self._env.get_variables(), cmd) + arg = encodeargs(args) + assert len(args) < 30*1024 + cmd = ['pysh.bat', '--ast', '-c', arg] + p = subprocess.Popen(cmd, cwd=self._env['PWD']) + self._children[p.pid] = p + self._env['!'] = p.pid + return 0 + + def wait(self, pids=None): + if not pids: + pids = self._children.keys() + + status = 127 + for pid in pids: + if pid not in self._children: + continue + p = self._children.pop(pid) + status = p.wait() + + return status + diff --git a/poky/bitbake/lib/bb/pysh/lsprof.py b/poky/bitbake/lib/bb/pysh/lsprof.py new file mode 100644 index 0000000000..b1831c22a7 --- /dev/null +++ b/poky/bitbake/lib/bb/pysh/lsprof.py @@ -0,0 +1,116 @@ +#! /usr/bin/env python + +import sys +from _lsprof import Profiler, profiler_entry + +__all__ = ['profile', 'Stats'] + +def profile(f, *args, **kwds): + """XXX docstring""" + p = Profiler() + p.enable(subcalls=True, builtins=True) + try: + f(*args, **kwds) + finally: + p.disable() + return Stats(p.getstats()) + + +class Stats(object): + """XXX docstring""" + + def __init__(self, data): + self.data = data + + def sort(self, crit="inlinetime"): + """XXX docstring""" + if crit not in profiler_entry.__dict__: + raise ValueError("Can't sort by %s" % crit) + self.data.sort(lambda b, a: cmp(getattr(a, crit), + getattr(b, crit))) + for e in self.data: + if e.calls: + e.calls.sort(lambda b, a: cmp(getattr(a, crit), + getattr(b, crit))) + + def pprint(self, top=None, file=None, limit=None, climit=None): + """XXX docstring""" + if file is None: + file = sys.stdout + d = self.data + if top is not None: + d = d[:top] + cols = "% 12s %12s %11.4f %11.4f %s\n" + hcols = "% 12s %12s %12s %12s %s\n" + cols2 = "+%12s %12s %11.4f %11.4f + %s\n" + file.write(hcols % ("CallCount", "Recursive", "Total(ms)", + "Inline(ms)", "module:lineno(function)")) + count = 0 + for e in d: + file.write(cols % (e.callcount, e.reccallcount, e.totaltime, + e.inlinetime, label(e.code))) + count += 1 + if limit is not None and count == limit: + return + ccount = 0 + if e.calls: + for se in e.calls: + file.write(cols % ("+%s" % se.callcount, se.reccallcount, + se.totaltime, se.inlinetime, + "+%s" % label(se.code))) + count += 1 + ccount += 1 + if limit is not None and count == limit: + return + if climit is not None and ccount == climit: + break + + def freeze(self): + """Replace all references to code objects with string + descriptions; this makes it possible to pickle the instance.""" + + # this code is probably rather 
ickier than it needs to be! + for i in range(len(self.data)): + e = self.data[i] + if not isinstance(e.code, str): + self.data[i] = type(e)((label(e.code),) + e[1:]) + if e.calls: + for j in range(len(e.calls)): + se = e.calls[j] + if not isinstance(se.code, str): + e.calls[j] = type(se)((label(se.code),) + se[1:]) + +_fn2mod = {} + +def label(code): + if isinstance(code, str): + return code + try: + mname = _fn2mod[code.co_filename] + except KeyError: + for k, v in sys.modules.items(): + if v is None: + continue + if not hasattr(v, '__file__'): + continue + if not isinstance(v.__file__, str): + continue + if v.__file__.startswith(code.co_filename): + mname = _fn2mod[code.co_filename] = k + break + else: + mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename + + return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name) + + +if __name__ == '__main__': + import os + sys.argv = sys.argv[1:] + if not sys.argv: + print >> sys.stderr, "usage: lsprof.py